2 changes: 1 addition & 1 deletion pandas/_version.py
@@ -84,7 +84,7 @@ def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
return None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))

Member
I think this file is auto-generated.

Member Author
reverted

print(f"unable to find command, tried {commands}")
return None
stdout = p.communicate()[0].strip().decode()
if p.returncode != 0:
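
A quick aside on the change above: the old %-format wrapped commands in a one-element tuple, while the f-string does not need to. A minimal sketch with made-up values (not taken from versioneer) shows why the two are equivalent:

commands = ("git", "git.cmd")

# %-formatting unpacks a bare tuple as the argument list, so the defensive
# one-element wrapping (commands,) was needed to format the tuple itself;
# "%s" % commands would raise "not all arguments converted".
old = "unable to find command, tried %s" % (commands,)

# The f-string interpolates the object directly, so no wrapping is required.
new = f"unable to find command, tried {commands}"

assert old == new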
2 changes: 1 addition & 1 deletion pandas/core/arrays/period.py
@@ -600,7 +600,7 @@ def _format_native_types(self, na_rep="NaT", date_format=None, **kwargs):
if date_format:
formatter = lambda dt: dt.strftime(date_format)
else:
formatter = lambda dt: "%s" % dt
formatter = lambda dt: f"{dt}"

Member
would str(dt) be clearer here?

Member Author
Sure - agree
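
A minimal sketch of why the two spellings agree, using a stdlib datetime as a stand-in for the period values formatted here:

from datetime import datetime

dt = datetime(2019, 11, 14, 12, 30)

# With an empty format spec, f"{dt}" falls back to str(dt), so all three
# spellings produce the same text; str(dt) states the intent most directly.
assert "%s" % dt == f"{dt}" == str(dt)

# When a date_format is supplied, the strftime branch above is used instead.
assert dt.strftime("%Y-%m-%d") == "2019-11-14"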


if self._hasnans:
mask = self._isnan
2 changes: 1 addition & 1 deletion pandas/core/common.py
@@ -462,7 +462,7 @@ def pipe(obj, func, *args, **kwargs):
if isinstance(func, tuple):
func, target = func
if target in kwargs:
msg = "%s is both the pipe target and a keyword argument" % target
msg = f"{target} is both the pipe target and a keyword argument"
raise ValueError(msg)
kwargs[target] = obj
return func(*args, **kwargs)
27 changes: 13 additions & 14 deletions pandas/core/frame.py
@@ -1806,7 +1806,7 @@ def to_records(
if isinstance(self.index, ABCMultiIndex):
for i, n in enumerate(index_names):
if n is None:
index_names[i] = "level_%d" % count
index_names[i] = f"level_{count}"
count += 1
elif index_names[0] is None:
index_names = ["index"]
@@ -2083,7 +2083,7 @@ def to_stata(
data_label=data_label,
write_index=write_index,
variable_labels=variable_labels,
**kwargs
**kwargs,
)
writer.write_file()

@@ -2107,7 +2107,7 @@ def to_parquet(
compression="snappy",
index=None,
partition_cols=None,
**kwargs
**kwargs,
):
"""
Write a DataFrame to the binary parquet format.
@@ -2187,7 +2187,7 @@ def to_parquet(
compression=compression,
index=index,
partition_cols=partition_cols,
**kwargs
**kwargs,

Member
is this a black version thing?

Member Author
Weirdly, using 19.3b0 locally seems to add this for me - I've reverted.

Member Author (@alimcmaster1, Nov 11, 2019)
Black code check in CI fails without this - I've added this back in.

Member
Personally, I find it quite odd that you have to add a comma at the end.

Member Author
Related to this - #29607
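
For illustration, a toy function (not pandas code) showing the two layouts the black versions disagree on - whether an exploded call ends with a trailing comma after **kwargs, which appears to be what #29607 is about:

def report(*args, **kwargs):
    print(args, kwargs)

opts = {"index": None, "compression": "snappy"}

# Without a trailing comma after **kwargs:
report(
    "data.parquet",
    engine="auto",
    **opts
)

# With the trailing comma (syntactically valid on Python 3.6+, and what the
# black check in CI expects here):
report(
    "data.parquet",
    engine="auto",
    **opts,
)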

)

@Substitution(
@@ -2455,7 +2455,7 @@ def info(
exceeds_info_cols = len(self.columns) > max_cols

def _verbose_repr():
lines.append("Data columns (total %d columns):" % len(self.columns))
lines.append(f"Data columns (total {len(self.columns)} columns):")
space = max(len(pprint_thing(k)) for k in self.columns) + 4
counts = None

@@ -2847,7 +2847,7 @@ def _getitem_bool_array(self, key):
)
elif len(key) != len(self.index):
raise ValueError(
"Item wrong length %d instead of %d." % (len(key), len(self.index))
f"Item wrong length {len(key)} instead of {len(self.index)}."
)

# check_bool_indexer will throw exception if Series key cannot
@@ -2958,7 +2958,7 @@ def _setitem_array(self, key, value):
if com.is_bool_indexer(key):
if len(key) != len(self.index):
raise ValueError(
"Item wrong length %d instead of %d!" % (len(key), len(self.index))
f"Item wrong length {len(key)} instead of {len(self.index)}!"
)
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
@@ -4129,7 +4129,7 @@ def fillna(
inplace=False,
limit=None,
downcast=None,
**kwargs
**kwargs,
):
return super().fillna(
value=value,
@@ -4138,7 +4138,7 @@
inplace=inplace,
limit=limit,
downcast=downcast,
**kwargs
**kwargs,
)

@Appender(_shared_docs["replace"] % _shared_doc_kwargs)
@@ -4556,8 +4556,8 @@ def _maybe_casted_values(index, labels=None):
if not drop:
if isinstance(self.index, ABCMultiIndex):
names = [
n if n is not None else ("level_%d" % i)
for (i, n) in enumerate(self.index.names)
(n if n is not None else f"level_{i}")
for i, n in enumerate(self.index.names)
]
to_insert = zip(self.index.levels, self.index.codes)
else:
@@ -4877,8 +4877,7 @@ def sort_values(
by = [by]
if is_sequence(ascending) and len(by) != len(ascending):
raise ValueError(
"Length of ascending (%d) != length of by (%d)"
% (len(ascending), len(by))
f"Length of ascending ({len(ascending)}) != length of by ({len(by)})"
)
if len(by) > 1:
from pandas.core.sorting import lexsort_indexer
@@ -6585,7 +6584,7 @@ def _gotitem(
see_also=_agg_summary_and_see_also_doc,
examples=_agg_examples_doc,
versionadded="\n.. versionadded:: 0.20.0\n",
**_shared_doc_kwargs
**_shared_doc_kwargs,
)
@Appender(_shared_docs["aggregate"])
def aggregate(self, func, axis=0, *args, **kwargs):
40 changes: 19 additions & 21 deletions pandas/core/generic.py
@@ -427,8 +427,7 @@ def _construct_axes_from_arguments(
if a in kwargs:
if alias in kwargs:
raise TypeError(
"arguments are mutually exclusive "
"for [%s,%s]" % (a, alias)
f"arguments are mutually exclusive for [{a},{alias}]"
)
continue
if alias in kwargs:
@@ -760,7 +759,7 @@ def transpose(self, *args, **kwargs):

# we must have unique axes
if len(axes) != len(set(axes)):
raise ValueError("Must specify %s unique axes" % self._AXIS_LEN)
raise ValueError(f"Must specify {self._AXIS_LEN} unique axes")

new_axes = self._construct_axes_dict_from(
self, [self._get_axis(x) for x in axes_names]
@@ -2060,7 +2059,7 @@ def __getstate__(self):
_typ=self._typ,
_metadata=self._metadata,
attrs=self.attrs,
**meta
**meta,
)

def __setstate__(self, state):
@@ -2101,7 +2100,7 @@ def __repr__(self) -> str:
# string representation based upon iterating over self
# (since, by definition, `PandasContainers` are iterable)
prepr = "[%s]" % ",".join(map(pprint_thing, self))
return "%s(%s)" % (self.__class__.__name__, prepr)
return f"{self.__class__.__name__}({prepr})"

def _repr_latex_(self):
"""
@@ -6362,7 +6361,7 @@ def fillna(
elif isinstance(value, ABCDataFrame) and self.ndim == 2:
new_data = self.where(self.notna(), value)
else:
raise ValueError("invalid fill value with a %s" % type(value))
raise ValueError(f"invalid fill value with a {type(value)}")

if inplace:
self._update_inplace(new_data)
@@ -6799,9 +6798,8 @@ def replace(
if is_list_like(value):
if len(to_replace) != len(value):
raise ValueError(
"Replacement lists must match "
"in length. Expecting %d got %d "
% (len(to_replace), len(value))
f"Replacement lists must match in length."
f" Expecting {len(to_replace)} got {len(value)} "
)

new_data = self._data.replace_list(
@@ -7055,7 +7053,7 @@ def interpolate(
limit_direction="forward",
limit_area=None,
downcast=None,
**kwargs
**kwargs,
):
"""
Interpolate values according to different methods.
@@ -7129,7 +7127,7 @@
limit_area=limit_area,
inplace=inplace,
downcast=downcast,
**kwargs
**kwargs,
)

if inplace:
@@ -7829,7 +7827,7 @@ def groupby(
group_keys=True,
squeeze=False,
observed=False,
**kwargs
**kwargs,
):
"""
Group DataFrame or Series using a mapper or by a Series of columns.
@@ -7955,7 +7953,7 @@
group_keys=group_keys,
squeeze=squeeze,
observed=observed,
**kwargs
**kwargs,
)

def asfreq(self, freq, method=None, how=None, normalize=False, fill_value=None):
@@ -8881,7 +8879,7 @@ def align(
fill_axis=fill_axis,
)
else: # pragma: no cover
raise TypeError("unsupported type: %s" % type(other))
raise TypeError(f"unsupported type: {type(other)}")

def _align_frame(
self,
@@ -9525,9 +9523,9 @@ def tshift(self, periods=1, freq=None, axis=0):
new_data = self._data.copy()
new_data.axes[block_axis] = index.shift(periods)
else:
msg = "Given freq %s does not match PeriodIndex freq %s" % (
freq.rule_code,
orig_freq.rule_code,
msg = (
f"Given freq {freq.rule_code} does not match"
f" PeriodIndex freq {orig_freq.rule_code}"
)
raise ValueError(msg)
else:
@@ -9675,7 +9673,7 @@ def truncate(self, before=None, after=None, axis=None, copy=True):

if before is not None and after is not None:
if before > after:
raise ValueError("Truncate: %s must be after %s" % (after, before))
raise ValueError(f"Truncate: {after} must be after {before}")

slicer = [slice(None, None)] * self._AXIS_LEN
slicer[axis] = slice(before, after)
@@ -9721,7 +9719,7 @@ def _tz_convert(ax, tz):
if len(ax) > 0:
ax_name = self._get_axis_name(axis)
raise TypeError(
"%s is not a valid DatetimeIndex or PeriodIndex" % ax_name
f"{ax_name} is not a valid DatetimeIndex or PeriodIndex"
)
else:
ax = DatetimeIndex([], tz=tz)
@@ -9885,7 +9883,7 @@ def _tz_localize(ax, tz, ambiguous, nonexistent):
if len(ax) > 0:
ax_name = self._get_axis_name(axis)
raise TypeError(
"%s is not a valid DatetimeIndex or PeriodIndex" % ax_name
f"{ax_name} is not a valid DatetimeIndex or PeriodIndex"
)
else:
ax = DatetimeIndex([], tz=tz)
@@ -11582,7 +11580,7 @@ def stat_func(
level=None,
numeric_only=None,
min_count=0,
**kwargs
**kwargs,
):
if name == "sum":
nv.validate_sum(tuple(), kwargs)
25 changes: 12 additions & 13 deletions pandas/core/indexes/base.py
@@ -265,7 +265,7 @@ def __new__(
name=None,
fastpath=None,
tupleize_cols=True,
**kwargs
**kwargs,
) -> "Index":

from .range import RangeIndex
@@ -961,14 +961,13 @@ def __repr__(self):
data = self._format_data()
attrs = self._format_attrs()
space = self._format_space()

prepr = (",%s" % space).join("%s=%s" % (k, v) for k, v in attrs)
prepr = f",{space}".join(f"{k}={v}" for k, v in attrs)

Member
let's try to minimize what we execute in f-space (better name for this?)
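
A sketch of the kind of rewrite being asked for - hoist the work out of the replacement fields and only interpolate pre-computed names (attrs and space below are illustrative values, not the real Index internals):

attrs = [("dtype", "'int64'"), ("length", 3)]
space = " "

# Current form: the separator and the pairs are built inside f-strings.
prepr = f",{space}".join(f"{k}={v}" for k, v in attrs)

# Equivalent form with the expressions pulled out of "f-space".
sep = "," + space
pairs = (f"{k}={v}" for k, v in attrs)
assert sep.join(pairs) == prepr  # "dtype='int64', length=3"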


# no data provided, just attributes
if data is None:
data = ""

res = "%s(%s%s)" % (klass, data, prepr)
res = f"{klass}({data}{prepr})"

return res

@@ -1122,13 +1121,13 @@ def _summary(self, name=None):
tail = self[-1]
if hasattr(tail, "format") and not isinstance(tail, str):
tail = tail.format()
index_summary = ", %s to %s" % (pprint_thing(head), pprint_thing(tail))
index_summary = f", {pprint_thing(head)} to {pprint_thing(tail)}"

Member
pprint_thing may be unnecessary here now that we're py3-only. Can you take a look?
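
A quick way to check that suggestion (assumes pprint_thing is importable from pandas.io.formats.printing, as in this codebase; no claim that the two calls agree for every input):

from pandas.io.formats.printing import pprint_thing

samples = ["2019-11-14", "café", 42, ("a", 1)]
for obj in samples:
    # str() is unicode-safe on Python 3; eyeball whether pprint_thing adds
    # anything here before replacing it.
    print(repr(str(obj)), repr(pprint_thing(obj)))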

else:
index_summary = ""

if name is None:
name = type(self).__name__
return "%s: %s entries%s" % (name, len(self), index_summary)
return f"{name}: {len(self)} entries{index_summary}"

def summary(self, name=None):
"""
@@ -1302,7 +1301,7 @@ def _set_names(self, values, level=None):
if not is_list_like(values):
raise ValueError("Names must be a list-like")
if len(values) != 1:
raise ValueError("Length of new names must be 1, got %d" % len(values))
raise ValueError(f"Length of new names must be 1, got {len(values)}")

# GH 20527
# All items in 'name' need to be hashable:
@@ -1473,8 +1472,8 @@ def _validate_index_level(self, level):
if isinstance(level, int):
if level < 0 and level != -1:
raise IndexError(
"Too many levels: Index has only 1 level,"
" %d is not a valid level number" % (level,)
f"Too many levels: Index has only 1 level,"
f" {level} is not a valid level number"
)
elif level > 0:
raise IndexError(
@@ -4562,7 +4561,7 @@ def shift(self, periods=1, freq=None):
'2012-03-01'],
dtype='datetime64[ns]', freq='MS')
"""
raise NotImplementedError("Not supported for type %s" % type(self).__name__)
raise NotImplementedError(f"Not supported for type {type(self).__name__}")

def argsort(self, *args, **kwargs):
"""
@@ -5069,8 +5068,8 @@ def get_slice_bound(self, label, side, kind):

if side not in ("left", "right"):
raise ValueError(
"Invalid value for side kwarg,"
" must be either 'left' or 'right': %s" % (side,)
f"Invalid value for side kwarg, must be either"
f" 'left' or 'right': {side}"
)

original_label = label
@@ -5624,7 +5623,7 @@ def _trim_front(strings):

def _validate_join_method(method):
if method not in ["left", "right", "inner", "outer"]:
raise ValueError("do not recognize join method %s" % method)
raise ValueError(f"do not recognize join method {method}")


def default_index(n):
2 changes: 1 addition & 1 deletion pandas/core/indexes/category.py
@@ -357,7 +357,7 @@ def _format_attrs(self):
]
if self.name is not None:
attrs.append(("name", ibase.default_pprint(self.name)))
attrs.append(("dtype", "'%s'" % self.dtype.name))
attrs.append(("dtype", f"'{self.dtype.name}'"))
max_seq_items = get_option("display.max_seq_items") or len(self)
if len(self) > max_seq_items:
attrs.append(("length", len(self)))