2 changes: 1 addition & 1 deletion examples/resnet/resnet_main.py
@@ -230,7 +230,7 @@ def train():
# testing task with the current weights every 200 steps.
acc = ray.get(acc_id)
acc_id = test_actor.accuracy.remote(weight_id, step)
- print("Step {0}: {1:.6f}".format(step - 200, acc))
+ print("Step {}: {:.6f}".format(step - 200, acc))
except KeyboardInterrupt:
pass

4 changes: 2 additions & 2 deletions python/ray/cloudpickle/cloudpickle.py
@@ -572,8 +572,8 @@ def extract_code_globals(cls, co):
# PyPy "builtin-code" object
out_names = set()
else:
- out_names = set(names[oparg]
-                 for op, oparg in _walk_global_ops(co))
Collaborator:
Let's not make any changes to this file as it is directly copied from the cloudpickle repository.

Contributor Author:
Good point. Put in a PR w/cloudpickle to change that though.

+ out_names = {names[oparg]
+              for op, oparg in _walk_global_ops(co)}

# see if nested function have any global refs
if co.co_consts:
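As an aside, a standalone sketch (hypothetical values, not part of the diff) of the rewrite this PR applies throughout: a set built from a generator expression and the equivalent set comprehension produce the same result.

    # Mimics the shape of the hunk above: collect the names referenced
    # by LOAD_GLOBAL-style ops. The comprehension is equivalent and
    # avoids the extra set(...) call.
    names = ("print", "len")
    ops = [("LOAD_GLOBAL", 0), ("STORE_NAME", 0), ("LOAD_GLOBAL", 1)]

    old_style = set(names[oparg] for op, oparg in ops if op == "LOAD_GLOBAL")
    new_style = {names[oparg] for op, oparg in ops if op == "LOAD_GLOBAL"}
    assert old_style == new_style == {"print", "len"}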
2 changes: 1 addition & 1 deletion python/ray/common/test/test.py
@@ -133,7 +133,7 @@ def test_hashability(self):
x = random_object_id()
y = random_object_id()
{x: y}
- set([x, y])
+ {x, y}


class TestTask(unittest.TestCase):
22 changes: 11 additions & 11 deletions python/ray/dataframe/dataframe.py
@@ -260,7 +260,7 @@ def __repr__(self):
# The split here is so that we don't repr pandas row lengths.
result = self._repr_helper_()
  final_result = repr(result).rsplit("\n\n", maxsplit=1)[0] + \
- "\n\n[{0} rows x {1} columns]".format(len(self.index),
+ "\n\n[{} rows x {} columns]".format(len(self.index),
Collaborator:
If we do this change (and others), we'll need to fix the indentation of the next line.

Contributor Author:
I'm fine leaving this as is, then, though I'd prefer that there be a git hook requiring all code to be run through auto-formatters. It'd unify things like the use of certain syntax, indentation, and quote styles.
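To make the Collaborator's point concrete, here is a hypothetical standalone sketch (not part of the diff; n_rows and n_cols stand in for len(self.index) and len(self.columns)) of the re-indentation the shorter placeholders force:

    n_rows, n_cols = 3, 4

    # Before: the wrapped argument is aligned under the opening
    # parenthesis of .format(.
    old = "\n\n[{0} rows x {1} columns]".format(n_rows,
                                                n_cols)

    # After: "{}" is one character shorter than "{0}", so with two
    # placeholders the wrapped argument shifts two columns left to
    # keep the alignment.
    new = "\n\n[{} rows x {} columns]".format(n_rows,
                                              n_cols)

    assert old == new  # both render "[3 rows x 4 columns]"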

len(self.columns))
return final_result
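And here is a minimal sketch of the kind of formatter-enforcing hook the author suggests (hypothetical, not part of this PR; it assumes flake8 is installed and that the script is saved as an executable .git/hooks/pre-commit):

    #!/usr/bin/env python
    """Hypothetical pre-commit hook: reject commits failing the style check."""
    import subprocess
    import sys

    # Collect the staged Python files (added, copied, or modified).
    diff = subprocess.check_output(
        ["git", "diff", "--cached", "--name-only", "--diff-filter=ACM"])
    py_files = [f for f in diff.decode().split() if f.endswith(".py")]

    # Run the checker; a nonzero exit status aborts the commit.
    if py_files and subprocess.call(["flake8"] + py_files) != 0:
        sys.exit("pre-commit: style check failed; run the formatter and retry.")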

@@ -279,7 +279,7 @@ def _repr_html_(self):
# We split so that we insert our correct dataframe dimensions.
result = self._repr_helper_()._repr_html_()
return result.split('<p>')[0] + \
- '<p>{0} rows × {1} columns</p>\n</div>'.format(len(self.index),
+ '<p>{} rows × {} columns</p>\n</div>'.format(len(self.index),
len(self.columns))

def _get_index(self):
@@ -527,7 +527,7 @@ def applymap(self, func):
"""
if not callable(func):
  raise ValueError(
- "\'{0}\' object is not callable".format(type(func)))
+ "\'{}\' object is not callable".format(type(func)))

new_block_partitions = np.array([
_map_partitions(lambda df: df.applymap(func), block)
@@ -1601,7 +1601,7 @@ def fillna(self, value=None, method=None, axis=None, inplace=False,

if isinstance(value, (list, tuple)):
raise TypeError('"value" parameter must be a scalar or dict, but '
- 'you passed a "{0}"'.format(type(value).__name__))
+ 'you passed a "{}"'.format(type(value).__name__))
if value is None and method is None:
raise ValueError('must specify a fill method or value')
if value is not None and method is not None:
@@ -1875,7 +1875,7 @@ def info_helper(df):
index_string = self.index.summary() + '\n'

# A column header is needed in the inf() output
- col_header = 'Data columns (total {0} columns):\n'.format(
+ col_header = 'Data columns (total {} columns):\n'.format(
len(self.columns))

# Parse the per-partition values to get the per-column details
@@ -1884,15 +1884,15 @@ def info_helper(df):
col_lines = [prog.match(line) for line in lines]
cols = [c.group(0) for c in col_lines if c is not None]
# replace the partition columns names with real column names
- columns = ["{0}\t{1}\n".format(self.columns[i],
+ columns = ["{}\t{}\n".format(self.columns[i],
cols[i].split(" ", 1)[1])
for i in range(len(cols))]
col_string = ''.join(columns) + '\n'

# A summary of the dtypes in the dataframe
dtypes_string = "dtypes: "
for dtype, count in self.dtypes.value_counts().iteritems():
- dtypes_string += "{0}({1}),".format(dtype, count)
+ dtypes_string += "{}({}),".format(dtype, count)
dtypes_string = dtypes_string[:-1] + '\n'

# Compute the memory usage by summing per-partitions return values
@@ -1907,10 +1907,10 @@ def info_helper(df):
if len(mem_vals) != 0:
# Sum memory usage from each partition
if memory_usage != 'deep':
- memory_string = 'memory usage: {0}+ bytes'.format(
+ memory_string = 'memory usage: {}+ bytes'.format(
sum(mem_vals))
else:
- memory_string = 'memory usage: {0} bytes'.format(sum(mem_vals))
+ memory_string = 'memory usage: {} bytes'.format(sum(mem_vals))

# Combine all the components of the info() output
result = ''.join([class_string, index_string, col_header,
@@ -1939,10 +1939,10 @@ def insert(self, loc, column, value, allow_duplicates=False):
"Length of values does not match length of index")
if not allow_duplicates and column in self.columns:
  raise ValueError(
- "cannot insert {0}, already exists".format(column))
+ "cannot insert {}, already exists".format(column))
if loc > len(self.columns):
  raise IndexError(
- "index {0} is out of bounds for axis 0 with size {1}".format(
+ "index {} is out of bounds for axis 0 with size {}".format(
loc, len(self.columns)))
if loc < 0:
raise ValueError("unbounded slice")
6 changes: 3 additions & 3 deletions python/ray/dataframe/groupby.py
@@ -106,7 +106,7 @@ def tshift(self):

@property
def groups(self):
- return dict([(k, pd.Index(v)) for k, v in self._keys_and_values])
+ return {k: pd.Index(v) for k, v in self._keys_and_values}

def min(self, **kwargs):
return self._apply_agg_function(lambda df: df.min(**kwargs))
@@ -335,7 +335,7 @@ def take(self, **kwargs):
return self._apply_df_function(lambda df: df.take(**kwargs))

def _apply_agg_function(self, f):
- assert callable(f), "\'{0}\' object is not callable".format(type(f))
+ assert callable(f), "\'{}\' object is not callable".format(type(f))

result = [pd.DataFrame(f(v)).T for k, v in self._iter]

@@ -350,7 +350,7 @@ def _apply_agg_function(self, f):
return new_df

def _apply_df_function(self, f):
- assert callable(f), "\'{0}\' object is not callable".format(type(f))
+ assert callable(f), "\'{}\' object is not callable".format(type(f))

result = [f(v) for k, v in self._iter]

2 changes: 1 addition & 1 deletion python/ray/dataframe/index_metadata.py
@@ -226,7 +226,7 @@ def insert(self, key, loc=None, partition=None,
partition = np.digitize(loc, cum_lens[:-1])
if partition >= len(cum_lens):
if loc > cum_lens[-1]:
- raise IndexError("index {0} is out of bounds".format(loc))
+ raise IndexError("index {} is out of bounds".format(loc))
else:
index_within_partition = self._lengths[-1]
else:
2 changes: 1 addition & 1 deletion python/ray/experimental/tfutils.py
@@ -49,7 +49,7 @@ def __init__(self, loss, sess=None, input_variables=None):
self.sess = sess
queue = deque([loss])
variable_names = []
- explored_inputs = set([loss])
+ explored_inputs = {loss}

# We do a BFS on the dependency graph of the input function to find
# the variables.
14 changes: 7 additions & 7 deletions python/ray/plasma/test/test.py
@@ -297,7 +297,7 @@ def test_wait(self):
self.client1.seal(obj_id1)
ready, waiting = self.client1.wait(
[obj_id1], timeout=100, num_returns=1)
- self.assertEqual(set(ready), set([obj_id1]))
+ self.assertEqual(set(ready), {obj_id1})
self.assertEqual(waiting, [])

# Test wait if only one object available and only one object waited
@@ -307,8 +307,8 @@ def test_wait(self):
# Don't seal.
ready, waiting = self.client1.wait(
[obj_id2, obj_id1], timeout=100, num_returns=1)
- self.assertEqual(set(ready), set([obj_id1]))
- self.assertEqual(set(waiting), set([obj_id2]))
+ self.assertEqual(set(ready), {obj_id1})
+ self.assertEqual(set(waiting), {obj_id2})

# Test wait if object is sealed later.
obj_id3 = random_object_id()
@@ -321,14 +321,14 @@ def finish():
t.start()
ready, waiting = self.client1.wait(
[obj_id3, obj_id2, obj_id1], timeout=1000, num_returns=2)
- self.assertEqual(set(ready), set([obj_id1, obj_id3]))
- self.assertEqual(set(waiting), set([obj_id2]))
+ self.assertEqual(set(ready), {obj_id1, obj_id3})
+ self.assertEqual(set(waiting), {obj_id2})

# Test if the appropriate number of objects is shown if some objects
# are not ready.
ready, waiting = self.client1.wait([obj_id3, obj_id2, obj_id1], 100, 3)
- self.assertEqual(set(ready), set([obj_id1, obj_id3]))
- self.assertEqual(set(waiting), set([obj_id2]))
+ self.assertEqual(set(ready), {obj_id1, obj_id3})
+ self.assertEqual(set(waiting), {obj_id2})

# Don't forget to seal obj_id2.
self.client1.seal(obj_id2)
2 changes: 1 addition & 1 deletion python/ray/signature.py
@@ -61,7 +61,7 @@ def func():
for attr in attrs:
setattr(func, attr, getattr(original_func, attr))
else:
- raise TypeError("{0!r} is not a Python function we can process"
+ raise TypeError("{!r} is not a Python function we can process"
.format(func))

return list(funcsigs.signature(func).parameters.items())
16 changes: 8 additions & 8 deletions python/ray/tune/test/trial_scheduler_test.py
@@ -688,36 +688,36 @@ def assertProduces(fn, values):
# Categorical case
assertProduces(
lambda: explore({"v": 4}, {"v": [3, 4, 8, 10]}, 0.0, lambda x: x),
- set([3, 8]))
+ {3, 8})
assertProduces(
lambda: explore({"v": 3}, {"v": [3, 4, 8, 10]}, 0.0, lambda x: x),
- set([3, 4]))
+ {3, 4})
assertProduces(
lambda: explore({"v": 10}, {"v": [3, 4, 8, 10]}, 0.0, lambda x: x),
- set([8, 10]))
+ {8, 10})
assertProduces(
lambda: explore({"v": 7}, {"v": [3, 4, 8, 10]}, 0.0, lambda x: x),
- set([3, 4, 8, 10]))
+ {3, 4, 8, 10})
assertProduces(
lambda: explore({"v": 4}, {"v": [3, 4, 8, 10]}, 1.0, lambda x: x),
- set([3, 4, 8, 10]))
+ {3, 4, 8, 10})

# Continuous case
assertProduces(
lambda: explore(
{"v": 100}, {"v": lambda: random.choice([10, 100])}, 0.0,
lambda x: x),
- set([80, 120]))
+ {80, 120})
assertProduces(
lambda: explore(
{"v": 100.0}, {"v": lambda: random.choice([10, 100])}, 0.0,
lambda x: x),
- set([80.0, 120.0]))
+ {80.0, 120.0})
assertProduces(
lambda: explore(
{"v": 100.0}, {"v": lambda: random.choice([10, 100])}, 1.0,
lambda x: x),
- set([10.0, 100.0]))
+ {10.0, 100.0})

def testYieldsTimeToOtherTrials(self):
pbt, runner = self.basicSetup()
2 changes: 1 addition & 1 deletion python/ray/tune/trial_runner.py
@@ -172,7 +172,7 @@ def debug_string(self, max_debug=MAX_DEBUG_TRIALS):
if max_debug == start_num:
break

- for local_dir in sorted(set([t.local_dir for t in self._trials])):
+ for local_dir in sorted({t.local_dir for t in self._trials}):
messages.append("Result logdir: {}".format(local_dir))
for state, trials in sorted(states.items()):
limit = limit_per_state[state]
4 changes: 2 additions & 2 deletions python/ray/worker.py
@@ -464,9 +464,9 @@ def get_object(self, object_ids):
final_results = self.retrieve_and_deserialize(plain_object_ids, 0)
# Construct a dictionary mapping object IDs that we haven't gotten yet
# to their original index in the object_ids argument.
- unready_ids = dict((plain_object_ids[i].binary(), i)
+ unready_ids = {plain_object_ids[i].binary(): i
      for (i, val) in enumerate(final_results)
- if val is plasma.ObjectNotAvailable)
+ if val is plasma.ObjectNotAvailable}
was_blocked = (len(unready_ids) > 0)
# Try reconstructing any objects we haven't gotten yet. Try to get them
# until at least get_timeout_milliseconds milliseconds passes, then
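As with the set rewrites elsewhere in this PR, a standalone sketch (hypothetical values, not part of the diff) of the dict((k, v) ...) to {k: v ...} equivalence used in this hunk:

    # Both spellings build the same mapping from an ID to its original
    # index; None stands in for plasma.ObjectNotAvailable.
    plain_ids = ["id-a", "id-b", "id-c"]
    final = ["ok", None, None]

    old_style = dict((plain_ids[i], i)
                     for (i, val) in enumerate(final) if val is None)
    new_style = {plain_ids[i]: i
                 for (i, val) in enumerate(final) if val is None}
    assert old_style == new_style == {"id-b": 1, "id-c": 2}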
12 changes: 6 additions & 6 deletions test/actor_test.py
@@ -774,7 +774,7 @@ def get_location_and_ids(self):
# Make sure that no two actors are assigned to the same GPU.
locations_and_ids = ray.get(
[actor.get_location_and_ids.remote() for actor in actors])
- node_names = set([location for location, gpu_id in locations_and_ids])
+ node_names = {location for location, gpu_id in locations_and_ids}
self.assertEqual(len(node_names), num_local_schedulers)
location_actor_combinations = []
for node_name in node_names:
@@ -815,7 +815,7 @@ def get_location_and_ids(self):
# Make sure that no two actors are assigned to the same GPU.
locations_and_ids = ray.get(
[actor.get_location_and_ids.remote() for actor in actors1])
- node_names = set([location for location, gpu_id in locations_and_ids])
+ node_names = {location for location, gpu_id in locations_and_ids}
self.assertEqual(len(node_names), num_local_schedulers)

# Keep track of which GPU IDs are being used for each location.
@@ -849,7 +849,7 @@ def get_location_and_ids(self):
[actor.get_location_and_ids.remote() for actor in actors2])
self.assertEqual(
node_names,
- set([location for location, gpu_id in locations_and_ids]))
+ {location for location, gpu_id in locations_and_ids})
for location, gpu_ids in locations_and_ids:
gpus_in_use[location].extend(gpu_ids)
for node_name in node_names:
@@ -887,7 +887,7 @@ def get_location_and_ids(self):
# Make sure that no two actors are assigned to the same GPU.
locations_and_ids = ray.get(
[actor.get_location_and_ids.remote() for actor in actors])
- node_names = set([location for location, gpu_id in locations_and_ids])
+ node_names = {location for location, gpu_id in locations_and_ids}
self.assertEqual(len(node_names), 2)
for node_name in node_names:
node_gpu_ids = [
@@ -897,7 +897,7 @@ def get_location_and_ids(self):
self.assertIn(len(node_gpu_ids), [5, 10])
self.assertEqual(
set(node_gpu_ids),
- set([(i, ) for i in range(len(node_gpu_ids))]))
+ {(i, ) for i in range(len(node_gpu_ids))})

# Creating a new actor should fail because all of the GPUs are being
# used.
@@ -1942,7 +1942,7 @@ def method(self):

results = ray.get([result1, result2, result3])
self.assertEqual(results[0], results[2])
- self.assertEqual(set(results), set([0, 1]))
+ self.assertEqual(set(results), {0, 1})

# Make sure that when one actor goes out of scope a new actor is
# created because some resources have been freed up.
6 changes: 3 additions & 3 deletions test/runtest.py
@@ -255,7 +255,7 @@ def temp():

# Test sets.
self.assertEqual(ray.get(f.remote(set())), set())
- s = set([1, (1, 2, "hi")])
+ s = {1, (1, 2, "hi")}
self.assertEqual(ray.get(f.remote(s)), s)

# Test types.
@@ -1317,8 +1317,8 @@ def f():
self.assertEqual(list_of_ids, 10 * [[]])

list_of_ids = ray.get([f1.remote() for _ in range(10)])
- set_of_ids = set([tuple(gpu_ids) for gpu_ids in list_of_ids])
- self.assertEqual(set_of_ids, set([(i, ) for i in range(10)]))
+ set_of_ids = {tuple(gpu_ids) for gpu_ids in list_of_ids}
+ self.assertEqual(set_of_ids, {(i, ) for i in range(10)})

list_of_ids = ray.get([f2.remote(), f4.remote(), f4.remote()])
all_ids = [gpu_id for gpu_ids in list_of_ids for gpu_id in gpu_ids]
4 changes: 2 additions & 2 deletions test/stress_tests.py
@@ -210,8 +210,8 @@ def tearDown(self):
state._initialize_global_state(self.redis_ip_address, self.redis_port)
if os.environ.get('RAY_USE_NEW_GCS', False):
tasks = state.task_table()
- local_scheduler_ids = set(
-     task["LocalSchedulerID"] for task in tasks.values())
+ local_scheduler_ids = {
+     task["LocalSchedulerID"] for task in tasks.values()}

# Make sure that all nodes in the cluster were used by checking that
# the set of local scheduler IDs that had a task scheduled or submitted