ci/long_running_distributed_tests/ray-project/cluster.yaml (1 addition, 1 deletion)
@@ -80,7 +80,7 @@ setup_commands:
head_start_ray_commands:
    - ray stop
    - export RAY_BACKEND_LOG_LEVEL=debug
-   - ray start --head --redis-port=6379 --object-manager-port=8076 --autoscaling-config=~/ray_bootstrap_config.yaml
+   - ray start --head --port=6379 --object-manager-port=8076 --autoscaling-config=~/ray_bootstrap_config.yaml

# Command to start ray on worker nodes. You don't need to change this.
worker_start_ray_commands:

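Most hunks in this diff make the same one-flag substitution in the ray start head-node invocation; a minimal before/after sketch of just the renamed flag (all other options elided):

    # Old flag name, removed in this change:
    ray start --head --redis-port=6379
    # New flag name; the port value itself is unchanged:
    ray start --head --port=6379
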
@@ -36,7 +36,7 @@ setup_commands:
# Command to start ray on the head node. You don't need to change this.
head_start_ray_commands:
    - source activate tensorflow_p36 && ray stop
-   - ulimit -n 65536; source activate tensorflow_p36 && OMP_NUM_THREADS=1 ray start --head --redis-port=6379 --object-manager-port=8076 --autoscaling-config=~/ray_bootstrap_config.yaml
+   - ulimit -n 65536; source activate tensorflow_p36 && OMP_NUM_THREADS=1 ray start --head --port=6379 --object-manager-port=8076 --autoscaling-config=~/ray_bootstrap_config.yaml

# Command to start ray on worker nodes. You don't need to change this.
worker_start_ray_commands:

@@ -102,7 +102,7 @@ worker_setup_commands: []
# Command to start ray on the head node. You don't need to change this.
head_start_ray_commands:
    - source activate tensorflow_p36 && ray stop
-   - ulimit -n 65536; source activate tensorflow_p36 && OMP_NUM_THREADS=1 ray start --head --redis-port=6379 --object-manager-port=8076 --autoscaling-config=~/ray_bootstrap_config.yaml
+   - ulimit -n 65536; source activate tensorflow_p36 && OMP_NUM_THREADS=1 ray start --head --port=6379 --object-manager-port=8076 --autoscaling-config=~/ray_bootstrap_config.yaml

# Command to start ray on worker nodes. You don't need to change this.
worker_start_ray_commands:

ci/regression_test/stress_tests/ray-project/cluster.yaml (1 addition, 1 deletion)
@@ -112,7 +112,7 @@ worker_setup_commands: []
# Command to start ray on the head node. You don't need to change this.
head_start_ray_commands:
    - ray stop
-   - ulimit -n 65536; ray start --head --num-redis-shards=5 --redis-port=6379 --autoscaling-config=~/ray_bootstrap_config.yaml
+   - ulimit -n 65536; ray start --head --port=6379 --autoscaling-config=~/ray_bootstrap_config.yaml

# Command to start ray on worker nodes. You don't need to change this.
worker_start_ray_commands:

@@ -194,7 +194,7 @@ spec:
key: value

# Command to start ray
-   command: ray start --head --block --redis-port=6379 --node-ip-address=$MY_POD_IP --object-manager-port=12345 --node-manager-port=12346 --object-store-memory=100000000 --num-cpus=1
+   command: ray start --head --block --port=6379 --node-ip-address=$MY_POD_IP --object-manager-port=12345 --node-manager-port=12346 --object-store-memory=100000000 --num-cpus=1

# use affinity to select nodes.Optional.
# Refer to https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity

@@ -114,7 +114,7 @@ spec:
raycluster.group.name: head-group

# Command to start ray
-   command: ray start --head --block --redis-port=6379 --node-ip-address=$MY_POD_IP --object-manager-port=12345 --node-manager-port=12346 --object-store-memory=100000000 --num-cpus=1
+   command: ray start --head --block --port=6379 --node-ip-address=$MY_POD_IP --object-manager-port=12345 --node-manager-port=12346 --object-store-memory=100000000 --num-cpus=1

# resource requirements
# Refer to https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/

@@ -56,7 +56,7 @@ spec:
raycluster.group.name: head-group

# Command to start ray
-   command: ray start --head --block --redis-port=6379 --node-ip-address=$MY_POD_IP --object-manager-port=12345 --node-manager-port=12346 --object-store-memory=100000000 --num-cpus=1
+   command: ray start --head --block --port=6379 --node-ip-address=$MY_POD_IP --object-manager-port=12345 --node-manager-port=12346 --object-store-memory=100000000 --num-cpus=1

# resource requirements
# Refer to https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/

doc/azure/azure-init.sh (1 addition, 1 deletion)
@@ -19,7 +19,7 @@ NUM_GPUS=\`nvidia-smi -L | wc -l\`

ray stop
ulimit -n 65536
-   ray start --head --redis-port=6379 --object-manager-port=8076 --num-gpus=\$NUM_GPUS --block --webui-host 0.0.0.0
+   ray start --head --port=6379 --object-manager-port=8076 --num-gpus=\$NUM_GPUS --block --webui-host 0.0.0.0
EOM

cat > /home/"$USERNAME"/ray-worker.sh << EOM

doc/examples/lm/lm-cluster.yaml (1 addition, 1 deletion)
@@ -117,7 +117,7 @@ worker_setup_commands: []
head_start_ray_commands:
    - ray stop
    - ulimit -n 65536;
-     ray start --head --redis-port=6379
+     ray start --head --port=6379
      --object-manager-port=8076
      --autoscaling-config=~/ray_bootstrap_config.yaml

doc/kubernetes/ray-cluster.yaml (1 addition, 1 deletion)
@@ -63,7 +63,7 @@ spec:
imagePullPolicy: Always
command: [ "/bin/bash", "-c", "--" ]
args:
-   - "ray start --head --node-ip-address=$MY_POD_IP --redis-port=6379 --redis-shard-ports=6380,6381 --num-cpus=$MY_CPU_REQUEST --object-manager-port=12345 --node-manager-port=12346 --block"
+   - "ray start --head --node-ip-address=$MY_POD_IP --port=6379 --redis-shard-ports=6380,6381 --num-cpus=$MY_CPU_REQUEST --object-manager-port=12345 --node-manager-port=12346 --block"
ports:
    - containerPort: 6379 # Redis port.
    - containerPort: 6380 # Redis port.

doc/source/cluster/yarn.rst (1 addition, 1 deletion)
@@ -114,7 +114,7 @@ and heap memory to roughly 200 MB. This is conservative and should be set accord

.. code-block:: bash

-   ray start --head --redis-port=6379 --object-store-memory=200000000 --memory 200000000 --num-cpus=1
+   ray start --head --port=6379 --object-store-memory=200000000 --memory 200000000 --num-cpus=1

Execute the user script containing the Ray program.

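The hunk above only shows the head-node command; worker containers in this setup connect to that same port 6379. A generic worker-side counterpart, written here as an illustration rather than quoted from the docs (the head address placeholder is assumed):

    # Connect a worker to the head started above; <head-node-address> is a placeholder.
    ray start --address=<head-node-address>:6379 --object-store-memory=200000000 --memory 200000000 --num-cpus=1
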
doc/yarn/ray-skein.yaml (1 addition, 1 deletion)
@@ -28,7 +28,7 @@ services:
# By default, we set object store memory and heap memory to roughly 200 MB. This is conservative
# and should be set according to application needs.
#
-   ray start --head --redis-port=6379 --object-store-memory=200000000 --memory 200000000 --num-cpus=1
+   ray start --head --port=6379 --object-store-memory=200000000 --memory 200000000 --num-cpus=1

# This executes the user script.
python example.py

python/ray/autoscaler/aws/development-example.yaml (1 addition, 1 deletion)
@@ -111,7 +111,7 @@ worker_setup_commands: []
# Command to start ray on the head node. You don't need to change this.
head_start_ray_commands:
    - ray stop
-   - ulimit -n 65536; ray start --head --num-redis-shards=10 --port=6379 --autoscaling-config=~/ray_bootstrap_config.yaml
+   - ulimit -n 65536; ray start --head --port=6379 --autoscaling-config=~/ray_bootstrap_config.yaml

# Command to start ray on worker nodes. You don't need to change this.
worker_start_ray_commands:

python/ray/scripts/scripts.py (2 additions, 3 deletions)
@@ -525,9 +525,8 @@ def start(node_ip_address, address, port, redis_password, redis_shard_ports,
            cli_logger.abort("`{}` should not be specified without `{}`.",
                             cf.bold("--port"), cf.bold("--head"))

-           raise Exception(
-               "If --head is not passed in, --port and --redis-port are not "
-               "allowed.")
+           raise Exception("If --head is not passed in, --port is not "
+                           "allowed.")
        if redis_shard_ports is not None:
            cli_logger.abort("`{}` should not be specified without `{}`.",
                             cf.bold("--redis-shard-ports"), cf.bold("--head"))

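For context on the behavior this check enforces, a quick sketch of the two invocations it distinguishes; the error text comes from the diff above, while the exact CLI output formatting is assumed:

    # Accepted: --port (formerly --redis-port) is only meaningful together with --head.
    ray start --head --port=6379

    # Rejected: without --head this aborts with
    #   "`--port` should not be specified without `--head`."
    # and raises "If --head is not passed in, --port is not allowed."
    ray start --port=6379
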
python/ray/util/sgd/tf/examples/tf-example-sgd.yaml (1 addition, 1 deletion)
@@ -61,7 +61,7 @@ worker_setup_commands: []
# # Command to start ray on the head node. You don't need to change this.
head_start_ray_commands:
    - ray stop
-   - ray start --head --redis-port=6379 --object-manager-port=8076 --autoscaling-config=~/ray_bootstrap_config.yaml --object-store-memory=1000000000
+   - ray start --head --port=6379 --object-manager-port=8076 --autoscaling-config=~/ray_bootstrap_config.yaml --object-store-memory=1000000000

# Command to start ray on worker nodes. You don't need to change this.
worker_start_ray_commands:

python/ray/util/sgd/torch/examples/example-sgd.yaml (1 addition, 1 deletion)
@@ -62,7 +62,7 @@ worker_setup_commands: []
# # Command to start ray on the head node. You don't need to change this.
head_start_ray_commands:
    - ray stop
-   - ray start --head --redis-port=6379 --object-manager-port=8076 --autoscaling-config=~/ray_bootstrap_config.yaml --object-store-memory=1000000000
+   - ray start --head --port=6379 --object-manager-port=8076 --autoscaling-config=~/ray_bootstrap_config.yaml --object-store-memory=1000000000

# Command to start ray on worker nodes. You don't need to change this.
worker_start_ray_commands: