diff --git a/ci/long_running_distributed_tests/ray-project/cluster.yaml b/ci/long_running_distributed_tests/ray-project/cluster.yaml index d1224e6b6cbe..4aa655ccefe9 100644 --- a/ci/long_running_distributed_tests/ray-project/cluster.yaml +++ b/ci/long_running_distributed_tests/ray-project/cluster.yaml @@ -80,7 +80,7 @@ setup_commands: head_start_ray_commands: - ray stop - export RAY_BACKEND_LOG_LEVEL=debug - - ray start --head --redis-port=6379 --object-manager-port=8076 --autoscaling-config=~/ray_bootstrap_config.yaml + - ray start --head --port=6379 --object-manager-port=8076 --autoscaling-config=~/ray_bootstrap_config.yaml # Command to start ray on worker nodes. You don't need to change this. worker_start_ray_commands: diff --git a/ci/regression_test/rllib_regresssion_tests/ray-project/cluster.yaml b/ci/regression_test/rllib_regresssion_tests/ray-project/cluster.yaml index fe7a150922b6..708cb30cdfc6 100644 --- a/ci/regression_test/rllib_regresssion_tests/ray-project/cluster.yaml +++ b/ci/regression_test/rllib_regresssion_tests/ray-project/cluster.yaml @@ -36,7 +36,7 @@ setup_commands: # Command to start ray on the head node. You don't need to change this. head_start_ray_commands: - source activate tensorflow_p36 && ray stop - - ulimit -n 65536; source activate tensorflow_p36 && OMP_NUM_THREADS=1 ray start --head --redis-port=6379 --object-manager-port=8076 --autoscaling-config=~/ray_bootstrap_config.yaml + - ulimit -n 65536; source activate tensorflow_p36 && OMP_NUM_THREADS=1 ray start --head --port=6379 --object-manager-port=8076 --autoscaling-config=~/ray_bootstrap_config.yaml # Command to start ray on worker nodes. You don't need to change this. 
worker_start_ray_commands: diff --git a/ci/regression_test/rllib_stress_tests/ray-project/cluster.yaml b/ci/regression_test/rllib_stress_tests/ray-project/cluster.yaml index 56deae83c0f2..461bc7f585b8 100644 --- a/ci/regression_test/rllib_stress_tests/ray-project/cluster.yaml +++ b/ci/regression_test/rllib_stress_tests/ray-project/cluster.yaml @@ -102,7 +102,7 @@ worker_setup_commands: [] # Command to start ray on the head node. You don't need to change this. head_start_ray_commands: - source activate tensorflow_p36 && ray stop - - ulimit -n 65536; source activate tensorflow_p36 && OMP_NUM_THREADS=1 ray start --head --redis-port=6379 --object-manager-port=8076 --autoscaling-config=~/ray_bootstrap_config.yaml + - ulimit -n 65536; source activate tensorflow_p36 && OMP_NUM_THREADS=1 ray start --head --port=6379 --object-manager-port=8076 --autoscaling-config=~/ray_bootstrap_config.yaml # Command to start ray on worker nodes. You don't need to change this. worker_start_ray_commands: diff --git a/ci/regression_test/stress_tests/ray-project/cluster.yaml b/ci/regression_test/stress_tests/ray-project/cluster.yaml index 45bfa8e780f5..81534da83a59 100644 --- a/ci/regression_test/stress_tests/ray-project/cluster.yaml +++ b/ci/regression_test/stress_tests/ray-project/cluster.yaml @@ -112,7 +112,7 @@ worker_setup_commands: [] # Command to start ray on the head node. You don't need to change this. head_start_ray_commands: - ray stop - - ulimit -n 65536; ray start --head --num-redis-shards=5 --redis-port=6379 --autoscaling-config=~/ray_bootstrap_config.yaml + - ulimit -n 65536; ray start --head --port=6379 --autoscaling-config=~/ray_bootstrap_config.yaml # Command to start ray on worker nodes. You don't need to change this. 
worker_start_ray_commands: diff --git a/deploy/ray-operator/config/samples/ray_v1_raycluster.complete.yaml b/deploy/ray-operator/config/samples/ray_v1_raycluster.complete.yaml index b7fec11cfe9c..a97fa40ac1a6 100644 --- a/deploy/ray-operator/config/samples/ray_v1_raycluster.complete.yaml +++ b/deploy/ray-operator/config/samples/ray_v1_raycluster.complete.yaml @@ -194,7 +194,7 @@ spec: key: value # Command to start ray - command: ray start --head --block --redis-port=6379 --node-ip-address=$MY_POD_IP --object-manager-port=12345 --node-manager-port=12346 --object-store-memory=100000000 --num-cpus=1 + command: ray start --head --block --port=6379 --node-ip-address=$MY_POD_IP --object-manager-port=12345 --node-manager-port=12346 --object-store-memory=100000000 --num-cpus=1 # use affinity to select nodes.Optional. # Refer to https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity diff --git a/deploy/ray-operator/config/samples/ray_v1_raycluster.heterogeneous.yaml b/deploy/ray-operator/config/samples/ray_v1_raycluster.heterogeneous.yaml index ae791b79d984..5b8728a836fa 100644 --- a/deploy/ray-operator/config/samples/ray_v1_raycluster.heterogeneous.yaml +++ b/deploy/ray-operator/config/samples/ray_v1_raycluster.heterogeneous.yaml @@ -114,7 +114,7 @@ spec: raycluster.group.name: head-group # Command to start ray - command: ray start --head --block --redis-port=6379 --node-ip-address=$MY_POD_IP --object-manager-port=12345 --node-manager-port=12346 --object-store-memory=100000000 --num-cpus=1 + command: ray start --head --block --port=6379 --node-ip-address=$MY_POD_IP --object-manager-port=12345 --node-manager-port=12346 --object-store-memory=100000000 --num-cpus=1 # resource requirements # Refer to https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ diff --git a/deploy/ray-operator/config/samples/ray_v1_raycluster.mini.yaml b/deploy/ray-operator/config/samples/ray_v1_raycluster.mini.yaml index 
8e77280009d5..9e9e5202684f 100644 --- a/deploy/ray-operator/config/samples/ray_v1_raycluster.mini.yaml +++ b/deploy/ray-operator/config/samples/ray_v1_raycluster.mini.yaml @@ -56,7 +56,7 @@ spec: raycluster.group.name: head-group # Command to start ray - command: ray start --head --block --redis-port=6379 --node-ip-address=$MY_POD_IP --object-manager-port=12345 --node-manager-port=12346 --object-store-memory=100000000 --num-cpus=1 + command: ray start --head --block --port=6379 --node-ip-address=$MY_POD_IP --object-manager-port=12345 --node-manager-port=12346 --object-store-memory=100000000 --num-cpus=1 # resource requirements # Refer to https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ diff --git a/doc/azure/azure-init.sh b/doc/azure/azure-init.sh index 7c0628200d1b..6a2eecd06515 100755 --- a/doc/azure/azure-init.sh +++ b/doc/azure/azure-init.sh @@ -19,7 +19,7 @@ NUM_GPUS=\`nvidia-smi -L | wc -l\` ray stop ulimit -n 65536 -ray start --head --redis-port=6379 --object-manager-port=8076 --num-gpus=\$NUM_GPUS --block --webui-host 0.0.0.0 +ray start --head --port=6379 --object-manager-port=8076 --num-gpus=\$NUM_GPUS --block --webui-host 0.0.0.0 EOM cat > /home/"$USERNAME"/ray-worker.sh << EOM diff --git a/doc/examples/lm/lm-cluster.yaml b/doc/examples/lm/lm-cluster.yaml index 74b600ad81f4..3590d482aa64 100644 --- a/doc/examples/lm/lm-cluster.yaml +++ b/doc/examples/lm/lm-cluster.yaml @@ -117,7 +117,7 @@ worker_setup_commands: [] head_start_ray_commands: - ray stop - ulimit -n 65536; - ray start --head --redis-port=6379 + ray start --head --port=6379 --object-manager-port=8076 --autoscaling-config=~/ray_bootstrap_config.yaml diff --git a/doc/kubernetes/ray-cluster.yaml b/doc/kubernetes/ray-cluster.yaml index 853f9dfb2f4f..e2e3c307d042 100644 --- a/doc/kubernetes/ray-cluster.yaml +++ b/doc/kubernetes/ray-cluster.yaml @@ -63,7 +63,7 @@ spec: imagePullPolicy: Always command: [ "/bin/bash", "-c", "--" ] args: - - "ray start --head 
--node-ip-address=$MY_POD_IP --redis-port=6379 --redis-shard-ports=6380,6381 --num-cpus=$MY_CPU_REQUEST --object-manager-port=12345 --node-manager-port=12346 --block" + - "ray start --head --node-ip-address=$MY_POD_IP --port=6379 --redis-shard-ports=6380,6381 --num-cpus=$MY_CPU_REQUEST --object-manager-port=12345 --node-manager-port=12346 --block" ports: - containerPort: 6379 # Redis port. - containerPort: 6380 # Redis port. diff --git a/doc/source/cluster/yarn.rst b/doc/source/cluster/yarn.rst index d40fe9da0c46..95615daceb56 100644 --- a/doc/source/cluster/yarn.rst +++ b/doc/source/cluster/yarn.rst @@ -114,7 +114,7 @@ and heap memory to roughly 200 MB. This is conservative and should be set accord .. code-block:: bash - ray start --head --redis-port=6379 --object-store-memory=200000000 --memory 200000000 --num-cpus=1 + ray start --head --port=6379 --object-store-memory=200000000 --memory 200000000 --num-cpus=1 Execute the user script containing the Ray program. diff --git a/doc/yarn/ray-skein.yaml b/doc/yarn/ray-skein.yaml index e23ddfffff68..252aff7b3774 100644 --- a/doc/yarn/ray-skein.yaml +++ b/doc/yarn/ray-skein.yaml @@ -28,7 +28,7 @@ services: # By default, we set object store memory and heap memory to roughly 200 MB. This is conservative # and should be set according to application needs. # - ray start --head --redis-port=6379 --object-store-memory=200000000 --memory 200000000 --num-cpus=1 + ray start --head --port=6379 --object-store-memory=200000000 --memory 200000000 --num-cpus=1 # This executes the user script. python example.py diff --git a/python/ray/autoscaler/aws/development-example.yaml b/python/ray/autoscaler/aws/development-example.yaml index 386a18aca326..1adf7d7c515d 100644 --- a/python/ray/autoscaler/aws/development-example.yaml +++ b/python/ray/autoscaler/aws/development-example.yaml @@ -111,7 +111,7 @@ worker_setup_commands: [] # Command to start ray on the head node. You don't need to change this. 
head_start_ray_commands: - ray stop - - ulimit -n 65536; ray start --head --num-redis-shards=10 --port=6379 --autoscaling-config=~/ray_bootstrap_config.yaml + - ulimit -n 65536; ray start --head --port=6379 --autoscaling-config=~/ray_bootstrap_config.yaml # Command to start ray on worker nodes. You don't need to change this. worker_start_ray_commands: diff --git a/python/ray/scripts/scripts.py b/python/ray/scripts/scripts.py index 256c672b7e76..31fbd28fd02d 100644 --- a/python/ray/scripts/scripts.py +++ b/python/ray/scripts/scripts.py @@ -525,9 +525,8 @@ def start(node_ip_address, address, port, redis_password, redis_shard_ports, cli_logger.abort("`{}` should not be specified without `{}`.", cf.bold("--port"), cf.bold("--head")) - raise Exception( - "If --head is not passed in, --port and --redis-port are not " - "allowed.") + raise Exception("If --head is not passed in, --port is not " + "allowed.") if redis_shard_ports is not None: cli_logger.abort("`{}` should not be specified without `{}`.", cf.bold("--redis-shard-ports"), cf.bold("--head")) diff --git a/python/ray/util/sgd/tf/examples/tf-example-sgd.yaml b/python/ray/util/sgd/tf/examples/tf-example-sgd.yaml index 33127ca5c4e4..7078bbc3affa 100644 --- a/python/ray/util/sgd/tf/examples/tf-example-sgd.yaml +++ b/python/ray/util/sgd/tf/examples/tf-example-sgd.yaml @@ -61,7 +61,7 @@ worker_setup_commands: [] # # Command to start ray on the head node. You don't need to change this. head_start_ray_commands: - ray stop - - ray start --head --redis-port=6379 --object-manager-port=8076 --autoscaling-config=~/ray_bootstrap_config.yaml --object-store-memory=1000000000 + - ray start --head --port=6379 --object-manager-port=8076 --autoscaling-config=~/ray_bootstrap_config.yaml --object-store-memory=1000000000 # Command to start ray on worker nodes. You don't need to change this. 
worker_start_ray_commands: diff --git a/python/ray/util/sgd/torch/examples/example-sgd.yaml b/python/ray/util/sgd/torch/examples/example-sgd.yaml index ba8434ac6357..2fb3cb036409 100644 --- a/python/ray/util/sgd/torch/examples/example-sgd.yaml +++ b/python/ray/util/sgd/torch/examples/example-sgd.yaml @@ -62,7 +62,7 @@ worker_setup_commands: [] # # Command to start ray on the head node. You don't need to change this. head_start_ray_commands: - ray stop - - ray start --head --redis-port=6379 --object-manager-port=8076 --autoscaling-config=~/ray_bootstrap_config.yaml --object-store-memory=1000000000 + - ray start --head --port=6379 --object-manager-port=8076 --autoscaling-config=~/ray_bootstrap_config.yaml --object-store-memory=1000000000 # Command to start ray on worker nodes. You don't need to change this. worker_start_ray_commands: