diff --git a/README.md b/README.md index 4849d396..93f2e490 100644 --- a/README.md +++ b/README.md @@ -131,6 +131,9 @@ import runpod runpod.api_key = "your_runpod_api_key_found_under_settings" +# Get all my pods +pods = runpod.get_pods() + # Create a pod pod = runpod.create_pod("test", "runpod/stack", "NVIDIA GeForce RTX 3070") diff --git a/runpod/__init__.py b/runpod/__init__.py index 59fc78dc..96806fe1 100644 --- a/runpod/__init__.py +++ b/runpod/__init__.py @@ -7,6 +7,7 @@ from .endpoint import AsyncioEndpoint, AsyncioJob from .api_wrapper.ctl_commands import( get_gpus, get_gpu, + get_pods, create_pod, stop_pod, resume_pod, terminate_pod ) from .cli.config import set_credentials, check_credentials, get_credentials diff --git a/runpod/api_wrapper/ctl_commands.py b/runpod/api_wrapper/ctl_commands.py index c8bc7a63..06bad8d9 100644 --- a/runpod/api_wrapper/ctl_commands.py +++ b/runpod/api_wrapper/ctl_commands.py @@ -5,8 +5,9 @@ from typing import Optional from .queries import gpus +from .queries import pods as pod_queries from .graphql import run_graphql_query -from .mutations import pods +from .mutations import pods as pod_mutations def get_gpus() -> dict: @@ -28,6 +29,13 @@ def get_gpu(gpu_id : str): cleaned_return = raw_return["data"]["gpuTypes"][0] return cleaned_return +def get_pods() -> dict: + ''' + Get all pods + ''' + raw_return = run_graphql_query(pod_queries.QUERY_POD) + cleaned_return = raw_return["data"]["myself"]["pods"] + return cleaned_return def create_pod(name : str, image_name : str, gpu_type_id : str, cloud_type : str="ALL", data_center_id : Optional[str]=None, country_code:Optional[str]=None, @@ -58,7 +66,7 @@ def create_pod(name : str, image_name : str, gpu_type_id : str, cloud_type : str ''' raw_response = run_graphql_query( - pods.generate_pod_deployment_mutation( + pod_mutations.generate_pod_deployment_mutation( name, image_name, gpu_type_id, cloud_type, data_center_id, country_code, gpu_count, volume_in_gb, container_disk_in_gb, 
min_vcpu_count, min_memory_in_gb, docker_args, ports, volume_mount_path, env) @@ -80,7 +88,7 @@ def stop_pod(pod_id: str): >>> runpod.stop_pod(pod_id) ''' raw_response = run_graphql_query( - pods.generate_pod_stop_mutation(pod_id) + pod_mutations.generate_pod_stop_mutation(pod_id) ) cleaned_response = raw_response["data"]["podStop"] @@ -101,7 +109,7 @@ def resume_pod(pod_id: str, gpu_count: int): >>> runpod.resume_pod(pod_id) ''' raw_response = run_graphql_query( - pods.generate_pod_resume_mutation(pod_id, gpu_count) + pod_mutations.generate_pod_resume_mutation(pod_id, gpu_count) ) cleaned_response = raw_response["data"]["podResume"] @@ -120,5 +128,5 @@ def terminate_pod(pod_id: str): >>> runpod.terminate_pod(pod_id) ''' run_graphql_query( - pods.generate_pod_terminate_mutation(pod_id) + pod_mutations.generate_pod_terminate_mutation(pod_id) ) diff --git a/runpod/api_wrapper/queries/pods.py b/runpod/api_wrapper/queries/pods.py new file mode 100644 index 00000000..6bb2075d --- /dev/null +++ b/runpod/api_wrapper/queries/pods.py @@ -0,0 +1,35 @@ +""" +RunPod | API Wrapper | Queries | Pods +""" + +QUERY_POD = """ +query myPods { + myself { + pods { + id + containerDiskInGb + costPerHr + desiredStatus + dockerArgs + dockerId + env + gpuCount + imageName + lastStatusChange + machineId + memoryInGb + name + podType + port + ports + uptimeSeconds + vcpuCount + volumeInGb + volumeMountPath + machine { + gpuDisplayName + } + } + } +} +""" diff --git a/tests/test_api_wrapper/test_ctl_commands.py b/tests/test_api_wrapper/test_ctl_commands.py index 6c003c82..319ea9a5 100644 --- a/tests/test_api_wrapper/test_ctl_commands.py +++ b/tests/test_api_wrapper/test_ctl_commands.py @@ -122,3 +122,45 @@ def test_terminate_pod(self): } self.assertIsNone(ctl_commands.terminate_pod(pod_id="POD_ID")) + + def test_get_pods(self): + ''' + Tests get_pods + ''' + with patch("runpod.api_wrapper.graphql.requests.post") as patch_request: + patch_request.return_value.json.return_value = { 
"data": { + "myself": { + "pods": [ + { + "id": "POD_ID", + "containerDiskInGb": 5, + "costPerHr": 0.34, + "desiredStatus": "RUNNING", + "dockerArgs": None, + "dockerId": None, + "env": [], + "gpuCount": 1, + "imageName": "runpod/pytorch:2.0.1-py3.10-cuda11.8.0-devel", + "lastStatusChange": "Rented by User: Tue Aug 15 2023", + "machineId": "MACHINE_ID", + "memoryInGb": 83, + "name": "POD_NAME", + "podType": "RESERVED", + "port": None, + "ports": "80/http", + "uptimeSeconds": 0, + "vcpuCount": 21, + "volumeInGb": 200, + "volumeMountPath": "/workspace", + "machine": { "gpuDisplayName": "RTX 3090" } + } + ] + } + } + } + + pods = ctl_commands.get_pods() + + self.assertEqual(len(pods), 1) + self.assertEqual(pods[0]["id"], "POD_ID")