Skip to content

Commit

Permalink
test: var (#89)
Browse files Browse the repository at this point in the history
Co-authored-by: Yu-Han Liu <[email protected]>
  • Loading branch information
morgandu and dizcology authored Nov 26, 2020
1 parent 19dc31a commit 860d12e
Show file tree
Hide file tree
Showing 4 changed files with 54 additions and 49 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -25,11 +25,14 @@
# Test configuration for the batch-prediction video-action-recognition sample.
# NOTE(review): the diff capture carried both the pre- and post-commit
# DISPLAY_NAME assignments; only the (identical) post-commit form is kept.
PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT")
LOCATION = "us-central1"
MODEL_ID = "3530998029718913024"  # permanent_swim_run_videos_action_recognition_model
# Unique per test run so concurrent CI jobs never collide on resource names.
DISPLAY_NAME = (
    f"temp_create_batch_prediction_job_video_action_recognition_test_{uuid.uuid4()}"
)
GCS_SOURCE_URI = "gs://automl-video-demo-data/ucaip-var/swimrun_bp.jsonl"
GCS_OUTPUT_URI = "gs://ucaip-samples-test-output/"
API_ENDPOINT = "us-central1-aiplatform.googleapis.com"

@pytest.fixture
def shared_state():
state = {}
Expand All @@ -39,14 +42,27 @@ def shared_state():
@pytest.fixture
def job_client():
    """Yield a JobServiceClient bound to the regional API endpoint.

    The diff capture contained both the pre-commit two-line construction and
    the post-commit one-line construction, which would have built the client
    twice; only a single construction is kept.
    """
    client_options = {"api_endpoint": API_ENDPOINT}
    job_client = aiplatform.gapic.JobServiceClient(client_options=client_options)
    yield job_client


@pytest.fixture(scope="function", autouse=True)
def teardown(shared_state, job_client):
    """After each test: cancel, await, and delete the batch prediction job.

    Runs automatically (autouse) so every test that records
    shared_state["batch_prediction_job_name"] gets cleaned up even on failure.
    """
    yield

    # Stop the batch prediction job
    job_client.cancel_batch_prediction_job(
        name=shared_state["batch_prediction_job_name"]
    )

    # Waiting for batch prediction job to be in CANCELLED state.
    # NOTE(review): relies on helpers.wait_for_job_state defaults for
    # expected_state/timeout — confirm the default expected state is CANCELLED.
    helpers.wait_for_job_state(
        get_job_method=job_client.get_batch_prediction_job,
        name=shared_state["batch_prediction_job_name"],
    )

    # Delete the batch prediction job
    job_client.delete_batch_prediction_job(
        name=shared_state["batch_prediction_job_name"]
    )
Expand All @@ -57,12 +73,12 @@ def test_create_batch_prediction_job_video_action_recognition_sample(
capsys, shared_state, job_client
):

model = f"projects/{PROJECT_ID}/locations/{LOCATION}/models/{MODEL_ID}"
model_name = f"projects/{PROJECT_ID}/locations/{LOCATION}/models/{MODEL_ID}"

create_batch_prediction_job_video_action_recognition_sample.create_batch_prediction_job_video_action_recognition_sample(
project=PROJECT_ID,
display_name=DISPLAY_NAME,
model=model,
model=model_name,
gcs_source_uri=GCS_SOURCE_URI,
gcs_destination_output_uri_prefix=GCS_OUTPUT_URI,
)
Expand All @@ -71,12 +87,3 @@ def test_create_batch_prediction_job_video_action_recognition_sample(

# Save resource name of the newly created batch prediction job
shared_state["batch_prediction_job_name"] = helpers.get_name(out)

# Waiting for batch prediction job to be in CANCELLED state
helpers.wait_for_job_state(
get_job_method=job_client.get_batch_prediction_job,
name=shared_state["batch_prediction_job_name"],
expected_state="SUCCEEDED",
timeout=600,
freq=20,
)
Original file line number Diff line number Diff line change
Expand Up @@ -24,12 +24,17 @@

# Test configuration for the training-pipeline video-action-recognition sample.
# NOTE(review): the diff capture carried both pre- and post-commit assignments
# of DATASET_ID and DISPLAY_NAME; only the (identical) post-commit forms kept.
LOCATION = "us-central1"
PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT")
DATASET_ID = (
    "6881957627459272704"  # permanent_swim_run_videos_action_recognition_dataset
)
# Unique per test run so concurrent CI jobs never collide on resource names.
DISPLAY_NAME = (
    f"temp_create_training_pipeline_video_action_recognition_test_{uuid.uuid4()}"
)
MODEL_DISPLAY_NAME = f"Temp Model for {DISPLAY_NAME}"
MODEL_TYPE = "CLOUD"
API_ENDPOINT = "us-central1-aiplatform.googleapis.com"


@pytest.fixture
def shared_state():
state = {}
Expand All @@ -44,27 +49,32 @@ def pipeline_client():
)
yield pipeline_client

@pytest.fixture(scope="function", autouse=True)
def teardown(shared_state, pipeline_client):
    """After each test: cancel, await, and delete the training pipeline.

    Reconstructed post-commit fixture; the captured diff interleaved it with
    the deleted model_client fixture and the old teardown signature, which
    are removed here as pre-commit residue.
    """
    yield

    # Stop the training pipeline
    pipeline_client.cancel_training_pipeline(
        name=shared_state["training_pipeline_name"]
    )

    # Waiting for training pipeline to be in CANCELLED state.
    # NOTE(review): relies on helpers.wait_for_job_state defaults for
    # expected_state/timeout — confirm the default expected state is CANCELLED.
    helpers.wait_for_job_state(
        get_job_method=pipeline_client.get_training_pipeline,
        name=shared_state["training_pipeline_name"],
    )

    # Delete the training pipeline
    pipeline_client.delete_training_pipeline(
        name=shared_state["training_pipeline_name"]
    )




# Training AutoML Vision Model
def test_create_training_pipeline_video_action_recognition_sample(
capsys, shared_state, pipeline_client
capsys, shared_state
):
create_training_pipeline_video_action_recognition_sample.create_training_pipeline_video_action_recognition_sample(
project=PROJECT_ID,
Expand All @@ -75,26 +85,7 @@ def test_create_training_pipeline_video_action_recognition_sample(
)

out, _ = capsys.readouterr()

assert "response:" in out

# Save resource name of the newly created training pipeline
shared_state["training_pipeline_name"] = helpers.get_name(out)

# Poll until the pipeline succeeds because we want to test the model_upload step as well.
helpers.wait_for_job_state(
get_job_method=pipeline_client.get_training_pipeline,
name=shared_state["training_pipeline_name"],
expected_state="SUCCEEDED",
timeout=5000,
freq=20,
)

training_pipeline = pipeline_client.get_training_pipeline(
name=shared_state["training_pipeline_name"]
)

# Check that the model indeed has been uploaded.
assert training_pipeline.model_to_upload.name != ""

shared_state["model_name"] = training_pipeline.model_to_upload.name
Original file line number Diff line number Diff line change
Expand Up @@ -19,10 +19,15 @@
from google.cloud import storage

# Test configuration for the export-model video-action-recognition sample.
# NOTE(review): the diff capture carried both pre- and post-commit assignments
# of MODEL_ID and GCS_URI; only the (identical) post-commit forms are kept.
PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT")
MODEL_ID = (
    "3422489426196955136"  # permanent_swim_run_videos_action_recognition_edge_model
)
GCS_URI = (
    "gs://ucaip-samples-test-output/tmp/export_model_video_action_recognition_sample"
)
EXPORT_FORMAT = "tf-saved-model"


@pytest.fixture(scope="function", autouse=True)
def teardown():
yield
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,9 @@
# Test configuration for the import-data video-action-recognition sample.
# NOTE(review): the diff capture carried both the pre- and post-commit
# METADATA_SCHEMA_URI assignments; only the (identical) post-commit form kept.
PROJECT_ID = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT")
LOCATION = "us-central1"
GCS_SOURCE = "gs://automl-video-demo-data/ucaip-var/swimrun.jsonl"
METADATA_SCHEMA_URI = (
    "gs://google-cloud-aiplatform/schema/dataset/metadata/video_1.0.0.yaml"
)

API_ENDPOINT = "us-central1-aiplatform.googleapis.com"
# Unique per test run so concurrent CI jobs never collide on resource names.
DISPLAY_NAME = f"temp_import_data_video_action_recognition_test_{uuid.uuid4()}"
Expand Down

0 comments on commit 860d12e

Please sign in to comment.