From 66882b6c5cb1b2a063717e5155a9d2652351fd27 Mon Sep 17 00:00:00 2001
From: fcakyon <34196005+fcakyon@users.noreply.github.com>
Date: Mon, 22 Aug 2022 17:16:13 +0300
Subject: [PATCH 1/8] update transformers, gradio, hf hub versions

---
 requirements.txt | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/requirements.txt b/requirements.txt
index 4dd9c2a..dc6f342 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,10 +1,12 @@
 accelerate>=0.12.0
 evaluate>=0.2.2
-transformers>=4.21.1
+transformers @ git+https://github.com/huggingface/transformers.git@30992ef0d911bdeca425969d210771118a5cd1ac
 timm>=0.6.7
 click==8.0.4
 pytorchvideo
 balanced-loss
 scikit-learn
 tensorboard
-opencv-python
\ No newline at end of file
+opencv-python
+gradio>=3.1.6
+huggingface-hub @ git+https://github.com/huggingface/huggingface_hub.git@a1282ab170e6248a893764d5d6759efeaa7a5421
\ No newline at end of file

From 9906c23c416465bd850761e50360e734db6f5d40 Mon Sep 17 00:00:00 2001
From: fcakyon <34196005+fcakyon@users.noreply.github.com>
Date: Mon, 22 Aug 2022 17:16:48 +0300
Subject: [PATCH 2/8] improve videomae support

---
 video_transformers/backbones/transformers.py | 62 +++++++++++++++++---
 1 file changed, 55 insertions(+), 7 deletions(-)

diff --git a/video_transformers/backbones/transformers.py b/video_transformers/backbones/transformers.py
index b8f0ab3..61ea08f 100644
--- a/video_transformers/backbones/transformers.py
+++ b/video_transformers/backbones/transformers.py
@@ -33,6 +33,12 @@ def __init__(self, model_name: str, num_unfrozen_stages=0, **backbone_kwargs):
         else:
             raise NotImplementedError(f"Huggingface model not supported: {backbone.base_model_prefix}")
 
+        # set model input num frames
+        if hasattr(backbone.config, "num_frames"):  # videomae
+            self._num_frames = backbone.config.num_frames
+        else:
+            self._num_frames = 1
+
         self.model = backbone
         self.num_features = num_features
         self.mean = mean
@@ -62,21 +68,47 @@ def framework(self) -> Dict:
     def model_name(self) -> str:
         return self._model_name
 
+    @property
+    def num_frames(self) -> int:
+        return self._num_frames
+
     def forward(self, x):
-        output = self.model(pixel_values=x, return_dict=True)[1]  # output: batch x 1 x num_features
-        # convert to batch x num_features
-        return output.contiguous().view(output.shape[0], self.num_features)  # output: batch x num_features
+        if self.model.base_model_prefix in models_3d:
+            # ensure num_timesteps matches model num_frames
+            if x.shape[2] != self.num_frames:
+                raise ValueError(
+                    f"Input has {x.shape[2]} frames, but {self.model_name} accepts {self.num_frames} frames. Set num_timesteps to {self.num_frames}."
+                )
+            # x: (B, C, T, H, W)
+            x = x.permute(0, 2, 1, 3, 4)
+            # x: (B, T, C, H, W)
+            output = self.model(pixel_values=x, return_dict=True)[0]
+            # output: batch x latent_dim x num_features
+        else:
+            output = self.model(pixel_values=x, return_dict=True)[1]
+            # output: batch x 1 x num_features
+        if output.dim() == 3:
+            output = output.mean(1)
+        return output  # output: batch x num_features
 
     def unfreeze_last_n_stages(self, n):
         if self.model.base_model_prefix == "convnext":
             stages = []
-            stages.append(self.model.base_model.embeddings)
+            # stages.append(self.model.base_model.embeddings)
+            # freeze embeddings
+            for param in self.model.base_model.embeddings.parameters():
+                param.requires_grad = False
+            # unfreeze last n stages
             stages.extend(self.model.base_model.encoder.stages)
             stages.append(self.model.base_model.layernorm)
             unfreeze_last_n_stages_torch(stages, n)
         elif self.model.base_model_prefix == "levit":
             stages = []
-            stages.extend(list(self.model.base_model.patch_embeddings.children()))
+            # stages.extend(list(self.model.base_model.patch_embeddings.children()))
+            # freeze embeddings
+            for param in self.model.base_model.patch_embeddings.parameters():
+                param.requires_grad = False
+            # unfreeze last n stages
             stages.extend(self.model.base_model.encoder.stages)
             unfreeze_last_n_stages_torch(stages, n)
         elif self.model.base_model_prefix == "cvt":
             stages = []
@@ -85,14 +117,30 @@ def unfreeze_last_n_stages(self, n):
             unfreeze_last_n_stages_torch(stages, n)
         elif self.model.base_model_prefix == "clip":
             stages = []
-            stages.extend(list(self.model.base_model.vision_model.embeddings.children()))
+            # stages.extend(list(self.model.base_model.vision_model.embeddings.children()))
+            # freeze embeddings
+            for param in self.model.base_model.vision_model.embeddings.parameters():
+                param.requires_grad = False
+            # unfreeze last n stages
             stages.extend(list(self.model.base_model.vision_model.encoder.layers.children()))
             unfreeze_last_n_stages_torch(stages, n)
         elif self.model.base_model_prefix in ["swin", "vit", "deit", "beit"]:
             stages = []
-            stages.extend(list(self.model.base_model.embeddings.children()))
+            # stages.extend(list(self.model.base_model.embeddings.children()))
+            # freeze embeddings
+            for param in self.model.base_model.embeddings.parameters():
+                param.requires_grad = False
+            # unfreeze last n stages
             stages.extend(list(self.model.base_model.encoder.layers.children()))
             stages.append(self.model.base_model.layernorm)
             unfreeze_last_n_stages_torch(stages, n)
+        elif self.model.base_model_prefix == "videomae":
+            stages = []
+            # freeze embeddings
+            for param in self.model.base_model.embeddings.parameters():
+                param.requires_grad = False
+            # unfreeze last n stages
+            stages.extend(list(self.model.base_model.encoder.layer.children()))
+            unfreeze_last_n_stages_torch(stages, n)
         else:
             raise NotImplementedError(f"Freezing not supported for Huggingface model: {self.model.base_model_prefix}")

From 47ffc02355d2c50d0b2bf1b277b13b8039e299ef Mon Sep 17 00:00:00 2001
From: fcakyon <34196005+fcakyon@users.noreply.github.com>
Date: Mon, 22 Aug 2022 17:17:33 +0300
Subject: [PATCH 3/8] add automated gradio app and hf model card generation utils

---
 video_transformers/templates.py | 191 ++++++++++++++++++++++++++++++++
 1 file changed, 191 insertions(+)
 create mode 100644 video_transformers/templates.py

diff --git a/video_transformers/templates.py b/video_transformers/templates.py
new file mode 100644
index 0000000..ff6b4d8
--- /dev/null
+++ b/video_transformers/templates.py
@@ -0,0 +1,191 @@
+from pathlib import Path
+from typing import Any, Dict, List
+
+
+def generate_labels_table(labels: List[str]) -> str:
+    str_1 = """
+| Labels |
+| :-- |
+"""
+    str_2 = "\n".join(["| " + label + " |" for label in labels])
+    return str_1 + str_2
+
+
+def generate_dict_to_table(_dict: Dict[str, Any]) -> str:
+    if not _dict:
+        return "N/A"
+    else:
+        return "\n".join([f"| {key} | {value} |" for key, value in _dict.items()])
+
+
+def generate_hf_model_card(
+    task: str = "single_label_classification",
+    total_model_params: int = None,
+    total_trainable_model_params: int = None,
+    labels: List[str] = None,
+    preprocessor_config: Dict[str, Any] = None,
+    backbone_config: Dict[str, Any] = None,
+    neck_config: Dict[str, Any] = None,
+    head_config: Dict[str, Any] = None,
+) -> str:
+    return (
+        f"""
+---
+library_name: video-transformers
+tags:
+- Video Transformers
+- video-transformers
+- video-classification
+- image-classification
+---
+
+## Usage
+
+```python
+from video_transformers import VideoModel
+
+model = VideoModel.from_pretrained(model_name_or_path)
+
+model.predict(video_path="video.mp4")
+>> [{{'filename': "video.mp4", 'predictions': {{'class1': 0.98, 'class2': 0.02}}}}]
+```
+
+- Refer to [video-transformers](https://github.com/video-transformers/) for more details.
+
+## Model description
+
+This model is intended to be used for the task of classifying videos.
+A video is an ordered sequence of frames. An individual frame of a video has spatial information whereas a sequence of video frames has temporal information.
+This model can predict the following {len(labels)} labels:
+
+{generate_labels_table(labels)}
+
+| Model Details | Value |
+| Task | {task} |
+| Total Model Params | {str(total_model_params)} |
+| Total Trainable Model Params | {str(total_trainable_model_params)} |
+
+| Preprocessor Config | Value |
+| :-- | :-- |
+"""
+        + generate_dict_to_table(preprocessor_config)
+        + """
+
+| Backbone Config | Value |
+| :-- | :-- |
+"""
+        + generate_dict_to_table(backbone_config)
+        + """
+
+| Neck Config | Value |
+| :-- | :-- |
+"""
+        + generate_dict_to_table(neck_config)
+        + """
+
+| Head Config | Value |
+| :-- | :-- |
+"""
+        + generate_dict_to_table(head_config)
+        + """
+
+Model-card auto generated by [video-transformers](https://github.com/fcakyon/video-transformers).
+"""
+    )
+
+
+def export_hf_model_card(
+    export_dir: str,
+    task: str = "single_label_classification",
+    total_model_params: int = None,
+    total_trainable_model_params: int = None,
+    labels: List[str] = None,
+    preprocessor_config: Dict[str, Any] = None,
+    backbone_config: Dict[str, Any] = None,
+    neck_config: Dict[str, Any] = None,
+    head_config: Dict[str, Any] = None,
+) -> str:
+    export_path = Path(export_dir) / "README.md"
+    # save as readme.md
+    with open(export_path, "w") as f:
+        f.write(
+            generate_hf_model_card(
+                task=task,
+                total_model_params=total_model_params,
+                total_trainable_model_params=total_trainable_model_params,
+                labels=labels,
+                preprocessor_config=preprocessor_config,
+                backbone_config=backbone_config,
+                neck_config=neck_config,
+                head_config=head_config,
+            )
+        )
+
+
+def generate_gradio_app(
+    model_url: str,
+    examples: List[str],
+    author_username: str = None,
+) -> str:
+    return f"""
+import gradio as gr
+
+from video_transformers import VideoModel
+
+model: VideoModel = VideoModel.from_pretrained({model_url})
+
+app = gr.Blocks()
+
+with app:
+    gr.Markdown("# **Video Classification with Transformers**")
+    gr.Markdown("This space demonstrates the use of hybrid Transformer-based models for video classification.")
+    gr.Markdown(f"The model is trained to classify videos belonging to the following classes: {model.labels}")
+
+    with gr.Tabs():
+        with gr.TabItem("Upload & Predict"):
+            with gr.Box():
+
+                with gr.Row():
+                    input_video = gr.Video(label="Input Video", show_label=True)
+                    output_label = gr.Label(label="Model Output", show_label=True)
+
+                gr.Markdown("**Predict**")
+
+                with gr.Box():
+                    with gr.Row():
+                        submit_button = gr.Button("Submit")
+
+            gr.Markdown("**Examples:**")
+
+            # gr.Markdown("CricketShot, PlayingCello, Punch, ShavingBeard, TennisSwing")
+
+            with gr.Column():
+                gr.Examples({examples}, [input_video], [output_label], model.predict, cache_examples=True)
+
+    submit_button.click(model.predict, inputs=input_video, outputs=[output_label])
+
+    gr.Markdown("**Note:** The model is trained to classify videos belonging to the following classes: {model.labels}")
+
+    gr.Markdown("**Credits:**")
+    gr.Markdown("This space is powered by [video-transformers]('https://github.com/video-transformers/')")
+    gr.Markdown("This model is available at 'huggingface.co/{model_url}'.")
+    gr.Markdown("{"This model is finetuned by '" + author_username+"'." if author_username else ""}")
+"""
+
+
+if __name__ == "__main__":
+    from video_transformers import VideoModel
+
+    model: VideoModel = VideoModel.from_pretrained("runs/hf_exp15/checkpoint")
+    print(
+        generate_hf_model_card(
+            labels=model.labels,
+            backbone_config=model.config["backbone"],
+            neck_config=model.config["neck"],
+            preprocessor_config=model.preprocessor_config,
+            head_config=model.config["head"],
+            total_model_params=model.num_total_params,
+            total_trainable_model_params=model.num_trainable_params,
+        )
+    )
+    print(generate_gradio_app("runs/hf_exp15", examples=["video.mp4"], author_username="fcakyon"))

From 243a6803e1b4176895ce08a21bc9dc5ba2defa51 Mon Sep 17 00:00:00 2001
From: fcakyon <34196005+fcakyon@users.noreply.github.com>
Date: Mon, 22 Aug 2022 17:18:03 +0300
Subject: [PATCH 4/8] add push to hub with auto model card generation

---
 video_transformers/hfhub_wrapper/hub_mixin.py | 185 ++++++++++++++++++
 video_transformers/modeling.py                |  81 +++++++-
 2 files changed, 265 insertions(+), 1 deletion(-)
 create mode 100644 video_transformers/hfhub_wrapper/hub_mixin.py

diff --git a/video_transformers/hfhub_wrapper/hub_mixin.py b/video_transformers/hfhub_wrapper/hub_mixin.py
new file mode 100644
index 0000000..5c855e7
--- /dev/null
+++ b/video_transformers/hfhub_wrapper/hub_mixin.py
@@ -0,0 +1,185 @@
+import os
+import tempfile
+from pathlib import Path
+from typing import List, Optional, Union
+
+from huggingface_hub import hf_api
+from huggingface_hub.hf_api import HfApi, HfFolder
+from huggingface_hub.repository import Repository
+
+from video_transformers.templates import export_hf_model_card
+
+
+def push_to_hub(
+    self,
+    # NOTE: deprecated signature that will change in 0.12
+    *,
+    repo_path_or_name: Optional[str] = None,
+    repo_url: Optional[str] = None,
+    commit_message: Optional[str] = "Add model",
+    organization: Optional[str] = None,
+    private: bool = False,
+    api_endpoint: Optional[str] = None,
+    use_auth_token: Optional[Union[bool, str]] = None,
+    git_user: Optional[str] = None,
+    git_email: Optional[str] = None,
+    config: Optional[dict] = None,
+    skip_lfs_files: bool = False,
+    # NOTE: New arguments since 0.9
+    repo_id: Optional[str] = None,  # optional only until 0.12
+    token: Optional[str] = None,
+    branch: Optional[str] = None,
+    create_pr: Optional[bool] = None,
+    allow_patterns: Optional[Union[List[str], str]] = None,
+    ignore_patterns: Optional[Union[List[str], str]] = None,
+    # TODO (release 0.12): signature must be the following
+    # repo_id: str,
+    # *,
+    # commit_message: Optional[str] = "Add model",
+    # private: bool = False,
+    # api_endpoint: Optional[str] = None,
+    # token: Optional[str] = None,
+    # branch: Optional[str] = None,
+    # create_pr: Optional[bool] = None,
+    # config: Optional[dict] = None,
+    # allow_patterns: Optional[Union[List[str], str]] = None,
+    # ignore_patterns: Optional[Union[List[str], str]] = None,
+) -> str:
+    """
+    Upload model checkpoint to the Hub.
+
+    Use `allow_patterns` and `ignore_patterns` to precisely filter which files
+    should be pushed to the hub. See [`upload_folder`] reference for more details.
+
+    Parameters:
+        repo_id (`str`, *optional*):
+            Repository name to push to.
+        commit_message (`str`, *optional*):
+            Message to commit while pushing.
+        private (`bool`, *optional*, defaults to `False`):
+            Whether the repository created should be private.
+        api_endpoint (`str`, *optional*):
+            The API endpoint to use when pushing the model to the hub.
+        token (`str`, *optional*):
+            The token to use as HTTP bearer authorization for remote files.
+            If not set, will use the token set when logging in with
+            `transformers-cli login` (stored in `~/.huggingface`).
+        branch (`str`, *optional*):
+            The git branch on which to push the model. This defaults to
+            the default branch as specified in your repository, which
+            defaults to `"main"`.
+        create_pr (`boolean`, *optional*):
+            Whether or not to create a Pull Request from `branch` with that commit.
+            Defaults to `False`.
+        config (`dict`, *optional*):
+            Configuration object to be saved alongside the model weights.
+        allow_patterns (`List[str]` or `str`, *optional*):
+            If provided, only files matching at least one pattern are pushed.
+        ignore_patterns (`List[str]` or `str`, *optional*):
+            If provided, files matching any of the patterns are not pushed.
+
+    Returns:
+        The url of the commit of your model in the given repository.
+    """
+    # If the repo id is set, it means we use the new version using HTTP endpoint
+    # (introduced in v0.9).
+    if repo_id is not None:
+        token, _ = hf_api._validate_or_retrieve_token(token)
+        api = HfApi(endpoint=api_endpoint)
+
+        api.create_repo(
+            repo_id=repo_id,
+            repo_type="model",
+            token=token,
+            private=private,
+            exist_ok=True,
+        )
+
+        # Push the files to the repo in a single commit
+        with tempfile.TemporaryDirectory() as tmp:
+            saved_path = Path(tmp) / repo_id
+            self.save_pretrained(saved_path, config=config)
+            export_hf_model_card(
+                export_dir=saved_path,
+                labels=self.labels,
+                backbone_config=self.config["backbone"],
+                neck_config=self.config["neck"],
+                preprocessor_config=self.config["preprocessor"],
+                head_config=self.config["head"],
+                total_model_params=self.num_total_params,
+                total_trainable_model_params=self.num_trainable_params,
+            )
+            return api.upload_folder(
+                repo_id=repo_id,
+                repo_type="model",
+                token=token,
+                folder_path=saved_path,
+                commit_message=commit_message,
+                revision=branch,
+                create_pr=create_pr,
+                allow_patterns=allow_patterns,
+                ignore_patterns=ignore_patterns,
+            )
+
+    # If the repo id is None, it means we use the deprecated version using Git
+    # TODO: remove code between here and `return repo.git_push()` in release 0.12
+    if repo_path_or_name is None and repo_url is None:
+        raise ValueError("You need to specify a `repo_path_or_name` or a `repo_url`.")
+
+    if use_auth_token is None and repo_url is None:
+        token = HfFolder.get_token()
+        if token is None:
+            raise ValueError(
+                "You must login to the Hugging Face hub on this computer by typing"
+                " `huggingface-cli login` and entering your credentials to use"
+                " `use_auth_token=True`. Alternatively, you can pass your own token"
+                " as the `use_auth_token` argument."
+            )
+    elif isinstance(use_auth_token, str):
+        token = use_auth_token
+    else:
+        token = None
+
+    if repo_path_or_name is None:
+        repo_path_or_name = repo_url.split("/")[-1]
+
+    # If no URL is passed and there's no path to a directory containing files, create a repo
+    if repo_url is None and not os.path.exists(repo_path_or_name):
+        repo_id = Path(repo_path_or_name).name
+        if organization:
+            repo_id = f"{organization}/{repo_id}"
+        repo_url = HfApi(endpoint=api_endpoint).create_repo(
+            repo_id=repo_id,
+            token=token,
+            private=private,
+            repo_type=None,
+            exist_ok=True,
+        )
+
+    repo = Repository(
+        repo_path_or_name,
+        clone_from=repo_url,
+        use_auth_token=use_auth_token,
+        git_user=git_user,
+        git_email=git_email,
+        skip_lfs_files=skip_lfs_files,
+    )
+    repo.git_pull(rebase=True)
+
+    # Save the files in the cloned repo
+    self.save_pretrained(repo_path_or_name, config=config)
+    export_hf_model_card(
+        export_dir=repo_path_or_name,
+        labels=self.labels,
+        backbone_config=self.config["backbone"],
+        neck_config=self.config["neck"],
+        preprocessor_config=self.config["preprocessor"],
+        head_config=self.config["head"],
+        total_model_params=self.num_total_params,
+        total_trainable_model_params=self.num_trainable_params,
+    )
+
+    # Commit and push!
+    repo.git_add(auto_lfs_track=True)
+    repo.git_commit(commit_message)
+    return repo.git_push()

diff --git a/video_transformers/modeling.py b/video_transformers/modeling.py
index b0d572b..8b20691 100644
--- a/video_transformers/modeling.py
+++ b/video_transformers/modeling.py
@@ -1,7 +1,7 @@
 import json
 import os
 from pathlib import Path
-from typing import Dict, List, Union
+from typing import Dict, List, Optional, Union
 
 import torch
 from huggingface_hub.constants import PYTORCH_WEIGHTS_NAME
@@ -13,6 +13,7 @@
 import video_transformers.deployment.onnx
 import video_transformers.predict
 from video_transformers.heads import LinearHead
+from video_transformers.hfhub_wrapper.hub_mixin import push_to_hub
 from video_transformers.utils.torch import get_num_total_params, get_num_trainable_params
 
 
@@ -176,6 +177,84 @@ def _from_pretrained(
 
         return model
 
+    def push_to_hub(
+        self,
+        *,
+        repo_path_or_name: Optional[str] = None,
+        repo_url: Optional[str] = None,
+        commit_message: Optional[str] = "Add model",
+        organization: Optional[str] = None,
+        private: bool = False,
+        api_endpoint: Optional[str] = None,
+        use_auth_token: Optional[Union[bool, str]] = None,
+        git_user: Optional[str] = None,
+        git_email: Optional[str] = None,
+        config: Optional[dict] = None,
+        skip_lfs_files: bool = False,
+        repo_id: Optional[str] = None,
+        token: Optional[str] = None,
+        branch: Optional[str] = None,
+        create_pr: Optional[bool] = None,
+        allow_patterns: Optional[Union[List[str], str]] = None,
+        ignore_patterns: Optional[Union[List[str], str]] = None,
+    ) -> str:
+        """
+        Upload model checkpoint to the Hub.
+
+        Use `allow_patterns` and `ignore_patterns` to precisely filter which files
+        should be pushed to the hub. See [`upload_folder`] reference for more details.
+
+        Parameters:
+            repo_id (`str`, *optional*):
+                Repository name to push to.
+            commit_message (`str`, *optional*):
+                Message to commit while pushing.
+            private (`bool`, *optional*, defaults to `False`):
+                Whether the repository created should be private.
+            api_endpoint (`str`, *optional*):
+                The API endpoint to use when pushing the model to the hub.
+            token (`str`, *optional*):
+                The token to use as HTTP bearer authorization for remote files.
+                If not set, will use the token set when logging in with
+                `transformers-cli login` (stored in `~/.huggingface`).
+            branch (`str`, *optional*):
+                The git branch on which to push the model. This defaults to
+                the default branch as specified in your repository, which
+                defaults to `"main"`.
+            create_pr (`boolean`, *optional*):
+                Whether or not to create a Pull Request from `branch` with that commit.
+                Defaults to `False`.
+            config (`dict`, *optional*):
+                Configuration object to be saved alongside the model weights.
+            allow_patterns (`List[str]` or `str`, *optional*):
+                If provided, only files matching at least one pattern are pushed.
+            ignore_patterns (`List[str]` or `str`, *optional*):
+                If provided, files matching any of the patterns are not pushed.
+
+        Returns:
+            The url of the commit of your model in the given repository.
+        """
+        return push_to_hub(
+            self=self,
+            repo_path_or_name=repo_path_or_name,
+            repo_url=repo_url,
+            commit_message=commit_message,
+            organization=organization,
+            private=private,
+            api_endpoint=api_endpoint,
+            use_auth_token=use_auth_token,
+            git_user=git_user,
+            git_email=git_email,
+            config=config,
+            skip_lfs_files=skip_lfs_files,
+            repo_id=repo_id,
+            token=token,
+            branch=branch,
+            create_pr=create_pr,
+            allow_patterns=allow_patterns,
+            ignore_patterns=ignore_patterns,
+        )
+
     def __init__(
         self,
         backbone: Union[TimeDistributed, video_transformers.backbones.base.Backbone],

From 264930686ad3732b2fd757b025c10b0b90d1ec18 Mon Sep 17 00:00:00 2001
From: fcakyon <34196005+fcakyon@users.noreply.github.com>
Date: Mon, 22 Aug 2022 17:18:17 +0300
Subject: [PATCH 5/8] add automated gradio app export

---
 video_transformers/deployment/gradio.py | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)
 create mode 100644 video_transformers/deployment/gradio.py

diff --git a/video_transformers/deployment/gradio.py b/video_transformers/deployment/gradio.py
new file mode 100644
index 0000000..d02956c
--- /dev/null
+++ b/video_transformers/deployment/gradio.py
@@ -0,0 +1,18 @@
+from pathlib import Path
+from typing import List
+
+from video_transformers.templates import generate_gradio_app
+
+
+def export_gradio_app(
+    model_url: str,
+    examples: List[str],
+    author_username: str = None,
+    export_dir: str = "runs/exports/",
+    export_filename: str = "app.py",
+) -> str:
+    Path(export_dir).mkdir(parents=True, exist_ok=True)
+    export_path = Path(export_dir) / export_filename
+    # save as gradio app
+    with open(export_path, "w") as f:
+        f.write(generate_gradio_app(model_url, examples, author_username))

From c789021f9cc8a63853ae621f107f34d86e6e30b5 Mon Sep 17 00:00:00 2001
From: fcakyon <34196005+fcakyon@users.noreply.github.com>
Date: Mon, 22 Aug 2022 17:34:57 +0300
Subject: [PATCH 6/8] update readme

---
 README.md | 22 +++++++++++++++++++++-
 1 file changed, 21 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 7e5b4d1..5f89704 100644
--- a/README.md
+++ b/README.md
@@ -205,7 +205,16 @@
 model = VideoModel.from_pretrained("runs/exp/checkpoint")
 model.from_pretrained('account_name/model_name')
 ```
-- (Incoming feature) automatically Gradio app Huggingface Space:
+- Push your model to HuggingFace hub with auto-generated model-cards:
+
+```python
+from video_transformers import VideoModel
+
+model = VideoModel.from_pretrained("runs/exp/checkpoint")
+model.push_to_hub(repo_id='account_name/app_name')
+```
+
+- (Incoming feature) Push your model as a Gradio app to HuggingFace Space:
 
 ```python
 from video_transformers import VideoModel
@@ -247,3 +256,14 @@ from video_transformers import VideoModel
 model = VideoModel.from_pretrained("runs/exp/checkpoint")
 model.to_onnx(quantize=False, opset_version=12, export_dir="runs/exports/", export_filename="model.onnx")
 ```
+
+## 🤗 Gradio support
+
+- Convert your trained models into a Gradio app for deployment:
+
+```python
+from video_transformers import VideoModel
+
+model = VideoModel.from_pretrained("runs/exp/checkpoint")
+model.to_gradio(examples=['video.mp4'], export_dir="runs/exports/", export_filename="app.py")
+```
\ No newline at end of file

From 065a69f0d269d90e70eb0dd6dee8b01684d4ad7b Mon Sep 17 00:00:00 2001
From: fcakyon <34196005+fcakyon@users.noreply.github.com>
Date: Mon, 22 Aug 2022 17:35:48 +0300
Subject: [PATCH 7/8] add gradio export

---
 video_transformers/deployment/gradio.py | 11 +++++++----
 video_transformers/modeling.py          | 26 +++++++++++++++++++++++--
 video_transformers/templates.py         |  5 ++---
 3 files changed, 33 insertions(+), 9 deletions(-)

diff --git a/video_transformers/deployment/gradio.py b/video_transformers/deployment/gradio.py
index d02956c..ea80798 100644
--- a/video_transformers/deployment/gradio.py
+++ b/video_transformers/deployment/gradio.py
@@ -5,14 +5,17 @@
 
 
 def export_gradio_app(
-    model_url: str,
+    model,
     examples: List[str],
     author_username: str = None,
     export_dir: str = "runs/exports/",
     export_filename: str = "app.py",
 ) -> str:
     Path(export_dir).mkdir(parents=True, exist_ok=True)
-    export_path = Path(export_dir) / export_filename
+    app_path = Path(export_dir) / export_filename
+    model_dir = Path(export_dir) / "checkpoint"
+    # save model
+    model.save_pretrained(model_dir, config=model.config)
     # save as gradio app
-    with open(export_path, "w") as f:
-        f.write(generate_gradio_app(model_url, examples, author_username))
+    with open(app_path, "w") as f:
+        f.write(generate_gradio_app(model_dir, examples, author_username))

diff --git a/video_transformers/modeling.py b/video_transformers/modeling.py
index 8b20691..07ae79b 100644
--- a/video_transformers/modeling.py
+++ b/video_transformers/modeling.py
@@ -10,7 +10,6 @@
 from torch import nn
 
 import video_transformers.backbones.base
-import video_transformers.deployment.onnx
 import video_transformers.predict
 from video_transformers.heads import LinearHead
 from video_transformers.hfhub_wrapper.hub_mixin import push_to_hub
@@ -327,7 +326,7 @@ def config(self):
         else:
             config["neck"] = None
         config["labels"] = self.labels
-        config["preprocessor_config"] = self.preprocessor_config
+        config["preprocessor"] = self.preprocessor_config
         return config
 
     def to_onnx(
@@ -346,9 +345,32 @@ def to_onnx(
             export_dir: Directory to export model to.
             export_filename: Filename to export model to.
         """
+        import video_transformers.deployment.onnx
 
         video_transformers.deployment.onnx.export(self, quantize, opset_version, export_dir, export_filename)
 
+    def to_gradio(
+        self,
+        examples: List[str],
+        author_username: str = None,
+        export_dir: str = "runs/exports/",
+        export_filename: str = "app.py",
+    ):
+        """
+        Export model as Gradio App.
+
+        Args:
+            examples: List of examples to use for the app.
+            author_username: Author's username.
+            export_dir: Directory to export model to.
+            export_filename: Filename to export model to.
+        """
+        import video_transformers.deployment.gradio
+
+        video_transformers.deployment.gradio.export_gradio_app(
+            self, examples, author_username, export_dir, export_filename
+        )
+
     @property
     def example_input_array(self):
         if self.preprocessor_config:

diff --git a/video_transformers/templates.py b/video_transformers/templates.py
index ff6b4d8..b90491f 100644
--- a/video_transformers/templates.py
+++ b/video_transformers/templates.py
@@ -123,7 +123,7 @@ def export_hf_model_card(
 
 
 def generate_gradio_app(
-    model_url: str,
+    model_path_or_url: str,
     examples: List[str],
     author_username: str = None,
 ) -> str:
@@ -132,7 +132,7 @@ def generate_gradio_app(
 from video_transformers import VideoModel
 
-model: VideoModel = VideoModel.from_pretrained({model_url})
+model: VideoModel = VideoModel.from_pretrained({model_path_or_url})
 
 app = gr.Blocks()
 
 with app:
@@ -168,7 +168,6 @@ def generate_gradio_app(
 
     gr.Markdown("**Credits:**")
     gr.Markdown("This space is powered by [video-transformers]('https://github.com/video-transformers/')")
-    gr.Markdown("This model is available at 'huggingface.co/{model_url}'.")
     gr.Markdown("{"This model is finetuned by '" + author_username+"'." if author_username else ""}")
 """
 

From 87a6d0098d8302ab5fcbb08d1b98da109efb2006 Mon Sep 17 00:00:00 2001
From: fcakyon <34196005+fcakyon@users.noreply.github.com>
Date: Mon, 22 Aug 2022 17:47:44 +0300
Subject: [PATCH 8/8] fix gradio export

---
 video_transformers/deployment/gradio.py | 2 +-
 video_transformers/templates.py         | 8 +++++++-
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/video_transformers/deployment/gradio.py b/video_transformers/deployment/gradio.py
index ea80798..21bd738 100644
--- a/video_transformers/deployment/gradio.py
+++ b/video_transformers/deployment/gradio.py
@@ -13,7 +13,7 @@ def export_gradio_app(
 ) -> str:
     Path(export_dir).mkdir(parents=True, exist_ok=True)
     app_path = Path(export_dir) / export_filename
-    model_dir = Path(export_dir) / "checkpoint"
+    model_dir = str(Path(export_dir) / "checkpoint")
     # save model
     model.save_pretrained(model_dir, config=model.config)
     # save as gradio app

diff --git a/video_transformers/templates.py b/video_transformers/templates.py
index b90491f..9026020 100644
--- a/video_transformers/templates.py
+++ b/video_transformers/templates.py
@@ -127,12 +127,16 @@ def generate_gradio_app(
     examples: List[str],
     author_username: str = None,
 ) -> str:
+    from video_transformers import VideoModel
+
+    model = VideoModel.from_pretrained(model_path_or_url)
+
     return f"""
 import gradio as gr
 
 from video_transformers import VideoModel
 
-model: VideoModel = VideoModel.from_pretrained({model_path_or_url})
+model: VideoModel = VideoModel.from_pretrained("{model_path_or_url}")
 
 app = gr.Blocks()
 
 with app:
@@ -169,6 +173,8 @@ def generate_gradio_app(
     gr.Markdown("**Credits:**")
     gr.Markdown("This space is powered by [video-transformers]('https://github.com/video-transformers/')")
     gr.Markdown("{"This model is finetuned by '" + author_username+"'." if author_username else ""}")
+
+app.launch()
 """
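
Taken together, the series wires two new user-facing entry points into `VideoModel`: `push_to_hub` (patch 4, which also writes the auto-generated model card from patch 3) and `to_gradio` (patches 5-8). Below is a minimal end-to-end sketch of the resulting workflow, assuming a trained checkpoint already exists at `runs/exp/checkpoint`, that a Hugging Face token is configured via `huggingface-cli login`, and that `account_name/model_name` is a placeholder repository id:

```python
from video_transformers import VideoModel

# load a local checkpoint produced by an earlier training run
model = VideoModel.from_pretrained("runs/exp/checkpoint")

# upload the checkpoint to the Hub; a README.md model card is generated from
# the labels, preprocessor, backbone, neck and head configs before upload
model.push_to_hub(repo_id="account_name/model_name")

# export a Gradio demo: writes runs/exports/app.py plus a copy of the
# checkpoint under runs/exports/checkpoint/ that the app loads at startup
model.to_gradio(examples=["video.mp4"], export_dir="runs/exports/", export_filename="app.py")
```

After patch 8 the generated `app.py` quotes the checkpoint path passed to `VideoModel.from_pretrained` and ends with `app.launch()`, so the exported file can be run directly with `python runs/exports/app.py`.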