From d7861b8bad5e7145ff864911f0e60168a97c550d Mon Sep 17 00:00:00 2001 From: Morgan McGuire Date: Tue, 29 Aug 2023 14:52:49 +0200 Subject: [PATCH 01/15] Create new fine-tuning page and update api page --- .../integrations/other/openai-finetuning.md | 193 +++++++++++++++++ docs/guides/integrations/other/openai.md | 201 +----------------- 2 files changed, 197 insertions(+), 197 deletions(-) create mode 100644 docs/guides/integrations/other/openai-finetuning.md diff --git a/docs/guides/integrations/other/openai-finetuning.md b/docs/guides/integrations/other/openai-finetuning.md new file mode 100644 index 0000000000..04ab34990b --- /dev/null +++ b/docs/guides/integrations/other/openai-finetuning.md @@ -0,0 +1,193 @@ +--- +slug: /guides/integrations/openai +description: How to Fine-Tune OpenAI models using W&B. +displayed_sidebar: default +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# OpenAI Fine-Tuning + +Log your OpenAI model's fine-tuning metrics and configuration to Weights & Biases to analyse and understand the performance of your newly fine-tuned models and share the results with your colleagues. + +If you use OpenAI's API to [fine-tune OpenAI models](https://platform.openai.com/docs/guides/fine-tuning/), you can now use the W&B integration to track experiments, models, and datasets in your central dashboard. + +![](/images/integrations/open_ai_api.png) + +All it takes is one line: `openai wandb sync` + +### :sparkles: Check out interactive examples + +* [Demo Colab](http://wandb.me/openai-colab) +* [Report - OpenAI Fine-Tuning Exploration and Tips](http://wandb.me/openai-report) + +### :tada: Sync your fine-tunes with one line! + +Make sure you are using latest version of openai and wandb. + +```shell-session +$ pip install --upgrade openai wandb +``` + +Then sync your results from the command line or from your script. + + + + +```shell-session +$ # one line command +$ openai wandb sync + +$ # passing optional parameters +$ openai wandb sync --help +``` + + + +```python +from openai.wandb_logger import WandbLogger + +# one line command +WandbLogger.sync() + +# passing optional parameters +WandbLogger.sync( + id=None, + n_fine_tunes=None, + project="OpenAI-Fine-Tune", + entity=None, + force=False, + **kwargs_wandb_init +) +``` + + + +We scan for new completed fine-tunes and automatically add them to your dashboard. + +![](/images/integrations/open_ai_auto_scan.png) + +In addition your training and validation files are logged and versioned, as well as details of your fine-tune results. This let you interactively explore your training and validation data. + +![](/images/integrations/open_ai_validation_files.png) + +### :gear: Optional arguments + +| Argument | Description | +| ------------------------ | ------------------------------------------------------------------------------------------------------------------------- | +| -i ID, --id ID | The id of the fine-tune (optional) | +| -n N, --n\_fine\_tunes N | Number of most recent fine-tunes to log when an id is not provided. By default, every fine-tune is synced. | +| --project PROJECT | Name of the project where you're sending runs. By default, it is "GPT-3". | +| --entity ENTITY | Username or team name where you're sending runs. By default, your default entity is used, which is usually your username. | +| --force | Forces logging and overwrite existing wandb run of the same fine-tune. 
| \*\*kwargs\_wandb\_init | In Python, any additional argument is directly passed to [`wandb.init()`](../../../ref/python/init.md) |

### 🔍 Inspect sample predictions

Use [Tables](../../tables/intro.md) to better visualize sample predictions and compare models.

![](/images/integrations/open_ai_inspect_sample.png)

Create a new run:

```python
run = wandb.init(project="OpenAI-Fine-Tune", job_type="eval")
```

Retrieve a model ID for inference.

You can use automatically logged artifacts to retrieve your latest model:

```python
ft_artifact = run.use_artifact("ENTITY/PROJECT/fine_tune_details:latest")
fine_tuned_model = ft_artifact.metadata["fine_tuned_model"]
```

You can also retrieve your validation file:

```python
artifact_valid = run.use_artifact("ENTITY/PROJECT/FILENAME:latest")
valid_file = artifact_valid.get_path("FILENAME").download()
```

Perform some inferences using the OpenAI API:

```python
# perform inference and record results
my_prompts = ["PROMPT_1", "PROMPT_2"]
results = []
for prompt in my_prompts:
    res = openai.Completion.create(model=fine_tuned_model,
                                   prompt=prompt,
                                   ...)
    results.append(res["choices"][0]["text"])
```

Log your results with a Table:

```python
table = wandb.Table(columns=['prompt', 'completion'],
                    data=list(zip(my_prompts, results)))
# log the table so it appears in your run workspace
run.log({"predictions": table})
```

## :question: Frequently Asked Questions

### How do I share my fine-tune results with my team in W&B?

Sync all your runs to your team account with:

```shell-session
$ openai wandb sync --entity MY_TEAM_ACCOUNT
```

### How can I organize my runs?

Your W&B runs are automatically organized and can be filtered/sorted based on any configuration parameter such as job type, base model, learning rate, training filename and any other hyperparameter.

In addition, you can rename your runs, add notes or create tags to group them.

Once you’re satisfied, you can save your workspace and use it to create a report, importing data from your runs and saved artifacts (training/validation files).

### How can I access my fine-tune details?

Fine-tune details are logged to W&B as artifacts and can be accessed with:

```python
import wandb

ft_artifact = wandb.run.use_artifact('USERNAME/PROJECT/job_details:VERSION')
```

where `VERSION` is either:

* a version number such as `v2`
* the fine-tune id such as `ft-xxxxxxxxx`
* an alias added automatically (such as `latest`) or one you added manually

You can then access fine-tune details through `ft_artifact.metadata`. For example, the fine-tuned model can be retrieved with `ft_artifact.metadata["fine_tuned_model"]`.

### What if a fine-tune was not synced successfully?

You can always call `openai wandb sync` again and we will re-sync any run that was not synced successfully.

If needed, you can call `openai wandb sync --id fine_tune_id --force` to force re-syncing a specific fine-tune.

### Can I track my datasets with W&B?

Yes, you can integrate your entire pipeline with W&B through Artifacts, including creating your dataset, splitting it, training your models and evaluating them!

This will allow complete traceability of your models.
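To make that traceability concrete, here is a minimal sketch of versioning the JSONL training file you upload to OpenAI as a W&B Artifact before launching the fine-tune. This is not part of the `openai wandb sync` integration itself; the project, artifact name and `train.jsonl` path are placeholders:

```python
import wandb

# A minimal sketch: log the fine-tuning training file as a versioned dataset artifact.
# The project, artifact name and file path are placeholders for your own values.
run = wandb.init(project="OpenAI-Fine-Tune", job_type="prepare-dataset")

dataset = wandb.Artifact("fine-tune-training-data", type="dataset")
dataset.add_file("train.jsonl")  # the JSONL file you upload to OpenAI
run.log_artifact(dataset)
run.finish()
```

Later runs that consume the file with `use_artifact` are then linked to it in the artifact graph, which is what gives you end-to-end lineage.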
+ +![](/images/integrations/open_ai_faq_can_track.png) + +## :books: Resources + +* [OpenAI Fine-tuning Documentation](https://beta.openai.com/docs/guides/fine-tuning) is very thorough and contains many useful tips +* [Demo Colab](http://wandb.me/openai-colab) +* [Report - OpenAI Fine-Tuning Exploration & Tips](http://wandb.me/openai-report) diff --git a/docs/guides/integrations/other/openai.md b/docs/guides/integrations/other/openai.md index 2ebdc7920f..5b099dbfc2 100644 --- a/docs/guides/integrations/other/openai.md +++ b/docs/guides/integrations/other/openai.md @@ -1,6 +1,6 @@ --- -slug: /guides/integrations/openai -description: How to integrate W&B with OpenAI. +slug: /guides/integrations/openai-api +description: How to use W&B with the OpenAI API. displayed_sidebar: default --- @@ -9,14 +9,7 @@ import TabItem from '@theme/TabItem'; # OpenAI API -Weights & Biases has 2 OpenAI integrations - -1. **[OpenAI Python SDK API](#log-openai-api-calls-in-1-line-of-code):** Log requests, responses, token counts and model metadata with 1 line of code for all OpenAI models - -2. **[OpenAI GPT-3 Fine-tuning](#log-openai-fine-tunes-to-wb):** Log your GPT-3 fine-tuning metrics and configuration to Weights & Biases to analyse and understand the performance of your newly fine-tuned models. - - -## Log OpenAI API calls in 1 line of code +Use the Weights & Biases OpenAI API integration to log requests, responses, token counts and model metadata with 1 line of code for all OpenAI models, including fine-tuned models. **[Try in a Colab Notebook here →](https://github.com/wandb/examples/blob/master/colabs/openai/OpenAI_API_Autologger_Quickstart.ipynb)** @@ -72,190 +65,4 @@ We recommend that you call `disable()` to close all W&B processes when you are f autolog.disable() ``` -Now your inputs and completions will be logged to Weights & Biases, ready for analysis or to be shared with colleagues. - - - - -## Log OpenAI fine-tunes to W&B - -If you use OpenAI's API to [fine-tune GPT-3](https://beta.openai.com/docs/guides/fine-tuning), you can now use the W&B integration to track experiments, models, and datasets in your central dashboard. - -![](/images/integrations/open_ai_api.png) - -All it takes is one line: `openai wandb sync` - -### :sparkles: Check out interactive examples - -* [Demo Colab](http://wandb.me/openai-colab) -* [Report - GPT-3 Exploration and Fine-Tuning Tips](http://wandb.me/openai-report) - -### :tada: Sync your fine-tunes with one line! - -Make sure you are using latest version of openai and wandb. - -```shell-session -$ pip install --upgrade openai wandb -``` - -Then sync your results from the command line or from your script. - - - - -```shell-session -$ # one line command -$ openai wandb sync - -$ # passing optional parameters -$ openai wandb sync --help -``` - - - -```python -from openai.wandb_logger import WandbLogger - -# one line command -WandbLogger.sync() - -# passing optional parameters -WandbLogger.sync( - id=None, - n_fine_tunes=None, - project="GPT-3", - entity=None, - force=False, - **kwargs_wandb_init -) -``` - - - -We scan for new completed fine-tunes and automatically add them to your dashboard. - -![](/images/integrations/open_ai_auto_scan.png) - -In addition your training and validation files are logged and versioned, as well as details of your fine-tune results. This let you interactively explore your training and validation data. 
- -![](/images/integrations/open_ai_validation_files.png) - -### :gear: Optional arguments - -| Argument | Description | -| ------------------------ | ------------------------------------------------------------------------------------------------------------------------- | -| -i ID, --id ID | The id of the fine-tune (optional) | -| -n N, --n\_fine\_tunes N | Number of most recent fine-tunes to log when an id is not provided. By default, every fine-tune is synced. | -| --project PROJECT | Name of the project where you're sending runs. By default, it is "GPT-3". | -| --entity ENTITY | Username or team name where you're sending runs. By default, your default entity is used, which is usually your username. | -| --force | Forces logging and overwrite existing wandb run of the same fine-tune. | -| \*\*kwargs\_wandb\_init | In python, any additional argument is directly passed to [`wandb.init()`](../../../ref/python/init.md) | - -### 🔍 Inspect sample predictions - -Use [Tables](../../tables/intro.md) to better visualize sample predictions and compare models. - -![](/images/integrations/open_ai_inspect_sample.png) - -Create a new run: - -```python -run = wandb.init(project="GPT-3", job_type="eval") -``` - -Retrieve a model id for inference. - -You can use automatically logged artifacts to retrieve your latest model: - -```python -artifact_job = run.use_artifact("ENTITY/PROJECT/fine_tune_details:latest") -fine_tuned_model = artifact_job.metadata["fine_tuned_model"] -``` - -You can also retrieve your validation file: - -```python -artifact_valid = run.use_artifact("ENTITY/PROJECT/FILENAME:latest") -valid_file = artifact_valid.get_path("FILENAME").download() -``` - -Perform some inferences using OpenAI API: - -```python -# perform inference and record results -my_prompts = ["PROMPT_1", "PROMPT_2"] -results = [] -for prompt in my_prompts: - res = openai.Completion.create(model=fine_tuned_model, - prompt=prompt, - ...) - results.append(res["choices"][0]["text"]) -``` - -Log your results with a Table: - -```python -table = wandb.Table(columns=['prompt', 'completion'], - data=list(zip(my_prompts, results))) -``` - -## :question:Frequently Asked Questions - -### How do I share runs with my team? - -Sync all your runs to your team account with: - -```shell-session -$ openai wandb sync --entity MY_TEAM_ACCOUNT -``` - -### How can I organize my runs? - -Your runs are automatically organized and can be filtered/sorted based on any configuration parameter such as job type, base model, learning rate, training filename and any other hyper-parameter. - -In addition, you can rename your runs, add notes or create tags to group them. - -Once you’re satisfied, you can save your workspace and use it to create report, importing data from your runs and saved artifacts (training/validation files). - -### How can I access my fine-tune details? - -Fine-tune details are logged to W&B as artifacts and can be accessed with: - -```python -import wandb - -artifact_job = wandb.run.use_artifact('USERNAME/PROJECT/job_details:VERSION') -``` - -where `VERSION` is either: - -* a version number such as `v2` -* the fine-tune id such as `ft-xxxxxxxxx` -* an alias added automatically such as `latest` or manually - -You can then access fine-tune details through `artifact_job.metadata`. For example, the fine-tuned model can be retrieved with `artifact_job.metadata[`"`fine_tuned_model"]`. - -### What if a fine-tune was not synced successfully? 
- -You can always call again `openai wandb sync` and we will re-sync any run that was not synced successfully. - -If needed, you can call `openai wandb sync --id fine_tune_id --force` to force re-syncing a specific fine-tune. - -### Can I track my datasets with W&B? - -Yes, you can integrate your entire pipeline to W&B through Artifacts, including creating your dataset, splitting it, training your models and evaluating them! - -This will allow complete traceability of your models. - -![](/images/integrations/open_ai_faq_can_track.png) - -## :books: Resources - -* [OpenAI Fine-tuning Documentation](https://beta.openai.com/docs/guides/fine-tuning) is very thorough and contains many useful tips -* [Demo Colab](http://wandb.me/openai-colab) -* [Report - GPT-3 Exploration & Fine-tuning Tips](http://wandb.me/openai-report) +Now your inputs and completions will be logged to Weights & Biases, ready for analysis or to be shared with colleagues. \ No newline at end of file From 5e290ef57bc6f57bb034fa61f63d5b456d6c9f6c Mon Sep 17 00:00:00 2001 From: Morgan McGuire Date: Tue, 29 Aug 2023 14:57:11 +0200 Subject: [PATCH 02/15] fix sidebar --- docs/guides/integrations/other/{openai.md => openai-api.md} | 0 sidebars.js | 6 ++++-- 2 files changed, 4 insertions(+), 2 deletions(-) rename docs/guides/integrations/other/{openai.md => openai-api.md} (100%) diff --git a/docs/guides/integrations/other/openai.md b/docs/guides/integrations/other/openai-api.md similarity index 100% rename from docs/guides/integrations/other/openai.md rename to docs/guides/integrations/other/openai-api.md diff --git a/sidebars.js b/sidebars.js index 6550ef9626..5052d6eeb5 100644 --- a/sidebars.js +++ b/sidebars.js @@ -361,7 +361,8 @@ const sidebars = { 'guides/integrations/mmdetection', 'guides/integrations/other/mmf', 'guides/integrations/other/composer', - 'guides/integrations/other/openai', + 'guides/integrations/other/openai-api', + 'guides/integrations/other/openai-finetuning', 'guides/integrations/other/openai-gym', 'guides/integrations/other/paddledetection', 'guides/integrations/other/paddleocr', @@ -714,7 +715,8 @@ const sidebars = { 'guides/integrations/mmdetection', 'guides/integrations/other/mmf', 'guides/integrations/other/composer', - 'guides/integrations/other/openai', + 'guides/integrations/other/openai-api', + 'guides/integrations/other/openai-finetuning', 'guides/integrations/other/openai-gym', 'guides/integrations/other/paddledetection', 'guides/integrations/other/paddleocr', From ef3089bb1af8350b714120aae246fef44cb953e6 Mon Sep 17 00:00:00 2001 From: Morgan McGuire Date: Tue, 29 Aug 2023 15:36:08 +0200 Subject: [PATCH 03/15] updates --- .../integrations/other/openai-finetuning.md | 37 +++++++++++++++---- 1 file changed, 30 insertions(+), 7 deletions(-) diff --git a/docs/guides/integrations/other/openai-finetuning.md b/docs/guides/integrations/other/openai-finetuning.md index 04ab34990b..42e12d15a4 100644 --- a/docs/guides/integrations/other/openai-finetuning.md +++ b/docs/guides/integrations/other/openai-finetuning.md @@ -11,11 +11,35 @@ import TabItem from '@theme/TabItem'; Log your OpenAI model's fine-tuning metrics and configuration to Weights & Biases to analyse and understand the performance of your newly fine-tuned models and share the results with your colleagues. 
+## Sync your OpenAI Fine-Tuning Results in 1 Line + If you use OpenAI's API to [fine-tune OpenAI models](https://platform.openai.com/docs/guides/fine-tuning/), you can now use the W&B integration to track experiments, models, and datasets in your central dashboard. -![](/images/integrations/open_ai_api.png) + + + +```shell-session +openai wandb sync +``` + + + + +```python +from openai.wandb_logger import WandbLogger + +WandbLogger.sync(project="OpenAI-Fine-Tune") +``` + + -All it takes is one line: `openai wandb sync` + +![](/images/integrations/open_ai_auto_scan.png) ### :sparkles: Check out interactive examples @@ -69,9 +93,7 @@ WandbLogger.sync( -We scan for new completed fine-tunes and automatically add them to your dashboard. - -![](/images/integrations/open_ai_auto_scan.png) +When you sync your results, wandb checks OpenAI for newly completed fine-tunes and automatically adds them to your dashboard. In addition your training and validation files are logged and versioned, as well as details of your fine-tune results. This let you interactively explore your training and validation data. @@ -86,6 +108,7 @@ In addition your training and validation files are logged and versioned, as well | --project PROJECT | Name of the project where you're sending runs. By default, it is "GPT-3". | | --entity ENTITY | Username or team name where you're sending runs. By default, your default entity is used, which is usually your username. | | --force | Forces logging and overwrite existing wandb run of the same fine-tune. | +| --legacy | Log results from the legacy OpenAI GPT-3 fine-tune api. | | \*\*kwargs\_wandb\_init | In python, any additional argument is directly passed to [`wandb.init()`](../../../ref/python/init.md) | ### 🔍 Inspect sample predictions @@ -123,7 +146,7 @@ Perform some inferences using OpenAI API: my_prompts = ["PROMPT_1", "PROMPT_2"] results = [] for prompt in my_prompts: - res = openai.Completion.create(model=fine_tuned_model, + res = openai.ChatCompletion.create(model=fine_tuned_model, prompt=prompt, ...) results.append(res["choices"][0]["text"]) @@ -188,6 +211,6 @@ This will allow complete traceability of your models. 
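Note that for chat models `openai.ChatCompletion.create` expects a `messages` list rather than a `prompt` string, and the generated text is returned under `message.content`. A hedged sketch of the evaluation loop shown earlier on this page, with the model name and prompts as placeholders:

```python
import openai

# Sketch of an evaluation loop for a chat-based fine-tuned model (pre-1.0 openai SDK).
# The model name and prompts are placeholders, not values produced by this guide.
fine_tuned_model = "FINE_TUNED_MODEL"  # e.g. ft_artifact.metadata["fine_tuned_model"]

my_prompts = ["PROMPT_1", "PROMPT_2"]
results = []
for prompt in my_prompts:
    res = openai.ChatCompletion.create(
        model=fine_tuned_model,
        messages=[{"role": "user", "content": prompt}],
    )
    results.append(res["choices"][0]["message"]["content"])
```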
## :books: Resources -* [OpenAI Fine-tuning Documentation](https://beta.openai.com/docs/guides/fine-tuning) is very thorough and contains many useful tips +* [OpenAI Fine-tuning Documentation](https://platform.openai.com/docs/guides/fine-tuning/) is very thorough and contains many useful tips * [Demo Colab](http://wandb.me/openai-colab) * [Report - OpenAI Fine-Tuning Exploration & Tips](http://wandb.me/openai-report) From dddfb3e3ec6bff20358f6e8c3beb414534c98a8d Mon Sep 17 00:00:00 2001 From: Morgan McGuire Date: Tue, 29 Aug 2023 15:49:43 +0200 Subject: [PATCH 04/15] rename openai page and add Prompts-openai page --- ...ai-finetuning.md => openai-fine-tuning.md} | 34 ++++++++----------- docs/guides/prompts/openai.md | 11 ++++++ 2 files changed, 26 insertions(+), 19 deletions(-) rename docs/guides/integrations/other/{openai-finetuning.md => openai-fine-tuning.md} (84%) create mode 100644 docs/guides/prompts/openai.md diff --git a/docs/guides/integrations/other/openai-finetuning.md b/docs/guides/integrations/other/openai-fine-tuning.md similarity index 84% rename from docs/guides/integrations/other/openai-finetuning.md rename to docs/guides/integrations/other/openai-fine-tuning.md index 42e12d15a4..ed9813263a 100644 --- a/docs/guides/integrations/other/openai-finetuning.md +++ b/docs/guides/integrations/other/openai-fine-tuning.md @@ -9,7 +9,7 @@ import TabItem from '@theme/TabItem'; # OpenAI Fine-Tuning -Log your OpenAI model's fine-tuning metrics and configuration to Weights & Biases to analyse and understand the performance of your newly fine-tuned models and share the results with your colleagues. +With Weights & Biases you can log your OpenAI model's fine-tuning metrics and configuration to Weights & Biases to analyse and understand the performance of your newly fine-tuned models and share the results with your colleagues. ## Sync your OpenAI Fine-Tuning Results in 1 Line @@ -41,17 +41,17 @@ WandbLogger.sync(project="OpenAI-Fine-Tune") ![](/images/integrations/open_ai_auto_scan.png) -### :sparkles: Check out interactive examples +### Check out interactive examples * [Demo Colab](http://wandb.me/openai-colab) * [Report - OpenAI Fine-Tuning Exploration and Tips](http://wandb.me/openai-report) -### :tada: Sync your fine-tunes with one line! +### Sync your fine-tunes with one line Make sure you are using latest version of openai and wandb. ```shell-session -$ pip install --upgrade openai wandb +pip install --upgrade openai wandb ``` Then sync your results from the command line or from your script. @@ -65,11 +65,11 @@ Then sync your results from the command line or from your script. ```shell-session -$ # one line command -$ openai wandb sync +# one line command +openai wandb sync -$ # passing optional parameters -$ openai wandb sync --help +# passing optional parameters +openai wandb sync --help ``` @@ -95,23 +95,19 @@ WandbLogger.sync( When you sync your results, wandb checks OpenAI for newly completed fine-tunes and automatically adds them to your dashboard. -In addition your training and validation files are logged and versioned, as well as details of your fine-tune results. This let you interactively explore your training and validation data. 
- -![](/images/integrations/open_ai_validation_files.png) - -### :gear: Optional arguments +### Optional arguments | Argument | Description | | ------------------------ | ------------------------------------------------------------------------------------------------------------------------- | | -i ID, --id ID | The id of the fine-tune (optional) | | -n N, --n\_fine\_tunes N | Number of most recent fine-tunes to log when an id is not provided. By default, every fine-tune is synced. | -| --project PROJECT | Name of the project where you're sending runs. By default, it is "GPT-3". | -| --entity ENTITY | Username or team name where you're sending runs. By default, your default entity is used, which is usually your username. | +| --project PROJECT | Name of the Weights & Biases project where you're sending runs. By default, it is "OpenAI-Fine-Tune". | +| --entity ENTITY | Weights & Biases Username or team name where you're sending runs. By default, your default entity is used, which is usually your username. | | --force | Forces logging and overwrite existing wandb run of the same fine-tune. | | --legacy | Log results from the legacy OpenAI GPT-3 fine-tune api. | | \*\*kwargs\_wandb\_init | In python, any additional argument is directly passed to [`wandb.init()`](../../../ref/python/init.md) | -### 🔍 Inspect sample predictions +### Inspect sample predictions Use [Tables](../../tables/intro.md) to better visualize sample predictions and compare models. @@ -159,14 +155,14 @@ table = wandb.Table(columns=['prompt', 'completion'], data=list(zip(my_prompts, results))) ``` -## :question:Frequently Asked Questions +## Frequently Asked Questions ### How do I share my fine-tune resutls with my team in W&B? Sync all your runs to your team account with: ```shell-session -$ openai wandb sync --entity MY_TEAM_ACCOUNT +openai wandb sync --entity MY_TEAM_ACCOUNT ``` ### How can I organize my runs? @@ -209,7 +205,7 @@ This will allow complete traceability of your models. ![](/images/integrations/open_ai_faq_can_track.png) -## :books: Resources +## Resources * [OpenAI Fine-tuning Documentation](https://platform.openai.com/docs/guides/fine-tuning/) is very thorough and contains many useful tips * [Demo Colab](http://wandb.me/openai-colab) diff --git a/docs/guides/prompts/openai.md b/docs/guides/prompts/openai.md new file mode 100644 index 0000000000..050da30a30 --- /dev/null +++ b/docs/guides/prompts/openai.md @@ -0,0 +1,11 @@ +--- +slug: /guides/prompts/openai-finetuning +description: How to fine-tune OpenAI models with W&B. +displayed_sidebar: default +--- + +# OpenAI Fine-Tuning + +With Weights & Biases you can log your OpenAI model's fine-tuning metrics and configuration to Weights & Biases to analyse and understand the performance of your newly fine-tuned models and share the results with your colleagues. 
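For a quick sense of what this looks like in practice (the linked guide below has the full walkthrough), a single Python call syncs any completed fine-tunes; `OpenAI-Fine-Tune` is the integration's default project name:

```python
from openai.wandb_logger import WandbLogger

# One call checks OpenAI for completed fine-tunes and logs them to W&B.
WandbLogger.sync(project="OpenAI-Fine-Tune")
```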
+ +See the [OpenAI Fine-Tuning section here](../integrations/other/openai-fine-tuning.md) to learn more \ No newline at end of file From f5a784278596ddb5dd11fa3aabda6bbe271e6e74 Mon Sep 17 00:00:00 2001 From: Morgan McGuire Date: Tue, 29 Aug 2023 15:51:02 +0200 Subject: [PATCH 05/15] update sidebar --- sidebars.js | 1 + 1 file changed, 1 insertion(+) diff --git a/sidebars.js b/sidebars.js index 5052d6eeb5..1fadc1074c 100644 --- a/sidebars.js +++ b/sidebars.js @@ -176,6 +176,7 @@ const sidebars = { }, items: [ 'guides/prompts/quickstart', + 'guides/prompts/openai', ], }, { From 11e80fe297c615590db848abcf1f45b90af91510 Mon Sep 17 00:00:00 2001 From: Morgan McGuire Date: Tue, 29 Aug 2023 15:54:14 +0200 Subject: [PATCH 06/15] update sidebar --- sidebars.js | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/sidebars.js b/sidebars.js index 1fadc1074c..c180e79112 100644 --- a/sidebars.js +++ b/sidebars.js @@ -363,7 +363,7 @@ const sidebars = { 'guides/integrations/other/mmf', 'guides/integrations/other/composer', 'guides/integrations/other/openai-api', - 'guides/integrations/other/openai-finetuning', + 'guides/integrations/other/openai-fine-tuning', 'guides/integrations/other/openai-gym', 'guides/integrations/other/paddledetection', 'guides/integrations/other/paddleocr', @@ -558,6 +558,7 @@ const sidebars = { }, items: [ 'guides/prompts/quickstart', + 'guides/prompts/openai', ], }, { @@ -717,7 +718,7 @@ const sidebars = { 'guides/integrations/other/mmf', 'guides/integrations/other/composer', 'guides/integrations/other/openai-api', - 'guides/integrations/other/openai-finetuning', + 'guides/integrations/other/openai-fine-tuning', 'guides/integrations/other/openai-gym', 'guides/integrations/other/paddledetection', 'guides/integrations/other/paddleocr', From 002d0713113aa5ef3aca4006be7be51e48b0096b Mon Sep 17 00:00:00 2001 From: Morgan McGuire Date: Tue, 29 Aug 2023 15:59:30 +0200 Subject: [PATCH 07/15] updates --- docs/guides/integrations/other/openai-fine-tuning.md | 2 +- docs/guides/prompts/openai.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/guides/integrations/other/openai-fine-tuning.md b/docs/guides/integrations/other/openai-fine-tuning.md index ed9813263a..e994795a11 100644 --- a/docs/guides/integrations/other/openai-fine-tuning.md +++ b/docs/guides/integrations/other/openai-fine-tuning.md @@ -9,7 +9,7 @@ import TabItem from '@theme/TabItem'; # OpenAI Fine-Tuning -With Weights & Biases you can log your OpenAI model's fine-tuning metrics and configuration to Weights & Biases to analyse and understand the performance of your newly fine-tuned models and share the results with your colleagues. +With Weights & Biases you can log your OpenAI ChatGPT-3.5 or GPT-4 model's fine-tuning metrics and configuration to Weights & Biases to analyse and understand the performance of your newly fine-tuned models and share the results with your colleagues. ## Sync your OpenAI Fine-Tuning Results in 1 Line diff --git a/docs/guides/prompts/openai.md b/docs/guides/prompts/openai.md index 050da30a30..0c2df17991 100644 --- a/docs/guides/prompts/openai.md +++ b/docs/guides/prompts/openai.md @@ -8,4 +8,4 @@ displayed_sidebar: default With Weights & Biases you can log your OpenAI model's fine-tuning metrics and configuration to Weights & Biases to analyse and understand the performance of your newly fine-tuned models and share the results with your colleagues. 
-See the [OpenAI Fine-Tuning section here](../integrations/other/openai-fine-tuning.md) to learn more \ No newline at end of file +See the **[OpenAI Fine-Tuning section here](../integrations/openai)** to learn more \ No newline at end of file From b9bf6e8ba70b925073e9f6da446d651015777cd8 Mon Sep 17 00:00:00 2001 From: Morgan McGuire Date: Tue, 29 Aug 2023 16:07:40 +0200 Subject: [PATCH 08/15] fix sidebar title --- docs/guides/prompts/openai.md | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/docs/guides/prompts/openai.md b/docs/guides/prompts/openai.md index 0c2df17991..4771498bc1 100644 --- a/docs/guides/prompts/openai.md +++ b/docs/guides/prompts/openai.md @@ -1,11 +1,18 @@ --- -slug: /guides/prompts/openai-finetuning -description: How to fine-tune OpenAI models with W&B. +slug: /guides/prompts/openai +description: OpenAI integrations for Weights & Biases displayed_sidebar: default --- -# OpenAI Fine-Tuning +# OpenAI and Weights & Biases +Weights & Biases has 2 OpenAI integrations to help you train better models faster and keep track of your prompts and generations: +## OpenAI ChatGPT-3.5 and GPT-4 Fine-tuning With Weights & Biases you can log your OpenAI model's fine-tuning metrics and configuration to Weights & Biases to analyse and understand the performance of your newly fine-tuned models and share the results with your colleagues. -See the **[OpenAI Fine-Tuning section here](../integrations/openai)** to learn more \ No newline at end of file +See the **[W&B and OpenAI Fine-Tuning docs here](../integrations/openai)** to learn more + +## OpenAI Python SDK API +Log requests, responses, token counts and model metadata with 1 line of code for all OpenAI models + +See the **[W&B and OpenAI API section docs](../integrations/openai-api)** to learn more From 54b78b1ef6565d6f8bbaa69e26fbd674f1536bcb Mon Sep 17 00:00:00 2001 From: Morgan McGuire Date: Tue, 29 Aug 2023 16:09:21 +0200 Subject: [PATCH 09/15] fixes --- docs/guides/prompts/openai.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/guides/prompts/openai.md b/docs/guides/prompts/openai.md index 4771498bc1..6433fe0469 100644 --- a/docs/guides/prompts/openai.md +++ b/docs/guides/prompts/openai.md @@ -7,7 +7,7 @@ displayed_sidebar: default # OpenAI and Weights & Biases Weights & Biases has 2 OpenAI integrations to help you train better models faster and keep track of your prompts and generations: -## OpenAI ChatGPT-3.5 and GPT-4 Fine-tuning +## OpenAI ChatGPT-3.5 and GPT-4 fine-tuning With Weights & Biases you can log your OpenAI model's fine-tuning metrics and configuration to Weights & Biases to analyse and understand the performance of your newly fine-tuned models and share the results with your colleagues. 
See the **[W&B and OpenAI Fine-Tuning docs here](../integrations/openai)** to learn more From 0f0750cbe5d6eb0585a89c306f2463035a8af237 Mon Sep 17 00:00:00 2001 From: Noah Luna <15202580+ngrayluna@users.noreply.github.com> Date: Wed, 13 Sep 2023 14:48:18 -0700 Subject: [PATCH 10/15] Added OpenAI FineTuning to JA --- .../integrations/other/openai-finetuning.md | 216 ++++++++++++++++++ 1 file changed, 216 insertions(+) create mode 100644 i18n/ja/docusaurus-plugin-content-docs/current/guides/integrations/other/openai-finetuning.md diff --git a/i18n/ja/docusaurus-plugin-content-docs/current/guides/integrations/other/openai-finetuning.md b/i18n/ja/docusaurus-plugin-content-docs/current/guides/integrations/other/openai-finetuning.md new file mode 100644 index 0000000000..42e12d15a4 --- /dev/null +++ b/i18n/ja/docusaurus-plugin-content-docs/current/guides/integrations/other/openai-finetuning.md @@ -0,0 +1,216 @@ +--- +slug: /guides/integrations/openai +description: How to Fine-Tune OpenAI models using W&B. +displayed_sidebar: default +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# OpenAI Fine-Tuning + +Log your OpenAI model's fine-tuning metrics and configuration to Weights & Biases to analyse and understand the performance of your newly fine-tuned models and share the results with your colleagues. + +## Sync your OpenAI Fine-Tuning Results in 1 Line + +If you use OpenAI's API to [fine-tune OpenAI models](https://platform.openai.com/docs/guides/fine-tuning/), you can now use the W&B integration to track experiments, models, and datasets in your central dashboard. + + + + +```shell-session +openai wandb sync +``` + + + + +```python +from openai.wandb_logger import WandbLogger + +WandbLogger.sync(project="OpenAI-Fine-Tune") +``` + + + + +![](/images/integrations/open_ai_auto_scan.png) + +### :sparkles: Check out interactive examples + +* [Demo Colab](http://wandb.me/openai-colab) +* [Report - OpenAI Fine-Tuning Exploration and Tips](http://wandb.me/openai-report) + +### :tada: Sync your fine-tunes with one line! + +Make sure you are using latest version of openai and wandb. + +```shell-session +$ pip install --upgrade openai wandb +``` + +Then sync your results from the command line or from your script. + + + + +```shell-session +$ # one line command +$ openai wandb sync + +$ # passing optional parameters +$ openai wandb sync --help +``` + + + +```python +from openai.wandb_logger import WandbLogger + +# one line command +WandbLogger.sync() + +# passing optional parameters +WandbLogger.sync( + id=None, + n_fine_tunes=None, + project="OpenAI-Fine-Tune", + entity=None, + force=False, + **kwargs_wandb_init +) +``` + + + +When you sync your results, wandb checks OpenAI for newly completed fine-tunes and automatically adds them to your dashboard. + +In addition your training and validation files are logged and versioned, as well as details of your fine-tune results. This let you interactively explore your training and validation data. + +![](/images/integrations/open_ai_validation_files.png) + +### :gear: Optional arguments + +| Argument | Description | +| ------------------------ | ------------------------------------------------------------------------------------------------------------------------- | +| -i ID, --id ID | The id of the fine-tune (optional) | +| -n N, --n\_fine\_tunes N | Number of most recent fine-tunes to log when an id is not provided. By default, every fine-tune is synced. | +| --project PROJECT | Name of the project where you're sending runs. 
By default, it is "GPT-3". | +| --entity ENTITY | Username or team name where you're sending runs. By default, your default entity is used, which is usually your username. | +| --force | Forces logging and overwrite existing wandb run of the same fine-tune. | +| --legacy | Log results from the legacy OpenAI GPT-3 fine-tune api. | +| \*\*kwargs\_wandb\_init | In python, any additional argument is directly passed to [`wandb.init()`](../../../ref/python/init.md) | + +### 🔍 Inspect sample predictions + +Use [Tables](../../tables/intro.md) to better visualize sample predictions and compare models. + +![](/images/integrations/open_ai_inspect_sample.png) + +Create a new run: + +```python +run = wandb.init(project="OpenAI-Fine-Tune", job_type="eval") +``` + +Retrieve a model id for inference. + +You can use automatically logged artifacts to retrieve your latest model: + +```python +ft_artifact = run.use_artifact("ENTITY/PROJECT/fine_tune_details:latest") +fine_tuned_model = ft_artifact.metadata["fine_tuned_model"] +``` + +You can also retrieve your validation file: + +```python +artifact_valid = run.use_artifact("ENTITY/PROJECT/FILENAME:latest") +valid_file = artifact_valid.get_path("FILENAME").download() +``` + +Perform some inferences using OpenAI API: + +```python +# perform inference and record results +my_prompts = ["PROMPT_1", "PROMPT_2"] +results = [] +for prompt in my_prompts: + res = openai.ChatCompletion.create(model=fine_tuned_model, + prompt=prompt, + ...) + results.append(res["choices"][0]["text"]) +``` + +Log your results with a Table: + +```python +table = wandb.Table(columns=['prompt', 'completion'], + data=list(zip(my_prompts, results))) +``` + +## :question:Frequently Asked Questions + +### How do I share my fine-tune resutls with my team in W&B? + +Sync all your runs to your team account with: + +```shell-session +$ openai wandb sync --entity MY_TEAM_ACCOUNT +``` + +### How can I organize my runs? + +Your W&B runs are automatically organized and can be filtered/sorted based on any configuration parameter such as job type, base model, learning rate, training filename and any other hyper-parameter. + +In addition, you can rename your runs, add notes or create tags to group them. + +Once you’re satisfied, you can save your workspace and use it to create report, importing data from your runs and saved artifacts (training/validation files). + +### How can I access my fine-tune details? + +Fine-tune details are logged to W&B as artifacts and can be accessed with: + +```python +import wandb + +ft_artifact = wandb.run.use_artifact('USERNAME/PROJECT/job_details:VERSION') +``` + +where `VERSION` is either: + +* a version number such as `v2` +* the fine-tune id such as `ft-xxxxxxxxx` +* an alias added automatically such as `latest` or manually + +You can then access fine-tune details through `artifact_job.metadata`. For example, the fine-tuned model can be retrieved with `artifact_job.metadata[`"`fine_tuned_model"]`. + +### What if a fine-tune was not synced successfully? + +You can always call again `openai wandb sync` and we will re-sync any run that was not synced successfully. + +If needed, you can call `openai wandb sync --id fine_tune_id --force` to force re-syncing a specific fine-tune. + +### Can I track my datasets with W&B? + +Yes, you can integrate your entire pipeline to W&B through Artifacts, including creating your dataset, splitting it, training your models and evaluating them! + +This will allow complete traceability of your models. 
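As a loosely related sketch (not part of the integration itself), the W&B public API can be used afterwards to audit which fine-tune runs were synced to a project; the entity and project path below is a placeholder for your own account:

```python
import wandb

# List the runs that `openai wandb sync` created in a project.
api = wandb.Api()
for run in api.runs("ENTITY/OpenAI-Fine-Tune"):
    print(run.name, run.state, run.url)
```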
+ +![](/images/integrations/open_ai_faq_can_track.png) + +## :books: Resources + +* [OpenAI Fine-tuning Documentation](https://platform.openai.com/docs/guides/fine-tuning/) is very thorough and contains many useful tips +* [Demo Colab](http://wandb.me/openai-colab) +* [Report - OpenAI Fine-Tuning Exploration & Tips](http://wandb.me/openai-report) From d562ce8956c97e581018f3bdb4bca7955e780ac8 Mon Sep 17 00:00:00 2001 From: Noah Luna <15202580+ngrayluna@users.noreply.github.com> Date: Wed, 13 Sep 2023 14:50:14 -0700 Subject: [PATCH 11/15] Added Open AI v2 to JA --- .../guides/integrations/other/openai-api.md | 68 +++++++++++++++++++ 1 file changed, 68 insertions(+) create mode 100644 i18n/ja/docusaurus-plugin-content-docs/current/guides/integrations/other/openai-api.md diff --git a/i18n/ja/docusaurus-plugin-content-docs/current/guides/integrations/other/openai-api.md b/i18n/ja/docusaurus-plugin-content-docs/current/guides/integrations/other/openai-api.md new file mode 100644 index 0000000000..5b099dbfc2 --- /dev/null +++ b/i18n/ja/docusaurus-plugin-content-docs/current/guides/integrations/other/openai-api.md @@ -0,0 +1,68 @@ +--- +slug: /guides/integrations/openai-api +description: How to use W&B with the OpenAI API. +displayed_sidebar: default +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# OpenAI API + +Use the Weights & Biases OpenAI API integration to log requests, responses, token counts and model metadata with 1 line of code for all OpenAI models, including fine-tuned models. + +**[Try in a Colab Notebook here →](https://github.com/wandb/examples/blob/master/colabs/openai/OpenAI_API_Autologger_Quickstart.ipynb)** + +With just 1 line of code you can now automatically log inputs and outputs from the OpenAI Python SDK to Weights & Biases! + +![](/images/integrations/open_ai_autolog.png) + +Once you start logging your API inputs and outputs you can quickly evaluate the performance of difference prompts, compare different model settings (such as temperature), and track other usage metrics such as token usage. + +To get started, pip install the `wandb` library, then follow the steps below: + +### 1. Import autolog and initialise it +First, import `autolog` from `wandb.integration.openai` and initialise it. + +```python +import os +import openai +from wandb.integration.openai import autolog + +autolog({"project":"gpt5"}) +``` + +You can optionally pass a dictionary with argument that `wandb.init()` accepts to `autolog`. This includes a project name, team name, entity, and more. For more information about [`wandb.init`](../../../ref/python/init.md), see the API Reference Guide. + +### 2. Call the OpenAI API +Each call you make to the OpenAI API will now be logged to Weights & Biases automatically. + +```python +os.environ["OPENAI_API_KEY"] = "XXX" + +chat_request_kwargs = dict( + model="gpt-3.5-turbo", + messages=[ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Who won the world series in 2020?"}, + {"role": "assistant", "content": "The Los Angeles Dodgers"}, + {"role": "user", "content": "Where was it played?"}, + ], +) +response = openai.ChatCompletion.create(**chat_request_kwargs) +``` + +### 3. View your OpenAI API inputs and responses + +Click on the Weights & Biases [run](../../runs/intro.md) link generated by `autolog` in **step 1**. This will redirect you to your project workspace in the W&B App. 
+ +Select a run you created to view the trace table, trace timeline and the model architecture of the OpenAI LLM used. + +### 4. Disable autolog +We recommend that you call `disable()` to close all W&B processes when you are finished using the OpenAI API. + +```python +autolog.disable() +``` + +Now your inputs and completions will be logged to Weights & Biases, ready for analysis or to be shared with colleagues. \ No newline at end of file From d0e5c16919f39797db06144b9a43c7dca20e8310 Mon Sep 17 00:00:00 2001 From: Morgan McGuire Date: Fri, 15 Sep 2023 11:27:18 +0100 Subject: [PATCH 12/15] fix openai link in prompts intro --- docs/guides/prompts/intro.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/guides/prompts/intro.md b/docs/guides/prompts/intro.md index b05b33fc7b..e34ce3da1f 100644 --- a/docs/guides/prompts/intro.md +++ b/docs/guides/prompts/intro.md @@ -60,7 +60,8 @@ The Model Architecture view provides details about the structure of the chain an Weights and Biases also has lightweight integrations for: * [LangChain](../integrations/langchain.md) -* [OpenAI API](../integrations/other/openai.md) +* [OpenAI API](../integrations/openai-api) +* [OpenAI GPT-3.5 Fine-Tuning](../integrations/openai) * [Hugging Face Transformers](../integrations/huggingface.md) From fb5b3b9a5ab63123459a85684bbd587d77f87768 Mon Sep 17 00:00:00 2001 From: Morgan McGuire Date: Fri, 15 Sep 2023 11:30:45 +0100 Subject: [PATCH 13/15] fix openai link in prompts intro again --- docs/guides/prompts/intro.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/guides/prompts/intro.md b/docs/guides/prompts/intro.md index e34ce3da1f..c62d942cf0 100644 --- a/docs/guides/prompts/intro.md +++ b/docs/guides/prompts/intro.md @@ -60,8 +60,8 @@ The Model Architecture view provides details about the structure of the chain an Weights and Biases also has lightweight integrations for: * [LangChain](../integrations/langchain.md) -* [OpenAI API](../integrations/openai-api) -* [OpenAI GPT-3.5 Fine-Tuning](../integrations/openai) +* [OpenAI API](../guides/integrations/openai-api) +* [OpenAI GPT-3.5 Fine-Tuning](../guides/integrations/openai) * [Hugging Face Transformers](../integrations/huggingface.md) From 70376ffc1a811a7fd0238d109688816150bdc6fc Mon Sep 17 00:00:00 2001 From: Noah Luna <15202580+ngrayluna@users.noreply.github.com> Date: Fri, 15 Sep 2023 10:58:23 -0700 Subject: [PATCH 14/15] Removed old page --- .../guides/integrations/other/openai-api.md | 4 +- .../integrations/other/openai-finetuning.md | 7 +- .../guides/integrations/other/openai.md | 252 ------------------ 3 files changed, 3 insertions(+), 260 deletions(-) delete mode 100644 i18n/ja/docusaurus-plugin-content-docs/current/guides/integrations/other/openai.md diff --git a/i18n/ja/docusaurus-plugin-content-docs/current/guides/integrations/other/openai-api.md b/i18n/ja/docusaurus-plugin-content-docs/current/guides/integrations/other/openai-api.md index 5b099dbfc2..b0ff25d09a 100644 --- a/i18n/ja/docusaurus-plugin-content-docs/current/guides/integrations/other/openai-api.md +++ b/i18n/ja/docusaurus-plugin-content-docs/current/guides/integrations/other/openai-api.md @@ -1,7 +1,7 @@ --- slug: /guides/integrations/openai-api description: How to use W&B with the OpenAI API. 
-displayed_sidebar: default +displayed_sidebar: ja --- import Tabs from '@theme/Tabs'; @@ -32,7 +32,7 @@ from wandb.integration.openai import autolog autolog({"project":"gpt5"}) ``` -You can optionally pass a dictionary with argument that `wandb.init()` accepts to `autolog`. This includes a project name, team name, entity, and more. For more information about [`wandb.init`](../../../ref/python/init.md), see the API Reference Guide. +You can optionally pass a dictionary with argument that `wandb.init()` accepts to `autolog`. This includes a project name, team name, entity, and more. ### 2. Call the OpenAI API Each call you make to the OpenAI API will now be logged to Weights & Biases automatically. diff --git a/i18n/ja/docusaurus-plugin-content-docs/current/guides/integrations/other/openai-finetuning.md b/i18n/ja/docusaurus-plugin-content-docs/current/guides/integrations/other/openai-finetuning.md index 0ccecd7cb7..2828c7c08f 100644 --- a/i18n/ja/docusaurus-plugin-content-docs/current/guides/integrations/other/openai-finetuning.md +++ b/i18n/ja/docusaurus-plugin-content-docs/current/guides/integrations/other/openai-finetuning.md @@ -1,7 +1,7 @@ --- slug: /guides/integrations/openai description: How to Fine-Tune OpenAI models using W&B. -displayed_sidebar: default +displayed_sidebar: ja --- import Tabs from '@theme/Tabs'; @@ -23,10 +23,6 @@ If you use OpenAI's API to [fine-tune OpenAI models](https://platform.openai.com ]}> -<<<<<<< HEAD:i18n/ja/docusaurus-plugin-content-docs/current/guides/integrations/other/openai-finetuning.md -```shell-session -openai wandb sync -======= ## Log OpenAI API calls in 1 line of code **[Try in a Colab Notebook here →](https://github.com/wandb/examples/blob/master/colabs/openai/OpenAI_API_Autologger_Quickstart.ipynb)** @@ -48,7 +44,6 @@ import openai from wandb.integration.openai import autolog autolog({"project": "gpt5"}) ->>>>>>> 80e3194f4edf963a4f24122b9ad4d89ec9014953:docs/guides/integrations/other/openai.md ``` diff --git a/i18n/ja/docusaurus-plugin-content-docs/current/guides/integrations/other/openai.md b/i18n/ja/docusaurus-plugin-content-docs/current/guides/integrations/other/openai.md deleted file mode 100644 index 422bb2981d..0000000000 --- a/i18n/ja/docusaurus-plugin-content-docs/current/guides/integrations/other/openai.md +++ /dev/null @@ -1,252 +0,0 @@ ---- -slug: /guides/integrations/openai -description: How to integrate W&B with OpenAI. -displayed_sidebar: ja ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - -# OpenAI API - -:::info -**Beta Integration**: これは新しい機能であり、この機能を改善するために積極的に取り組んでいます。何かフィードバックがあれば、お気軽にお問い合わせください — contact@wandb.com -::: - -OpenAIのAPIは、機械学習開発者がGPT-4にアクセスできるようにします。これは非常にパワフルな自然言語モデルであり、自然言語を理解したり生成したりするタスクにほぼ適用できます。 - -## OpenAI APIコールを1行のコードでログに記録する -たった1行のコードで、OpenAI Python SDKからWeights & Biasesに入力と出力を自動的にログに記録できるようになりました! - -![](/images/integrations/open_ai_autolog.png) - -始めるには、`wandb`ライブラリをpipでインストールし、以下の手順に従ってください。 - -### 1. autologをインポートして初期化する -まず、`wandb.integration.openai`から`autolog`をインポートし、初期化します。 - -```python -import os -import openai -from wandb.integration.openai import autolog - -autolog({"project": "gpt5"}) -``` - -必要に応じて、`autolog`に`wandb.init()`が受け付ける引数を含むディクショナリを渡すことができます。これには、プロジェクト名、チーム名、エンティティなどが含まれます。 [`wandb.init`](../../../ref/python/init.md)についての詳細は、APIリファレンスガイドを参照してください。 - -### 2. 
OpenAI APIを呼び出す -これで、OpenAI APIへの各呼び出しがWeights&Biasesに自動的に記録されます。 - -```python -os.environ["OPENAI_API_KEY"] = "XXX" - -chat_request_kwargs = dict( - model="gpt-3.5-turbo", - messages=[ - {"role": "system", "content": "あなたは親切なアシスタントです。"}, - {"role": "user", "content": "2020年のワールドシリーズで誰が勝ちましたか?"}, - {"role": "assistant", "content": "ロサンゼルス・ドジャースです"}, - {"role": "user", "content": "それはどこで行われましたか?"}, - ], -) -response = openai.ChatCompletion.create(**chat_request_kwargs) -``` - -### 3. OpenAI APIの入力とレスポンスを表示する - -**手順1**で`autolog`によって生成されたWeights&Biases [run](../../runs/intro.md)リンクをクリックします。これにより、W&Bアプリのプロジェクトワークスペースにリダイレクトされます。 - -作成したrunを選択して、トレーステーブル、トレースタイムライン、および使用されたOpenAI LLMのアーキテクチャーを表示します。 -### 4. オートログを無効にする -OpenAI APIを使用し終わったら、`disable()`を呼び出して、W&Bのすべてのプロセスを閉じることをお勧めします。 - -```python -autolog.disable() -``` - -これで、入力と完成がWeights & Biasesにログされ、分析や同僚との共有の準備が整います。 - -## W&BにOpenAIの微調整をログする - -OpenAIのAPIを使って[GPT-3を微調整する](https://beta.openai.com/docs/guides/fine-tuning)場合、W&Bの統合を利用して、実験、モデル、データセットを一元管理できるダッシュボードでトラッキングできるようになります。 - -![](/images/integrations/open_ai_api.png) - -必要なのは、`openai wandb sync`という一行だけです。 - -## :sparkles: インタラクティブな例をチェックしよう - -* [デモColab](http://wandb.me/openai-colab) -* [レポート - GPT-3の探索と微調整のヒント](http://wandb.me/openai-report) - -## :tada: 1行で微調整を同期しよう! - -openaiとwandbの最新バージョンを使用していることを確認してください。 - -```shell-session -$ pip install --upgrade openai wandb -``` - -次に、コマンドラインまたはスクリプトから結果を同期します。 - - - - -```shell-session -$ # 1行のコマンド -$ openai wandb sync - -$ # オプションパラメータを渡す -$ openai wandb sync --help -``` - - - -```python -from openai.wandb_logger import WandbLogger - -# 1行のコマンド -WandbLogger.sync() - -# オプションパラメータを渡す -WandbLogger.sync( - id=None, - n_fine_tunes=None, - project="GPT-3", - entity=None, - force=False, - **kwargs_wandb_init -) -``` - - -新しい完了した微調整をスキャンし、自動的にダッシュボードに追加します。 - -![](/images/integrations/open_ai_auto_scan.png) - -さらに、トレーニングと検証のファイルがログ化されバージョン管理され、微調整の結果の詳細も記録されます。これにより、トレーニングデータと検証データを対話式に調べることができます。 - -![](/images/integrations/open_ai_validation_files.png) - -## :gear: 任意の引数 - -| 引数 | 説明 | -| ------------------------ | ------------------------------------------------------------------------------------------------------------------------ | -| -i ID, --id ID | 微調整のID(任意) | -| -n N, --n\_fine\_tunes N | IDが提供されていない場合にログする最も新しい微調整の数。デフォルトでは、すべての微調整が同期されます。 | -| --project PROJECT | プロジェクトの名前。デフォルトでは、「GPT-3」です。 | -| --entity ENTITY | runsを送信するユーザー名またはチーム名。デフォルトでは、デフォルトのエンティティ(通常はユーザー名)が使用されます。 | -| --force | ログ記録を強制し、同じ微調整の既存のwandb runを上書きします。 | -| \*\*kwargs\_wandb\_init | Pythonでは、追加の引数は[`wandb.init()`](../../../ref/python/init.md)に直接渡されます。 | - -## 🔍 サンプル予測の検証 - -[Tables](../../tables/intro.md)を使用して、サンプル予測をより良く可視化し、モデルを比較します。 - -![](/images/integrations/open_ai_inspect_sample.png) - -新しいrunを作成します: - -```python -run = wandb.init(project="GPT-3", job_type="eval") -``` -推論用のモデルIDを取得します。 - -自動的にログされたアーティファクトを使用して、最新のモデルを取得できます。 - -```python -artifact_job = run.use_artifact("ENTITY/PROJECT/fine_tune_details:latest") -fine_tuned_model = artifact_job.metadata["fine_tuned_model"] -``` - -検証ファイルも取得できます。 - -```python -artifact_valid = run.use_artifact("ENTITY/PROJECT/FILENAME:latest") -valid_file = artifact_valid.get_path("FILENAME").download() -``` - -OpenAI APIを使っていくつかの推論を行います。 - -```python -# 推論を行い結果を記録する -my_prompts = ["PROMPT_1", "PROMPT_2"] -results = [] -for prompt in my_prompts: - res = openai.Completion.create(model=fine_tuned_model, prompt=prompt, ...) 
- results.append(res["choices"][0]["text"]) -``` - -結果をテーブルでログします。 -```python -table = wandb.Table( - columns=["prompt", "completion"], data=list(zip(my_prompts, results)) -) -``` - -## :question:よくある質問 - -### どのようにしてチームとrunを共有できますか? - -以下のようにして、すべてのrunをチームアカウントと同期させます。 - -```shell-session -$ openai wandb sync --entity MY_TEAM_ACCOUNT -``` - -### runをどのように整理できますか? - -runは自動的に整理され、ジョブタイプ、ベースモデル、学習率、トレーニングファイル名、その他のハイパーパラメータなどの設定パラメータに基づいてフィルター/並び替えができます。 - -また、runの名前を変更したり、ノートを追加したり、タグを作成してグループ化することができます。 - -満足したら、ワークスペースを保存し、レポートを作成するために、runからデータや保存されたアーティファクト(トレーニング/検証ファイル)をインポートできます。 - -### 微調整の詳細にどのようにアクセスできますか? - -微調整の詳細はW&Bにアーティファクトとしてログされており、以下のようにアクセスできます。 - -```python -import wandb -``` -artifact_job = wandb.run.use_artifact('USERNAME/PROJECT/job_details:VERSION') - -``` - -ここで`VERSION`は以下のいずれかです。 - -* バージョン番号(例:`v2`) - -* 微調整ID(例:`ft-xxxxxxxxx`) - -* 自動的に追加されたエイリアス(例:`latest`)または手動で追加されたエイリアス - -その後、`artifact_job.metadata` を通して微調整の詳細にアクセスできます。例えば、微調整されたモデルは `artifact_job.metadata["fine_tuned_model"]`で取得できます。 - -### ファインチューンが正常に同期されなかった場合は? - -いつでも `openai wandb sync` を再度呼び出すことで、正常に同期されなかったランを再同期できます。 - -必要に応じて、`openai wandb sync --id fine_tune_id --force` を呼び出して、特定のファインチューンを強制的に再同期できます。 - -### W&Bでデータセットをトラッキングできますか? - -はい、Artifactsを通じて、データセットの作成、分割、モデルのトレーニングおよび評価を含む、W&Bの完全な開発フローを統合できます! - -これにより、モデルの完全なトレーサビリティが実現されます。 - -![](/images/integrations/open_ai_faq_can_track.png) - -## :books: リソース - -* [OpenAI Fine-tuning Documentation](https://beta.openai.com/docs/guides/fine-tuning) は非常に詳細で、多くの有益なヒントが含まれています。 - -* [デモColab](http://wandb.me/openai-colab) - -* [レポート - GPT-3 Exploration & Fine-tuning Tips](http://wandb.me/openai-report) \ No newline at end of file From 6cc898e27361e0b0148eb80d36143c2a409d72c5 Mon Sep 17 00:00:00 2001 From: Noah Luna <15202580+ngrayluna@users.noreply.github.com> Date: Fri, 15 Sep 2023 11:04:03 -0700 Subject: [PATCH 15/15] Fixed buld break --- .../guides/integrations/other/openai-api.md | 4 +- ...ai-finetuning.md => openai-fine-tuning.md} | 72 ++++++------------- .../guides/integrations/other/openai-gym.md | 16 +++-- 3 files changed, 33 insertions(+), 59 deletions(-) rename i18n/ja/docusaurus-plugin-content-docs/current/guides/integrations/other/{openai-finetuning.md => openai-fine-tuning.md} (68%) diff --git a/i18n/ja/docusaurus-plugin-content-docs/current/guides/integrations/other/openai-api.md b/i18n/ja/docusaurus-plugin-content-docs/current/guides/integrations/other/openai-api.md index b0ff25d09a..5b099dbfc2 100644 --- a/i18n/ja/docusaurus-plugin-content-docs/current/guides/integrations/other/openai-api.md +++ b/i18n/ja/docusaurus-plugin-content-docs/current/guides/integrations/other/openai-api.md @@ -1,7 +1,7 @@ --- slug: /guides/integrations/openai-api description: How to use W&B with the OpenAI API. -displayed_sidebar: ja +displayed_sidebar: default --- import Tabs from '@theme/Tabs'; @@ -32,7 +32,7 @@ from wandb.integration.openai import autolog autolog({"project":"gpt5"}) ``` -You can optionally pass a dictionary with argument that `wandb.init()` accepts to `autolog`. This includes a project name, team name, entity, and more. +You can optionally pass a dictionary with argument that `wandb.init()` accepts to `autolog`. This includes a project name, team name, entity, and more. For more information about [`wandb.init`](../../../ref/python/init.md), see the API Reference Guide. ### 2. Call the OpenAI API Each call you make to the OpenAI API will now be logged to Weights & Biases automatically. 
diff --git a/i18n/ja/docusaurus-plugin-content-docs/current/guides/integrations/other/openai-finetuning.md b/i18n/ja/docusaurus-plugin-content-docs/current/guides/integrations/other/openai-fine-tuning.md similarity index 68% rename from i18n/ja/docusaurus-plugin-content-docs/current/guides/integrations/other/openai-finetuning.md rename to i18n/ja/docusaurus-plugin-content-docs/current/guides/integrations/other/openai-fine-tuning.md index 2828c7c08f..e994795a11 100644 --- a/i18n/ja/docusaurus-plugin-content-docs/current/guides/integrations/other/openai-finetuning.md +++ b/i18n/ja/docusaurus-plugin-content-docs/current/guides/integrations/other/openai-fine-tuning.md @@ -1,7 +1,7 @@ --- slug: /guides/integrations/openai description: How to Fine-Tune OpenAI models using W&B. -displayed_sidebar: ja +displayed_sidebar: default --- import Tabs from '@theme/Tabs'; @@ -9,7 +9,7 @@ import TabItem from '@theme/TabItem'; # OpenAI Fine-Tuning -Log your OpenAI model's fine-tuning metrics and configuration to Weights & Biases to analyse and understand the performance of your newly fine-tuned models and share the results with your colleagues. +With Weights & Biases you can log your OpenAI ChatGPT-3.5 or GPT-4 model's fine-tuning metrics and configuration to Weights & Biases to analyse and understand the performance of your newly fine-tuned models and share the results with your colleagues. ## Sync your OpenAI Fine-Tuning Results in 1 Line @@ -23,27 +23,8 @@ If you use OpenAI's API to [fine-tune OpenAI models](https://platform.openai.com ]}> -## Log OpenAI API calls in 1 line of code - -**[Try in a Colab Notebook here →](https://github.com/wandb/examples/blob/master/colabs/openai/OpenAI_API_Autologger_Quickstart.ipynb)** - -With just 1 line of code you can now automatically log inputs and outputs from the OpenAI Python SDK to Weights & Biases! - -![](/images/integrations/open_ai_autolog.png) - -Once you start logging your API inputs and outputs you can quickly evaluate the performance of difference prompts, compare different model settings (such as temperature), and track other usage metrics such as token usage. - -To get started, pip install the `wandb` library, then follow the steps below: - -### 1. Import autolog and initialise it -First, import `autolog` from `wandb.integration.openai` and initialise it. - -```python -import os -import openai -from wandb.integration.openai import autolog - -autolog({"project": "gpt5"}) +```shell-session +openai wandb sync ``` @@ -60,17 +41,17 @@ WandbLogger.sync(project="OpenAI-Fine-Tune") ![](/images/integrations/open_ai_auto_scan.png) -### :sparkles: Check out interactive examples +### Check out interactive examples * [Demo Colab](http://wandb.me/openai-colab) * [Report - OpenAI Fine-Tuning Exploration and Tips](http://wandb.me/openai-report) -### :tada: Sync your fine-tunes with one line! +### Sync your fine-tunes with one line Make sure you are using latest version of openai and wandb. ```shell-session -$ pip install --upgrade openai wandb +pip install --upgrade openai wandb ``` Then sync your results from the command line or from your script. @@ -84,11 +65,11 @@ Then sync your results from the command line or from your script. 
```shell-session -$ # one line command -$ openai wandb sync +# one line command +openai wandb sync -$ # passing optional parameters -$ openai wandb sync --help +# passing optional parameters +openai wandb sync --help ``` @@ -114,23 +95,19 @@ WandbLogger.sync( When you sync your results, wandb checks OpenAI for newly completed fine-tunes and automatically adds them to your dashboard. -In addition your training and validation files are logged and versioned, as well as details of your fine-tune results. This let you interactively explore your training and validation data. - -![](/images/integrations/open_ai_validation_files.png) - -### :gear: Optional arguments +### Optional arguments | Argument | Description | | ------------------------ | ------------------------------------------------------------------------------------------------------------------------- | | -i ID, --id ID | The id of the fine-tune (optional) | | -n N, --n\_fine\_tunes N | Number of most recent fine-tunes to log when an id is not provided. By default, every fine-tune is synced. | -| --project PROJECT | Name of the project where you're sending runs. By default, it is "GPT-3". | -| --entity ENTITY | Username or team name where you're sending runs. By default, your default entity is used, which is usually your username. | +| --project PROJECT | Name of the Weights & Biases project where you're sending runs. By default, it is "OpenAI-Fine-Tune". | +| --entity ENTITY | Weights & Biases Username or team name where you're sending runs. By default, your default entity is used, which is usually your username. | | --force | Forces logging and overwrite existing wandb run of the same fine-tune. | | --legacy | Log results from the legacy OpenAI GPT-3 fine-tune api. | | \*\*kwargs\_wandb\_init | In python, any additional argument is directly passed to [`wandb.init()`](../../../ref/python/init.md) | -### 🔍 Inspect sample predictions +### Inspect sample predictions Use [Tables](../../tables/intro.md) to better visualize sample predictions and compare models. @@ -165,32 +142,27 @@ Perform some inferences using OpenAI API: my_prompts = ["PROMPT_1", "PROMPT_2"] results = [] for prompt in my_prompts: -<<<<<<< HEAD:i18n/ja/docusaurus-plugin-content-docs/current/guides/integrations/other/openai-finetuning.md res = openai.ChatCompletion.create(model=fine_tuned_model, prompt=prompt, ...) -======= - res = openai.Completion.create(model=fine_tuned_model, prompt=prompt, ...) ->>>>>>> 80e3194f4edf963a4f24122b9ad4d89ec9014953:docs/guides/integrations/other/openai.md results.append(res["choices"][0]["text"]) ``` Log your results with a Table: ```python -table = wandb.Table( - columns=["prompt", "completion"], data=list(zip(my_prompts, results)) -) +table = wandb.Table(columns=['prompt', 'completion'], + data=list(zip(my_prompts, results))) ``` -## :question:Frequently Asked Questions +## Frequently Asked Questions ### How do I share my fine-tune resutls with my team in W&B? Sync all your runs to your team account with: ```shell-session -$ openai wandb sync --entity MY_TEAM_ACCOUNT +openai wandb sync --entity MY_TEAM_ACCOUNT ``` ### How can I organize my runs? 
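One detail the sample-predictions walkthrough in the hunk above leaves implicit is that the `wandb.Table` only appears in the UI once it is logged to the run. A minimal sketch of that final step, assuming the `run`, `my_prompts`, and `results` objects defined in that walkthrough:

```python
import wandb

# `run`, `my_prompts`, and `results` are assumed to come from the
# sample-predictions walkthrough above (hypothetical prompts and completions).
table = wandb.Table(
    columns=["prompt", "completion"], data=list(zip(my_prompts, results))
)
run.log({"predictions": table})  # the table shows up in the UI once it is logged
run.finish()
```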
@@ -208,11 +180,7 @@ Fine-tune details are logged to W&B as artifacts and can be accessed with: ```python import wandb -<<<<<<< HEAD:i18n/ja/docusaurus-plugin-content-docs/current/guides/integrations/other/openai-finetuning.md ft_artifact = wandb.run.use_artifact('USERNAME/PROJECT/job_details:VERSION') -======= -artifact_job = wandb.run.use_artifact("USERNAME/PROJECT/job_details:VERSION") ->>>>>>> 80e3194f4edf963a4f24122b9ad4d89ec9014953:docs/guides/integrations/other/openai.md ``` where `VERSION` is either: @@ -237,7 +205,7 @@ This will allow complete traceability of your models. ![](/images/integrations/open_ai_faq_can_track.png) -## :books: Resources +## Resources * [OpenAI Fine-tuning Documentation](https://platform.openai.com/docs/guides/fine-tuning/) is very thorough and contains many useful tips * [Demo Colab](http://wandb.me/openai-colab) diff --git a/i18n/ja/docusaurus-plugin-content-docs/current/guides/integrations/other/openai-gym.md b/i18n/ja/docusaurus-plugin-content-docs/current/guides/integrations/other/openai-gym.md index 4afb9d6843..b4c34f87a2 100644 --- a/i18n/ja/docusaurus-plugin-content-docs/current/guides/integrations/other/openai-gym.md +++ b/i18n/ja/docusaurus-plugin-content-docs/current/guides/integrations/other/openai-gym.md @@ -1,15 +1,21 @@ --- slug: /guides/integrations/openai-gym description: How to integrate W&B with OpenAI Gym. -displayed_sidebar: ja +displayed_sidebar: default --- # OpenAI Gym -[OpenAI Gym](https://gym.openai.com/)をお使いの場合、`gym.wrappers.Monitor`で生成された環境のビデオを自動的に記録します。[`wandb.init`](../../../ref/python/init.md)の`monitor_gym`キーワード引数を`True`に設定するか、`wandb.gym.monitor()`を呼び出してください。 +:::info +"The team that has been maintaining Gym since 2021 has moved all future development to [Gymnasium](https://github.com/Farama-Foundation/Gymnasium), a drop in replacement for Gym (import gymnasium as gym), and Gym will not be receiving any future updates." ([Source](https://github.com/openai/gym#the-team-that-has-been-maintaining-gym-since-2021-has-moved-all-future-development-to-gymnasium-a-drop-in-replacement-for-gym-import-gymnasium-as-gym-and-gym-will-not-be-receiving-any-future-updates-please-switch-over-to-gymnasium-as-soon-as-youre-able-to-do-so-if-youd-like-to-read-more-about-the-story-behind-this-switch-please-check-out-this-blog-post)) -Gymとの統合は非常に軽量です。`gym`から記録されているビデオファイルの名前を[調べ](https://github.com/wandb/wandb/blob/master/wandb/integration/gym/__init__.py#L15)、それに基づいて名前を付けたり、マッチしない場合は`"videos"`にフォールバックします。さらにコントロールが必要な場合は、手動で[ビデオを記録](../../track/log/media.md)することもできます。 +Since Gym is no longer an actively maintained project, try out our integration with Gymnasium. Learn more about it here # TODO add link. +::: -[CleanRL](https://github.com/vwxyzjn/cleanrl)の[OpenRL Benchmark](http://wandb.me/openrl-benchmark-report)は、OpenAI Gymの例でこの統合を使用しています。Gymを使って実行する方法を示すソースコード([特定のランで使用された特定のコード](https://wandb.ai/cleanrl/cleanrl.benchmark/runs/2jrqfugg/code?workspace=user-costa-huang)を含む)があります。 +If you're using [OpenAI Gym](https://gym.openai.com/) we will automatically log videos of your environment generated by `gym.wrappers.Monitor`. Just set the `monitor_gym` keyword argument to [`wandb.init`](../../../ref/python/init.md) to `True` or call `wandb.gym.monitor()`. -![詳細はこちら: http://wandb.me/openrl-benchmark-report](/images/integrations/open_ai_report_example.png) \ No newline at end of file +Our gym integration is very light. 
We simply [look at the name of the video file](https://github.com/wandb/wandb/blob/master/wandb/integration/gym/__init__.py#L15) being logged from `gym` and name it after that or fall back to `"videos"` if we don't find a match. If you want more control, you can always just manually [log a video](../../track/log/media.md).
+
+The [OpenRL Benchmark](http://wandb.me/openrl-benchmark-report) by [CleanRL](https://github.com/vwxyzjn/cleanrl) uses this integration for its OpenAI Gym examples. You can find source code (including [the specific code used for specific runs](https://wandb.ai/cleanrl/cleanrl.benchmark/runs/2jrqfugg/code?workspace=user-costa-huang)) that demonstrates how to use gym with Weights & Biases.
+
+![Learn more here: http://wandb.me/openrl-benchmark-report](/images/integrations/open_ai_report_example.png)
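To make the wiring described above concrete, a minimal sketch of the integration might look like the following. It assumes an older Gym release that still ships `gym.wrappers.Monitor` (see the deprecation note earlier in this diff), and the project name and environment are placeholders:

```python
import gym
import wandb

# monitor_gym=True tells W&B to pick up videos written by gym.wrappers.Monitor
run = wandb.init(project="gym-demo", monitor_gym=True)  # placeholder project name

# Wrap the environment so episode videos are written to ./videos
env = gym.wrappers.Monitor(gym.make("CartPole-v1"), "./videos", force=True)

obs = env.reset()
done = False
while not done:
    # Random policy, just to generate an episode video
    obs, reward, done, info = env.step(env.action_space.sample())

env.close()
run.finish()  # videos captured by the wrapper are uploaded with the run
```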