From 17a886cb5825cd8c26df4e65f7112d404b99fe12 Mon Sep 17 00:00:00 2001
From: Ma Zerun
Date: Thu, 4 Jan 2024 20:43:27 +0800
Subject: [PATCH] Bump version to v1.2.0 (#1860)

* [Fix] Fix resize mix argument bug.

* Bump version to v1.2.0

* Fix UT
---
 README.md | 5 +++++
 README_zh-CN.md | 5 +++++
 docker/serve/Dockerfile | 2 +-
 docs/en/notes/changelog.md | 11 +++++++++++
 docs/en/notes/faq.md | 3 ++-
 docs/zh_CN/notes/faq.md | 3 ++-
 mmpretrain/__init__.py | 2 +-
 mmpretrain/models/utils/batch_augments/resizemix.py | 2 +-
 mmpretrain/version.py | 2 +-
 requirements/mminstall.txt | 2 +-
 requirements/optional.txt | 2 +-
 tests/test_models/test_backbones/test_repmlp.py | 3 ++-
 12 files changed, 33 insertions(+), 9 deletions(-)

diff --git a/README.md b/README.md
index 78d56fc1293..5318df5b958 100644
--- a/README.md
+++ b/README.md
@@ -86,6 +86,11 @@ https://github.com/open-mmlab/mmpretrain/assets/26739999/e4dcd3a2-f895-4d1b-a351
 
 ## What's new
 
+🌟 v1.2.0 was released in 04/01/2024
+
+- Support LLaVA 1.5.
+- Implement RAM with a Gradio interface.
+
 🌟 v1.1.0 was released in 12/10/2023
 
 - Support Mini-GPT4 training and provide a Chinese model (based on Baichuan-7B)
diff --git a/README_zh-CN.md b/README_zh-CN.md
index 06daeb1ce97..9ee8dffc401 100644
--- a/README_zh-CN.md
+++ b/README_zh-CN.md
@@ -84,6 +84,11 @@ https://github.com/open-mmlab/mmpretrain/assets/26739999/e4dcd3a2-f895-4d1b-a351
 
 ## 更新日志
 
+🌟 2024/01/04 发布了 v1.2.0 版本
+
+- 支持了 LLaVA 1.5
+- 实现了一个 RAM 模型的 gradio 推理例程
+
 🌟 2023/10/12 发布了 v1.1.0 版本
 
 - 支持 Mini-GPT4 训练并提供一个基于 Baichuan-7B 的中文模型
diff --git a/docker/serve/Dockerfile b/docker/serve/Dockerfile
index 40ba0409db8..c50c4e8ee82 100644
--- a/docker/serve/Dockerfile
+++ b/docker/serve/Dockerfile
@@ -3,7 +3,7 @@ ARG CUDA="11.7"
 ARG CUDNN="8"
 
 FROM pytorch/torchserve:latest-gpu
 
-ARG MMPRE="1.1.1"
+ARG MMPRE="1.2.0"
 
 ENV PYTHONUNBUFFERED TRUE
diff --git a/docs/en/notes/changelog.md b/docs/en/notes/changelog.md
index 7a8ab6808ad..499ed24f649 100644
--- a/docs/en/notes/changelog.md
+++ b/docs/en/notes/changelog.md
@@ -1,5 +1,16 @@
 # Changelog (MMPreTrain)
 
+## v1.2.0(04/01/2024)
+
+### New Features
+
+- [Feature] Support LLaVA 1.5 ([#1853](https://github.com/open-mmlab/mmpretrain/pull/1853))
+- [Feature] Implement RAM with a Gradio interface. ([#1802](https://github.com/open-mmlab/mmpretrain/pull/1802))
+
+### Bug Fix
+
+- [Fix] Fix resize mix argument bug.
+
 ## v1.1.0(12/10/2023)
 
 ### New Features
diff --git a/docs/en/notes/faq.md b/docs/en/notes/faq.md
index d83e5260a9a..da45841bb10 100644
--- a/docs/en/notes/faq.md
+++ b/docs/en/notes/faq.md
@@ -16,7 +16,8 @@ and make sure you fill in all required information in the template.
| MMPretrain version | MMEngine version | MMCV version | | :----------------: | :---------------: | :--------------: | - | 1.1.1 (main) | mmengine >= 0.8.3 | mmcv >= 2.0.0 | + | 1.2.0 (main) | mmengine >= 0.8.3 | mmcv >= 2.0.0 | + | 1.1.1 | mmengine >= 0.8.3 | mmcv >= 2.0.0 | | 1.0.0 | mmengine >= 0.8.0 | mmcv >= 2.0.0 | | 1.0.0rc8 | mmengine >= 0.7.1 | mmcv >= 2.0.0rc4 | | 1.0.0rc7 | mmengine >= 0.5.0 | mmcv >= 2.0.0rc4 | diff --git a/docs/zh_CN/notes/faq.md b/docs/zh_CN/notes/faq.md index 6a5fdc469a9..9e94cd8b1f3 100644 --- a/docs/zh_CN/notes/faq.md +++ b/docs/zh_CN/notes/faq.md @@ -13,7 +13,8 @@ | MMPretrain 版本 | MMEngine 版本 | MMCV 版本 | | :-------------: | :---------------: | :--------------: | - | 1.1.1 (main) | mmengine >= 0.8.3 | mmcv >= 2.0.0 | + | 1.2.0 (main) | mmengine >= 0.8.3 | mmcv >= 2.0.0 | + | 1.1.1 | mmengine >= 0.8.3 | mmcv >= 2.0.0 | | 1.0.0 | mmengine >= 0.8.0 | mmcv >= 2.0.0 | | 1.0.0rc8 | mmengine >= 0.7.1 | mmcv >= 2.0.0rc4 | | 1.0.0rc7 | mmengine >= 0.5.0 | mmcv >= 2.0.0rc4 | diff --git a/mmpretrain/__init__.py b/mmpretrain/__init__.py index 69c585bd26f..66866a86277 100644 --- a/mmpretrain/__init__.py +++ b/mmpretrain/__init__.py @@ -7,7 +7,7 @@ from .version import __version__ mmcv_minimum_version = '2.0.0' -mmcv_maximum_version = '2.2.0' +mmcv_maximum_version = '2.4.0' mmcv_version = digit_version(mmcv.__version__) mmengine_minimum_version = '0.8.3' diff --git a/mmpretrain/models/utils/batch_augments/resizemix.py b/mmpretrain/models/utils/batch_augments/resizemix.py index 89cfb72033e..c70f81b3d61 100644 --- a/mmpretrain/models/utils/batch_augments/resizemix.py +++ b/mmpretrain/models/utils/batch_augments/resizemix.py @@ -87,7 +87,7 @@ def mix(self, batch_inputs: torch.Tensor, (y1, y2, x1, x2), lam = self.cutmix_bbox_and_lam(img_shape, lam) batch_inputs[:, :, y1:y2, x1:x2] = F.interpolate( batch_inputs[index], - size=(y2 - y1, x2 - x1), + size=(int(y2 - y1), int(x2 - x1)), mode=self.interpolation, align_corners=False) mixed_scores = lam * batch_scores + (1 - lam) * batch_scores[index, :] diff --git a/mmpretrain/version.py b/mmpretrain/version.py index 8f8c8b7f0a9..1822b7f272b 100644 --- a/mmpretrain/version.py +++ b/mmpretrain/version.py @@ -1,6 +1,6 @@ # Copyright (c) OpenMMLab. All rights reserved -__version__ = '1.1.1' +__version__ = '1.2.0' def parse_version_info(version_str): diff --git a/requirements/mminstall.txt b/requirements/mminstall.txt index 197701a1a0b..9b736b028bd 100644 --- a/requirements/mminstall.txt +++ b/requirements/mminstall.txt @@ -1,2 +1,2 @@ -mmcv>=2.0.0,<2.3.0 +mmcv>=2.0.0,<2.4.0 mmengine>=0.8.3,<1.0.0 diff --git a/requirements/optional.txt b/requirements/optional.txt index 85853cda452..5f31808f14b 100644 --- a/requirements/optional.txt +++ b/requirements/optional.txt @@ -1,4 +1,4 @@ albumentations>=0.3.2 --no-binary qudida,albumentations # For Albumentations data transform -grad-cam >= 1.3.7 # For CAM visualization +grad-cam >= 1.3.7,<1.5.0 # For CAM visualization requests # For torchserve scikit-learn # For t-SNE visualization and unit tests. diff --git a/tests/test_models/test_backbones/test_repmlp.py b/tests/test_models/test_backbones/test_repmlp.py index bfcb5dfccb1..f03fce4ed41 100644 --- a/tests/test_models/test_backbones/test_repmlp.py +++ b/tests/test_models/test_backbones/test_repmlp.py @@ -169,4 +169,5 @@ def test_deploy_(self): assert len(feats_) == len(feats__) for i in range(len(feats)): - self.assertTrue(torch.allclose(feats__[i], feats_[i])) + self.assertTrue( + torch.allclose(feats__[i], feats_[i], rtol=0.01, atol=0.01))
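A few standalone sketches of the code hunks above follow. First, the `mmpretrain/__init__.py` and `requirements/mminstall.txt` hunks raise the supported mmcv ceiling to 2.4.0 (exclusive). As a rough illustration of the half-open range check such a ceiling feeds into, the sketch below uses a made-up `parse_simple_version` helper for plain `X.Y.Z` strings; it is not the package's actual `digit_version`-based guard and ignores rc/dev suffixes.

```python
# Standalone sketch, not the package's real code: the half-open version check
# implied by the new bounds mmcv>=2.0.0,<2.4.0. parse_simple_version is a
# hypothetical helper that only understands plain X.Y.Z release strings.
from typing import Tuple

MMCV_MINIMUM = '2.0.0'
MMCV_MAXIMUM = '2.4.0'  # exclusive upper bound


def parse_simple_version(version: str) -> Tuple[int, ...]:
    """Convert 'X.Y.Z' into a tuple of ints so versions compare numerically."""
    return tuple(int(part) for part in version.split('.'))


def mmcv_is_supported(installed: str) -> bool:
    """Return True when MMCV_MINIMUM <= installed < MMCV_MAXIMUM."""
    value = parse_simple_version(installed)
    return (parse_simple_version(MMCV_MINIMUM) <= value
            < parse_simple_version(MMCV_MAXIMUM))


if __name__ == '__main__':
    for candidate in ('2.0.0', '2.1.0', '2.3.9', '2.4.0'):
        print(candidate, mmcv_is_supported(candidate))
    # 2.4.0 prints False: the ceiling is exclusive, so mmcv 2.4.x would need
    # another compatibility bump.
```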
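On the `resizemix.py` hunk: `torch.nn.functional.interpolate` expects plain Python ints in its `size` argument, while the sampled box extents can arrive as numpy or tensor integer scalars that some PyTorch versions reject. Below is a minimal, self-contained sketch of that failure mode and of the `int(...)` cast the patch applies; the batch shape, box coordinates, and `bilinear` mode are illustrative assumptions rather than values taken from the library.

```python
# Standalone sketch of the ResizeMix-style paste step; shapes, coordinates and
# the 'bilinear' mode are toy values chosen for illustration only.
import numpy as np
import torch
import torch.nn.functional as F

batch = torch.rand(4, 3, 32, 32)        # toy image batch (N, C, H, W)
index = torch.randperm(batch.size(0))   # shuffled pairing, as in batch mixing

# Coordinates sampled via numpy are np.int64 scalars, so (y2 - y1, x2 - x1)
# are numpy ints, which some PyTorch versions refuse in `size`.
y1, y2, x1, x2 = np.int64(4), np.int64(20), np.int64(6), np.int64(30)

# Casting each extent to a plain Python int keeps `size` acceptable everywhere.
resized = F.interpolate(
    batch[index],
    size=(int(y2 - y1), int(x2 - x1)),
    mode='bilinear',
    align_corners=False)
batch[:, :, y1:y2, x1:x2] = resized     # paste the resized images into the box
print(resized.shape)                    # torch.Size([4, 3, 16, 24])
```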
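Lastly, the `test_repmlp.py` hunk loosens the comparison between features computed before and after `switch_to_deploy`. `torch.allclose(a, b, rtol=..., atol=...)` treats elements as equal when `|a - b| <= atol + rtol * |b|`, so `rtol=0.01, atol=0.01` tolerates the small numerical drift that reparameterization introduces. A tiny sketch with made-up numbers:

```python
# Toy illustration of the relaxed tolerance; the tensors are made-up numbers,
# not outputs of the RepMLP backbone.
import torch

reference = torch.tensor([1.000, 100.0])  # stands in for the pre-deploy features
deployed = torch.tensor([1.005, 100.9])   # stands in for the post-deploy features

# Default tolerances (rtol=1e-5, atol=1e-8) flag this drift as a mismatch.
print(torch.allclose(deployed, reference))                        # False

# Relaxed check: each |deployed - reference| must stay within
# 0.01 + 0.01 * |reference|, i.e. 0.02 and 1.01 here, which both hold.
print(torch.allclose(deployed, reference, rtol=0.01, atol=0.01))  # True
```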