diff --git a/example/ssd/README.md b/example/ssd/README.md
index a54871c2790e..57b29bcf2e5a 100644
--- a/example/ssd/README.md
+++ b/example/ssd/README.md
@@ -54,7 +54,7 @@ Remember to enable CUDA if you want to be able to train, since CPU training is
 insanely slow. Using CUDNN is optional.
 
 ### Try the demo
-* Download the pretrained model: [`ssd_300.zip`](https://dl.dropboxusercontent.com/u/39265872/ssd_300_vgg16_reduced_voc0712_trainval.zip), and extract to `model/` directory. (This model is converted from VGG_VOC0712_SSD_300x300_iter_60000.caffemodel provided by paper author).
+* Download the pretrained model: [`ssd_300.zip`](https://github.com/zhreshold/mxnet-ssd/releases/download/v0.2-alpha/ssd_300_vgg16_reduced_voc0712_trainval.zip), and extract to `model/` directory. (This model is converted from VGG_VOC0712_SSD_300x300_iter_60000.caffemodel provided by paper author).
 * Run
 ```
 # cd /path/to/mxnet/example/ssd/
@@ -71,7 +71,7 @@ python demo.py --epoch 0 --images ./data/demo/dog.jpg --thresh 0.5
 This example only covers training on Pascal VOC dataset. Other datasets should
 be easily supported by adding subclass derived from class `Imdb` in
 `dataset/imdb.py`. See example of `dataset/pascal_voc.py` for details.
-* Download the converted pretrained `vgg16_reduced` model [here](https://dl.dropboxusercontent.com/u/39265872/vgg16_reduced.zip), unzip `.param` and `.json` files
+* Download the converted pretrained `vgg16_reduced` model [here](https://github.com/zhreshold/mxnet-ssd/releases/download/v0.2-beta/vgg16_reduced.zip), unzip `.param` and `.json` files
 into `model/` directory by default.
 * Download the PASCAL VOC dataset, skip this step if you already have one.
 ```
diff --git a/example/ssd/detect/detector.py b/example/ssd/detect/detector.py
index d9ee10c541d7..206eafaac1d5 100644
--- a/example/ssd/detect/detector.py
+++ b/example/ssd/detect/detector.py
@@ -32,7 +32,7 @@ def __init__(self, symbol, model_prefix, epoch, data_shape, mean_pixels, \
         if self.ctx is None:
             self.ctx = mx.cpu()
         _, args, auxs = mx.model.load_checkpoint(model_prefix, epoch)
-        self.mod = mx.mod.Module(symbol, context=ctx)
+        self.mod = mx.mod.Module(symbol, label_names=None, context=ctx)
         self.data_shape = data_shape
         self.mod.bind(data_shapes=[('data', (batch_size, 3, data_shape, data_shape))])
         self.mod.set_params(args, auxs)
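
Note on the `detector.py` change: `label_names=None` matters because the detection symbol used at inference time takes only a `data` input, so the module should not expect a `softmax_label` entry in each batch. Below is a minimal sketch of the same load/bind/forward sequence with the MXNet Module API. The prefix, epoch, and shapes are illustrative assumptions (the README's demo command uses `--epoch 0` and a 300x300 network; `model/ssd_300` is assumed here), and unlike `detector.py`, which builds the symbol in Python and reuses only the checkpoint weights, this sketch loads the saved symbol from the `-symbol.json` file.

```
import mxnet as mx

# Illustrative values: prefix/epoch of the extracted checkpoint and the 300x300 input.
prefix, epoch = 'model/ssd_300', 0
batch_size, data_shape = 1, 300
ctx = mx.cpu()

# Load the saved symbol and weights (prefix-symbol.json, prefix-0000.params).
sym, args, auxs = mx.model.load_checkpoint(prefix, epoch)

# label_names=None: the inference graph has no label input, so the module
# will not require a 'softmax_label' array when binding or forwarding.
mod = mx.mod.Module(sym, label_names=None, context=ctx)
mod.bind(data_shapes=[('data', (batch_size, 3, data_shape, data_shape))],
         for_training=False)
mod.set_params(args, auxs)

# Forward a dummy batch and read the raw detection output.
batch = mx.io.DataBatch([mx.nd.zeros((batch_size, 3, data_shape, data_shape))])
mod.forward(batch, is_train=False)
detections = mod.get_outputs()[0].asnumpy()
```

`for_training=False` is an optional tightening here (it skips gradient buffers); the patched `detector.py` keeps the default bind and still works because no backward pass is ever run.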