2 changes: 1 addition & 1 deletion Dockerfile
@@ -24,7 +24,7 @@ RUN cd /tmp && \
echo "e1045ee415162f944b6aebfe560b8fee *Miniconda3-${MINICONDA_VERSION}-Linux-x86_64.sh" | md5sum -c - && \
/bin/bash Miniconda3-${MINICONDA_VERSION}-Linux-x86_64.sh -f -b -p $CONDA_DIR && \
rm Miniconda3-${MINICONDA_VERSION}-Linux-x86_64.sh && \
$CONDA_DIR/bin/conda config --prepend channels conda-forge/label/dev && \
$CONDA_DIR/bin/conda config --system --prepend channels conda-forge/label/dev && \
$CONDA_DIR/bin/conda config --system --prepend channels conda-forge && \
$CONDA_DIR/bin/conda config --system --set auto_update_conda false && \
$CONDA_DIR/bin/conda config --system --set show_channel_urls true && \
8 changes: 4 additions & 4 deletions Pipfile
@@ -5,31 +5,31 @@ name = "pypi"

[packages]
black = "==18.9b0"
chainer = "==6.0.0b1"
comet-ml = "==1.0.42"
cupy-cuda92 = "==6.0.0b1"
cython = "==0.29.2"
descartes = "==1.1.0"
geopandas = {editable = true, ref = "0.4.0-26-g9e584cc", git = "https://github.com/geopandas/geopandas.git"}
gmt = {editable = true, ref = "0.1a3-131-g9772fa3", git = "https://github.com/weiji14/gmt-python.git"}
ipython = "==7.2.0"
jupyterlab = "==0.35.4"
jupytext = "==0.8.6"
keras = "==2.2.4"
livelossplot = "==0.2.3"
matplotlib = "==3.0.2"
netcdf4 = "==1.4.1"
numpy = "==1.14.5"
onnx_chainer = "==1.3.0a1"
packaging = "==18.0"
pandas = "==0.23.4"
pyproj = "==1.9.6"
quilt = "==2.9.14"
rasterio = "==1.0.13"
requests = "==2.21.0"
scikit-image = "==0.14.1"
scikit-learn = "==0.20.2"
shapely = "==1.7a1"
tensorflow = "==1.10.1"
tensorflow-gpu = "==1.10.1"
toolz = "==0.9.0"
tornado = "==5.1.1"
tqdm = "==4.28.1"

[dev-packages]
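The pinned packages above swap the Keras/TensorFlow stack for Chainer 6.0.0b1, cupy-cuda92 and onnx_chainer. A minimal sanity check of that install might look like the sketch below (not part of the pull request; it only assumes the versions pinned above resolve cleanly):

```python
# Minimal sanity check for the pinned Chainer/CuPy stack (a sketch, not from this PR)
import chainer

print(chainer.__version__)    # expect something like "6.0.0b1"
chainer.print_runtime_info()  # reports NumPy, CuPy and cuDNN versions if present

# cupy-cuda92 is only usable when a CUDA device and driver are available
if chainer.backends.cuda.available:
    import cupy as cp

    print(cp.arange(6).reshape(2, 3).sum())  # trivial computation on the GPU
```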
517 changes: 202 additions & 315 deletions Pipfile.lock

Large diffs are not rendered by default.

182 changes: 80 additions & 102 deletions deepbedmap.ipynb

Large diffs are not rendered by default.

89 changes: 45 additions & 44 deletions deepbedmap.py
@@ -22,6 +22,7 @@
# %%
import math
import os
import typing

os.environ["CUDA_VISIBLE_DEVICES"] = ""

@@ -36,15 +37,15 @@
import skimage
import xarray as xr

import keras
import chainer

from features.environment import _load_ipynb_modules

# %% [markdown]
# ## Get bounding box of area we want to predict on

# %%
def get_image_and_bounds(filepath: str):
def get_image_and_bounds(filepath: str) -> (np.ndarray, rasterio.coords.BoundingBox):
"""
Retrieve raster image in numpy array format and
geographic bounds as (xmin, ymin, xmax, ymax)
@@ -53,8 +54,9 @@ def get_image_and_bounds(filepath: str):
groundtruth = data.z.to_masked_array()
groundtruth = np.flipud(groundtruth) # flip on y-axis...
groundtruth = np.expand_dims(
np.expand_dims(groundtruth, axis=-1), axis=0
np.expand_dims(groundtruth, axis=0), axis=0
) # add extra dimensions (batch and channel)
assert groundtruth.shape[0:2] == (1, 1) # check that shape is like (1, 1, h, w)

xmin, xmax = float(data.x.min()), float(data.x.max())
ymin, ymax = float(data.y.min()), float(data.y.max())
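The hunk above changes the `np.expand_dims` axis so the ground truth grid is laid out as NCHW (batch, channel, height, width), which Chainer's convolution links expect, rather than the NHWC layout used by the earlier Keras code. A small illustrative sketch of the two layouts, using a hypothetical array rather than the notebook's data:

```python
# NHWC vs NCHW layout of a single-channel raster (illustrative sketch)
import numpy as np

grid = np.random.rand(128, 160)  # a (height, width) raster

nhwc = np.expand_dims(np.expand_dims(grid, axis=-1), axis=0)  # old Keras-style layout
assert nhwc.shape == (1, 128, 160, 1)  # (batch, height, width, channel)

nchw = np.expand_dims(np.expand_dims(grid, axis=0), axis=0)  # new Chainer-style layout
assert nchw.shape == (1, 1, 128, 160)  # (batch, channel, height, width)

# the two layouts differ only by the position of the channel axis
assert np.array_equal(np.moveaxis(nhwc, source=-1, destination=1), nchw)
```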
@@ -69,15 +71,14 @@
test_file = "2007tx" # "istarxx"
test_filepath = f"highres/{test_file}"
groundtruth, window_bound = get_image_and_bounds(filepath=f"{test_filepath}.nc")
print(window_bound)

# %% [markdown]
# ## Get neural network input datasets for our area of interest

# %%
def get_deepbedmap_model_inputs(
window_bound: rasterio.coords.BoundingBox, padding=1000
):
) -> typing.Dict[str, np.ndarray]:
"""
Outputs one large tile for each of
BEDMAP2, REMA and MEASURES Ice Flow Velocity
@@ -104,7 +105,11 @@ def get_deepbedmap_model_inputs(
padding=padding,
)

return X_tile, W1_tile, W2_tile
return (
np.rollaxis(X_tile, axis=3, start=1),
np.rollaxis(W1_tile, axis=3, start=1),
np.rollaxis(W2_tile, axis=3, start=1),
)


# %%
@@ -116,10 +121,10 @@ def plot_3d_view(
cm_norm: matplotlib.colors.Normalize = None,
title: str = None,
):
# Get x, y, z data
# Get x, y, z data, assuming image in NCHW format
image = img[0, :, :, :]
xx, yy = np.mgrid[0 : image.shape[0], 0 : image.shape[1]]
zz = image[:, :, 0]
xx, yy = np.mgrid[0 : image.shape[1], 0 : image.shape[2]]
zz = image[0, :, :]

# Make the 3D plot
ax.view_init(elev=elev, azim=azim)
@@ -142,11 +147,11 @@

# %%
fig, axarr = plt.subplots(nrows=1, ncols=3, squeeze=False, figsize=(16, 12))
axarr[0, 0].imshow(X_tile[0, :, :, 0], cmap="BrBG")
axarr[0, 0].imshow(X_tile[0, 0, :, :], cmap="BrBG")
axarr[0, 0].set_title("BEDMAP2\n(1000m resolution)")
axarr[0, 1].imshow(W1_tile[0, :, :, 0], cmap="BrBG")
axarr[0, 1].imshow(W1_tile[0, 0, :, :], cmap="BrBG")
axarr[0, 1].set_title("Reference Elevation Model of Antarctica\n(100m resolution)")
axarr[0, 2].imshow(W2_tile[0, :, :, 0], cmap="BrBG")
axarr[0, 2].imshow(W2_tile[0, 0, :, :], cmap="BrBG")
axarr[0, 2].set_title("MEaSUREs Ice Velocity\n(450m, resampled to 500m)")
plt.show()

@@ -183,29 +188,23 @@ def plot_3d_view(
# That way we can predict directly on an arbitrarily sized window.

# %%
def load_trained_model(model_inputs: tuple):
def load_trained_model(
filepath: str = "model/weights/srgan_generator_model_weights.npz"
):
"""
Creates a custom DeepBedMap neural network model
according to the shapes of the raster image inputs.

Also loads trained parameter weights into the model.
Builds the Generator component of the DeepBedMap neural network.
Also loads trained parameter weights into the model from a .npz file.
"""
srgan_train = _load_ipynb_modules("srgan_train.ipynb")

X_tile, W1_tile, W2_tile = model_inputs

network = srgan_train.generator_network(
input1_shape=X_tile.shape[1:],
input2_shape=W1_tile.shape[1:],
input3_shape=W2_tile.shape[1:],
)

model = keras.models.Model(
inputs=network.inputs, outputs=network.outputs, name="generator_model"
model = srgan_train.GeneratorModel(
inblock_class=srgan_train.DeepbedmapInputBlock,
resblock_class=srgan_train.ResidualBlock,
num_residual_blocks=16,
)

# Load trained neural network weights into model
model.load_weights(filepath="model/weights/srgan_generator_model_weights.hdf5")
chainer.serializers.load_npz(file=filepath, obj=model)

return model
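`chainer.serializers.load_npz` above restores trained parameters from a plain NumPy `.npz` archive, replacing the Keras `.hdf5` weight file. A toy round-trip of the same serializer pair might look like the sketch below, which uses a stand-in `chainer.links.Linear` rather than the actual GeneratorModel:

```python
# Toy save/load round-trip with Chainer's npz serializers (a sketch)
import chainer
import chainer.links as L
import numpy as np

model = L.Linear(in_size=3, out_size=2)  # stand-in for the SRGAN generator
chainer.serializers.save_npz(file="/tmp/toy_weights.npz", obj=model)

restored = L.Linear(in_size=3, out_size=2)
chainer.serializers.load_npz(file="/tmp/toy_weights.npz", obj=restored)

assert np.allclose(model.W.array, restored.W.array)  # weights survived the round-trip
```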

@@ -214,20 +213,20 @@ def load_trained_model(model_inputs: tuple):
# ## Make prediction

# %%
model = load_trained_model(model_inputs=(X_tile, W1_tile, W2_tile))
Y_hat = model.predict(x=[X_tile, W1_tile, W2_tile], verbose=1)
model = load_trained_model()
Y_hat = model.forward(inputs={"x": X_tile, "w1": W1_tile, "w2": W2_tile}).array
Y_hat.shape

# %% [markdown]
# ## Plot results

# %%
fig, axarr = plt.subplots(nrows=1, ncols=3, squeeze=False, figsize=(16, 12))
axarr[0, 0].imshow(X_tile[0, :, :, 0], cmap="BrBG")
axarr[0, 0].imshow(X_tile[0, 0, :, :], cmap="BrBG")
axarr[0, 0].set_title("BEDMAP2")
axarr[0, 1].imshow(Y_hat[0, :, :, 0], cmap="BrBG")
axarr[0, 1].imshow(Y_hat[0, 0, :, :], cmap="BrBG")
axarr[0, 1].set_title("Super Resolution Generative Adversarial Network prediction")
axarr[0, 2].imshow(groundtruth[0, :, :, 0], cmap="BrBG")
axarr[0, 2].imshow(groundtruth[0, 0, :, :], cmap="BrBG")
axarr[0, 2].set_title("Groundtruth grids")
plt.show()
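In the prediction cell above, `model.forward(...)` returns a `chainer.Variable`, and `.array` unwraps it into a plain `numpy.ndarray` for plotting and saving (the old Keras `model.predict` call returned an ndarray directly). A minimal sketch of that unwrapping, using a simple built-in function rather than the DeepBedMap generator:

```python
# chainer.Variable vs the underlying numpy array (illustrative sketch)
import chainer
import numpy as np

x = np.arange(4, dtype=np.float32).reshape(1, 1, 2, 2)  # NCHW toy input
y = chainer.functions.relu(x)  # any Chainer function returns a Variable

print(type(y))        # <class 'chainer.variable.Variable'>
print(type(y.array))  # <class 'numpy.ndarray'> - this is what gets plotted/saved
```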

@@ -262,29 +261,31 @@ def load_trained_model(model_inputs: tuple):
# %%
def save_array_to_grid(window_bound: tuple, array: np.ndarray, outfilepath: str):
"""
Saves a numpy array to geotiff and netcdf format
Saves a numpy array to geotiff and netcdf format.
Appends ".tif" and ".nc" file extension to the outfilepath
for geotiff and netcdf outputs respectively.
"""

assert array.ndim == 4
assert array.shape[3] == 1 # check that there is only one channel
assert array.shape[1] == 1 # check that there is only one channel

transform = rasterio.transform.from_bounds(
*window_bound, height=array.shape[1], width=array.shape[2]
*window_bound, height=array.shape[2], width=array.shape[3]
)

# Save array as a GeoTiff first
with rasterio.open(
f"{outfilepath}.tif",
mode="w",
driver="GTiff",
height=array.shape[1],
width=array.shape[2],
height=array.shape[2],
width=array.shape[3],
count=1,
crs="EPSG:3031",
transform=transform,
dtype=array.dtype,
) as new_geotiff:
new_geotiff.write(array[0, :, :, 0], 1)
new_geotiff.write(array[0, 0, :, :], 1)

# Convert deepbedmap3 and cubicbedmap2 from geotiff to netcdf format
xr.open_rasterio(f"{outfilepath}.tif").to_netcdf(f"{outfilepath}.nc")
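`rasterio.transform.from_bounds` above now takes the grid height and width from `array.shape[2]` and `array.shape[3]` to match the NCHW layout. A hedged sketch of what that affine transform does, with hypothetical bounds and a hypothetical grid size:

```python
# How from_bounds maps pixel indices to map coordinates (hypothetical values)
import numpy as np
import rasterio.transform

array = np.zeros((1, 1, 100, 80))  # (batch, channel, height, width)
window_bound = (-1_500_000, -900_000, -1_420_000, -800_000)  # xmin, ymin, xmax, ymax

transform = rasterio.transform.from_bounds(
    *window_bound, height=array.shape[2], width=array.shape[3]
)

print(transform * (0, 0))  # top-left pixel corner -> (xmin, ymax)
print(transform * (array.shape[3], array.shape[2]))  # bottom-right corner -> (xmax, ymin)
```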
@@ -299,17 +300,17 @@ def save_array_to_grid(window_bound: tuple, array: np.ndarray, outfilepath: str)
# %%
# Save Bicubic Resampled BEDMAP2 to GeoTiff and NetCDF format
cubicbedmap2 = skimage.transform.rescale(
image=X_tile[0].astype(np.int32),
scale=4,
order=3,
image=X_tile[0, 0, :, :].astype(np.int32),
scale=4, # 4x upscaling
order=3, # cubic interpolation
mode="reflect",
anti_aliasing=True,
multichannel=True,
multichannel=False,
preserve_range=True,
)
save_array_to_grid(
window_bound=window_bound,
array=np.expand_dims(cubicbedmap2, axis=0),
array=np.expand_dims(np.expand_dims(cubicbedmap2, axis=0), axis=0),
outfilepath="model/cubicbedmap",
)

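The bicubic baseline above upsamples the low-resolution BEDMAP2 tile by a factor of 4 with `skimage.transform.rescale`, now on a plain 2D slice rather than a multichannel array. A self-contained sketch of the same call on a hypothetical grid, just to confirm the output shape scales by 4 (parameters mirror the pinned scikit-image 0.14 API):

```python
# 4x bicubic upsampling of a single-channel 2D grid (illustrative sketch)
import numpy as np
import skimage.transform

lowres = np.random.RandomState(seed=42).rand(10, 10)
highres = skimage.transform.rescale(
    image=lowres,
    scale=4,  # 4x upscaling
    order=3,  # cubic interpolation
    mode="reflect",
    anti_aliasing=True,
    multichannel=False,  # plain 2D array, no trailing channel axis
    preserve_range=True,
)
assert highres.shape == (40, 40)
```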
6 changes: 3 additions & 3 deletions environment.yml
@@ -4,9 +4,9 @@ channels:
- conda-forge/label/dev
- nodefaults
dependencies:
- defaults::cudnn=7.1.2[md5=4a402b88bb77e6ab2dcf3bfe6522f9cf]
- hcc::cuda_driver=390.46[md5=8fb0b6c39a9bf6128b1191db53ed903e]
- defaults::cudatoolkit=9.0[md5=5d0febed868b80a18e74077d5d0f17bc]
- defaults::cudnn=7.2.1[md5=6a84069dcf4aca8ba9493d3cb320090e]
- hcc::cuda_driver=410.73[md5=941787b750b372f4a240287634589d24]
- defaults::cudatoolkit=9.2[md5=f81c96e01ccb9028800101b35e71b844]
- gmt=6.0.0a17[md5=bea1e9a2cc29280f8ba173123f115496]
- pip=18.1[md5=d68c7e5109ba0bf4b1cfe60f0f47870a]
- conda-forge::python=3.6.6[md5=fe9f54422cdaf8779147b6a02cab2dd1]
11 changes: 8 additions & 3 deletions features/environment.py
@@ -32,10 +32,15 @@ def _load_ipynb_modules(ipynb_path: str):
source, meta = pyexporter.from_notebook_node(nb=nb)
assert isinstance(source, str)

# parse the .py string to pick out only 'import' and 'def function's
# parse the .py string to pick out only 'class', 'import' and 'def function's
parsed_code = ast.parse(source=source)
for node in parsed_code.body[:]:
if node.__class__ not in [ast.FunctionDef, ast.Import, ast.ImportFrom]:
if node.__class__ not in [
ast.ClassDef,
ast.FunctionDef,
ast.Import,
ast.ImportFrom,
]:
parsed_code.body.remove(node)
assert len(parsed_code.body) > 0
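The hunk above widens the AST filter so that `class` definitions (needed for the new Chainer model classes) survive the notebook-to-module conversion, alongside imports and function definitions. A stand-alone sketch of the same idea on a hypothetical source string:

```python
# Keep only class/function/import statements from a source string, then run it (sketch)
import ast
import types

source = """
import math
print("this top-level statement gets stripped out")

class Circle:
    def __init__(self, radius):
        self.radius = radius

def area(circle):
    return math.pi * circle.radius ** 2
"""

parsed = ast.parse(source=source)
parsed.body = [
    node
    for node in parsed.body
    if isinstance(node, (ast.ClassDef, ast.FunctionDef, ast.Import, ast.ImportFrom))
]

module = types.ModuleType("filtered_module")
exec(compile(parsed, filename="<filtered>", mode="exec"), module.__dict__)
print(module.area(module.Circle(radius=2)))  # 12.566...
```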

@@ -108,7 +113,7 @@ def _download_deepbedmap_model_weights_from_comet():

# Download the neural network weight file (hdf5 format) to the right place!
r = requests.get(url=asset_url, headers=authHeader)
open(file="model/weights/srgan_generator_model_weights.hdf5", mode="wb").write(
open(file="model/weights/srgan_generator_model_weights.npz", mode="wb").write(
r.content
)

14 changes: 6 additions & 8 deletions features/steps/test_deepbedmap.py
@@ -29,22 +29,20 @@ def get_model_input_raster_images(context):

@when("pass those images into our trained neural network model")
def predict_using_trained_neural_network(context):
model = context.deepbedmap.load_trained_model(
model_inputs=(context.X_tile, context.W1_tile, context.W2_tile)
)
context.Y_hat = model.predict(
x=[context.X_tile, context.W1_tile, context.W2_tile], verbose=0
)
model = context.deepbedmap.load_trained_model()
context.Y_hat = model.forward(
inputs={"x": context.X_tile, "w1": context.W1_tile, "w2": context.W2_tile}
).array


@then("a four times upsampled super resolution bed elevation map is returned")
def step_impl(context):
# Ensure input (X_tile) and output (Y_hat) shape is like (1, height, width, 1)
# Ensure input (X_tile) and output (Y_hat) shape is like (1, 1, height, width)
assert context.X_tile.ndim == 4
assert context.Y_hat.ndim == 4

# Check that High Resolution output shape (DeepBedMap) divided by
# Low Resolution input shape (BEDMAP2) minus 2 pixel (1km) padding
# is exactly equal to 4
assert context.Y_hat.shape[1] / (context.X_tile.shape[1] - 2) == 4.0
assert context.Y_hat.shape[2] / (context.X_tile.shape[2] - 2) == 4.0
assert context.Y_hat.shape[3] / (context.X_tile.shape[3] - 2) == 4.0
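The assertions above encode the expected geometry: the model upsamples by 4x after the 1 km padding (one low-resolution pixel per side, hence the `- 2`) is accounted for. The arithmetic, spelled out on hypothetical tile sizes:

```python
# Expected output size of the 4x super-resolution model (hypothetical tile sizes)
h, w = 11, 13  # padded low-resolution (BEDMAP2) tile height and width
out_h, out_w = 4 * (h - 2), 4 * (w - 2)  # drop 1 px of padding per side, then upsample 4x

assert out_h / (h - 2) == 4.0 and out_w / (w - 2) == 4.0
print(out_h, out_w)  # 36 44
```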
6 changes: 3 additions & 3 deletions model/README.md
@@ -8,6 +8,6 @@ This folder contains the files which are directly related to the training of the
- \*_data.npy (\*hidden in git, preprocessed raster tiles from data_prep.ipynb)

- weights/
- [srgan_generator_model_architecture.json](weights/srgan_generator_model_architecture.json) (Keras model architecture of Generator Network in JSON)
- srgan_generator_model_weights.hdf5 (\*hidden in git but available at https://www.comet.ml/weiji14/deepbedmap under experiment assets, trained neural network weights)
- srgan_generator_model.hdf5 (\*hidden in git, contains both neural network model architecture and weights)
- [srgan_generator_model_architecture.onnx.txt](weights/srgan_generator_model_architecture.onnx.txt) (Chainer model architecture of Generator Network in ONNX text format)
- srgan_generator_model_architecture.onnx (\*hidden in git, Chainer model architecture of Generator Network in ONNX binary format)
- srgan_generator_model_weights.npz (\*hidden in git but available at https://www.comet.ml/weiji14/deepbedmap under experiment assets, trained neural network weights)
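The weights folder now tracks the generator architecture as ONNX text/binary exports alongside the `.npz` weights. A hypothetical sketch of producing such an ONNX file with the pinned `onnx_chainer` package; the actual model and export options used in this repository are an assumption:

```python
# Export a toy Chainer link to ONNX binary format (hypothetical sketch)
import numpy as np
import chainer.links as L
import onnx_chainer

model = L.Convolution2D(in_channels=1, out_channels=1, ksize=3)  # stand-in generator
dummy_input = np.zeros((1, 1, 16, 16), dtype=np.float32)  # NCHW dummy tile

# Traces one forward pass and writes the graph (plus parameters) to disk
onnx_chainer.export(model, dummy_input, filename="toy_generator.onnx")
```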