diff --git a/configs/cori/config.20171201.default.GMPAS-IAF.T62_oRRS30to10v3wLI.cori-knl b/configs/cori/config.20171201.default.GMPAS-IAF.T62_oRRS30to10v3wLI.cori-knl new file mode 100644 index 000000000..0043afc35 --- /dev/null +++ b/configs/cori/config.20171201.default.GMPAS-IAF.T62_oRRS30to10v3wLI.cori-knl @@ -0,0 +1,171 @@ +[runs] +## options related to the run to be analyzed and reference runs to be +## compared against + +# mainRunName is a name that identifies the simulation being analyzed. +mainRunName = 20171201.default.GMPAS-IAF.T62_oRRS30to10v3wLI.cori-knl +# preprocessedReferenceRunName is the name of a reference run that has been +# preprocessed to compare against (or None to turn off comparison). Reference +# runs of this type would have preprocessed results because they were not +# performed with MPAS components (so they cannot be easily ingested by +# MPAS-Analysis) +preprocessedReferenceRunName = B1850C5_ne30_v0.4 + +[execute] +## options related to executing parallel tasks + +# the number of parallel tasks (1 means tasks run in serial, the default) +parallelTaskCount = 1 + +# the parallelism mode in ncclimo ("serial" or "bck") +# Set this to "bck" (background parallelism) if running on a machine that can +# handle 12 simultaneous processes, one for each monthly climatology. +ncclimoParallelMode = serial + +[input] +## options related to reading in the results to be analyzed + +# directory containing model results +baseDirectory = /global/cscratch1/sd/mpeterse/acme_scratch/20171201.default.GMPAS-IAF.T62_oRRS30to10v3wLI.cori-knl/run + +# names of ocean and sea ice meshes (e.g. EC60to30, QU240, RRS30to10, etc.) +mpasMeshName = oRRS30to10v3wLI + +# Directory for mapping files (if they have been generated already). 
If mapping +# files needed by the analysis are not found here, they will be generated and +# placed in the output mappingSubdirectory +mappingDirectory = /global/project/projectdirs/acme/mpas_analysis/mapping + +[output] +## options related to writing out plots, intermediate cached data sets, logs, +## etc. + +# directory where analysis should be written +baseDirectory = /dir/to/analysis/output + +# provide an absolute path to put HTML in an alternative location (e.g. a web +# portal) +# htmlSubdirectory = /global/project/projectdirs/acme/www/USERNAME/RUNNAME +htmlSubdirectory = html + +# a list of analyses to generate. Valid names can be seen by running: +# ./run_mpas_analysis --list +# This command also lists tags for each analysis. +# Shortcuts exist to generate (or not generate) several types of analysis. +# These include: +# 'all' -- all analyses will be run +# 'all_' -- all analysis with a particular tag will be run +# 'all_' -- all analyses from a given component (either 'ocean' +# or 'seaIce') will be run +# 'no_' -- skip the given task +# 'no_', 'no_' -- in analogy to 'all_*', skip all analysis +# tasks from the given component or with +# the given tag. Do +# ./run_mpas_analysis --list +# to list all task names and their tags +# an equivalent syntax can be used on the command line to override this +# option: +# ./run_mpas_analysis config.analysis --generate \ +# all,no_ocean,all_timeSeries +generate = ['all'] + +[climatology] +## options related to producing climatologies, typically to compare against +## observations and previous runs + +# the first year over which to average climatologies +startYear = 5 +# the last year over which to average climatologies +endYear = 10 + +[timeSeries] +## options related to producing time series plots, often to compare against +## observations and previous runs + +# start and end years for timeseries analysis. 
Using out-of-bounds values +# like start_year = 1 and end_year = 9999 will be clipped to the valid range +# of years, and is a good way of ensuring that all values are used. +startYear = 1 +endYear = 10 + +[index] +## options related to producing nino index. + +# start and end years for the nino 3.4 analysis. Using out-of-bounds values +# like start_year = 1 and end_year = 9999 will be clipped to the valid range +# of years, and is a good way of ensuring that all values are used. +# For valid statistics, index times should include at least 30 years +startYear = 1 +endYear = 10 + +[oceanObservations] +## options related to ocean observations with which the results will be compared + +# directory where ocean observations are stored +baseDirectory = /global/project/projectdirs/acme/observations/Ocean/ +sstSubdirectory = SST +sssSubdirectory = SSS +mldSubdirectory = MLD +ninoSubdirectory = Nino +mhtSubdirectory = MHT +meltSubdirectory = Melt +soseSubdirectory = SOSE + +[oceanPreprocessedReference] +## options related to preprocessed ocean reference run with which the results +## will be compared (e.g. a POP, CESM or ACME v0 run) + +# directory where ocean reference simulation results are stored +baseDirectory = /global/project/projectdirs/acme/ACMEv0_lowres/B1850C5_ne30_v0.4/ocn/postprocessing + +[seaIceObservations] +## options related to sea ice observations with which the results will be +## compared + +# directory where sea ice observations are stored +baseDirectory = /global/project/projectdirs/acme/observations/SeaIce + +[seaIcePreprocessedReference] +## options related to preprocessed sea ice reference run with which the results +## will be compared (e.g. 
a CICE, CESM or ACME v0 run) + +# directory where ocean reference simulation results are stored +baseDirectory = /global/project/projectdirs/acme/ACMEv0_lowres/B1850C5_ne30_v0.4/ice/postprocessing + +[climatologyMapSoseTemperature] +## options related to plotting climatology maps of Antarctic +## potential temperature at various levels, including the sea floor against +## reference model results and SOSE reanalysis data + +# Times for comparison times (Jan, Feb, Mar, Apr, May, Jun, Jul, Aug, Sep, Oct, +# Nov, Dec, JFM, AMJ, JAS, OND, ANN) +seasons = ['ANN'] + +# list of depths in meters (positive up) at which to analyze, 'top' for the +# sea surface, 'bot' for the sea floor +depths = ['top', -200, -400, -600, -800, 'bot'] + +[climatologyMapSoseSalinity] +## options related to plotting climatology maps of Antarctic +## salinity at various levels, including the sea floor against +## reference model results and SOSE reanalysis data + +# Times for comparison times (Jan, Feb, Mar, Apr, May, Jun, Jul, Aug, Sep, Oct, +# Nov, Dec, JFM, AMJ, JAS, OND, ANN) +seasons = ['ANN'] + +# list of depths in meters (positive up) at which to analyze, 'top' for the +# sea surface, 'bot' for the sea floor +depths = ['top', -200, -400, -600, -800, 'bot'] + +[timeSeriesSeaIceAreaVol] +## options related to plotting time series of sea ice area and volume + +# plot on polar plot +polarPlot = False + +[regions] +## options related to ocean regions used in several analysis modules + +# Directory for region mask files +regionMaskDirectory = /global/project/projectdirs/acme/mpas_analysis/region_masks diff --git a/configs/cori/job_script.cori-haswell.bash b/configs/cori/job_script.cori-haswell.bash new file mode 100644 index 000000000..f40bd3d59 --- /dev/null +++ b/configs/cori/job_script.cori-haswell.bash @@ -0,0 +1,42 @@ +#!/bin/bash -l + +# comment out if using debug queue +#SBATCH --partition=regular +# comment in to get premium queue +##SBATCH --qos=premium +# comment in to get the debug 
queue +##SBATCH --partition=debug +# comment in when run on cori haswell or knl +#SBATCH -C haswell +#SBATCH --nodes=1 +#SBATCH --time=1:00:00 +#SBATCH --account=acme +#SBATCH --job-name=mpas_analysis +#SBATCH --output=mpas_analysis.o%j +#SBATCH --error=mpas_analysis.e%j +#SBATCH -L cscratch1,SCRATCH,project + +cd $SLURM_SUBMIT_DIR # optional, since this is the default behavior + +export OMP_NUM_THREADS=1 + +module unload python python/base +module use /global/project/projectdirs/acme/software/modulefiles/all +module load python/anaconda-2.7-acme +export PATH=/global/homes/z/zender/bin_cori:${PATH} + +# MPAS/ACME job to be analyzed, including paths to simulation data and +# observations. Change this name and path as needed +run_config_file="config.run_name_here" + +if [ ! -f $run_config_file ]; then + echo "File $run_config_file not found!" + exit 1 +fi +if [ ! -f ./run_analysis.py ]; then + echo "run_analysis.py not found in current directory!" + exit 1 +fi + +srun -N 1 -n 1 ./run_analysis.py $run_config_file + diff --git a/configs/cori/job_script.cori-knl.bash b/configs/cori/job_script.cori-knl.bash new file mode 100644 index 000000000..c21ef7b61 --- /dev/null +++ b/configs/cori/job_script.cori-knl.bash @@ -0,0 +1,42 @@ +#!/bin/bash -l + +# comment out if using debug queue +#SBATCH --partition=regular +# comment in to get premium queue +##SBATCH --qos=premium +# comment in to get the debug queue +##SBATCH --partition=debug +# comment in when run on cori haswell or knl +#SBATCH -C knl +#SBATCH --nodes=1 +#SBATCH --time=1:00:00 +#SBATCH --account=acme +#SBATCH --job-name=mpas_analysis +#SBATCH --output=mpas_analysis.o%j +#SBATCH --error=mpas_analysis.e%j +#SBATCH -L cscratch1,SCRATCH,project + +cd $SLURM_SUBMIT_DIR # optional, since this is the default behavior + +export OMP_NUM_THREADS=1 + +module unload python python/base +module use /global/project/projectdirs/acme/software/modulefiles/all +module load python/anaconda-2.7-acme +export 
PATH=/global/homes/z/zender/bin_cori:${PATH} + +# MPAS/ACME job to be analyzed, including paths to simulation data and +# observations. Change this name and path as needed +run_config_file="config.run_name_here" + +if [ ! -f $run_config_file ]; then + echo "File $run_config_file not found!" + exit 1 +fi +if [ ! -f ./run_analysis.py ]; then + echo "run_analysis.py not found in current directory!" + exit 1 +fi + +srun -N 1 -n 1 ./run_analysis.py $run_config_file + diff --git a/configs/edison/config.B_low_res_ice_shelves_1696_JWolfe_layout_Edison b/configs/edison/config.B_low_res_ice_shelves_1696_JWolfe_layout_Edison index e134103f3..31fb86e4b 100644 --- a/configs/edison/config.B_low_res_ice_shelves_1696_JWolfe_layout_Edison +++ b/configs/edison/config.B_low_res_ice_shelves_1696_JWolfe_layout_Edison @@ -136,6 +136,32 @@ baseDirectory = /global/project/projectdirs/acme/observations/SeaIce # directory where ocean reference simulation results are stored baseDirectory = /global/project/projectdirs/acme/ACMEv0_lowres/B1850C5_ne30_v0.4/ice/postprocessing +[climatologyMapSoseTemperature] +## options related to plotting climatology maps of Antarctic +## potential temperature at various levels, including the sea floor against +## reference model results and SOSE reanalysis data + +# Times for comparison times (Jan, Feb, Mar, Apr, May, Jun, Jul, Aug, Sep, Oct, +# Nov, Dec, JFM, AMJ, JAS, OND, ANN) +seasons = ['ANN'] + +# list of depths in meters (positive up) at which to analyze, 'top' for the +# sea surface, 'bot' for the sea floor +depths = ['top', -200, -400, -600, -800, 'bot'] + +[climatologyMapSoseSalinity] +## options related to plotting climatology maps of Antarctic +## salinity at various levels, including the sea floor against +## reference model results and SOSE reanalysis data + +# Times for comparison times (Jan, Feb, Mar, Apr, May, Jun, Jul, Aug, Sep, Oct, +# Nov, Dec, JFM, AMJ, JAS, OND, ANN) +seasons = ['ANN'] + +# list of depths in meters (positive up) at which 
to analyze, 'top' for the +# sea surface, 'bot' for the sea floor +depths = ['top', -200, -400, -600, -800, 'bot'] + [timeSeriesSeaIceAreaVol] ## options related to plotting time series of sea ice area and volume diff --git a/configs/theta/config.20171031.tenYearTest.GMPAS-IAF.T62_oEC60to30v3wLI.60layer.theta b/configs/theta/config.20171031.tenYearTest.GMPAS-IAF.T62_oEC60to30v3wLI.60layer.theta index 8524edd69..e295f0475 100644 --- a/configs/theta/config.20171031.tenYearTest.GMPAS-IAF.T62_oEC60to30v3wLI.60layer.theta +++ b/configs/theta/config.20171031.tenYearTest.GMPAS-IAF.T62_oEC60to30v3wLI.60layer.theta @@ -143,22 +143,31 @@ polarPlot = False # Directory containing mask files for ocean basins and ice shelves regionMaskDirectory = /projects/OceanClimate/mpas-analysis_data/mpas_analysis/region_masks + [climatologyMapSoseTemperature] +## options related to plotting climatology maps of Antarctic +## potential temperature at various levels, including the sea floor against +## reference model results and SOSE reanalysis data + # Times for comparison times (Jan, Feb, Mar, Apr, May, Jun, Jul, Aug, Sep, Oct, # Nov, Dec, JFM, AMJ, JAS, OND, ANN) seasons = ['JFM', 'JAS', 'ANN'] -# list of depths in meters (positive up) at which to analyze, 'bot' for the -# bottom of the ocean +# list of depths in meters (positive up) at which to analyze, 'top' for the +# sea surface, 'bot' for the sea floor depths = ['top', -200, -400, -600, -800, 'bot'] [climatologyMapSoseSalinity] +## options related to plotting climatology maps of Antarctic +## salinity at various levels, including the sea floor against +## reference model results and SOSE reanalysis data + # Times for comparison times (Jan, Feb, Mar, Apr, May, Jun, Jul, Aug, Sep, Oct, # Nov, Dec, JFM, AMJ, JAS, OND, ANN) seasons = ['JFM', 'JAS', 'ANN'] -# list of depths in meters (positive up) at which to analyze, 'bot' for the -# bottom of the ocean +# list of depths in meters (positive up) at which to analyze, 'top' for the +# sea 
surface, 'bot' for the sea floor depths = ['top', -200, -400, -600, -800, 'bot'] [timeSeriesAntarcticMelt] diff --git a/mpas_analysis/config.default b/mpas_analysis/config.default index 96434ccbe..e9ca8708e 100644 --- a/mpas_analysis/config.default +++ b/mpas_analysis/config.default @@ -207,6 +207,7 @@ mldSubdirectory = MLD ninoSubdirectory = Nino mhtSubdirectory = MHT meltSubdirectory = Melt +soseSubdirectory = SOSE # first and last year of SST observational climatology (preferably one of the # two ranges given below) @@ -604,6 +605,77 @@ normArgsDifference = {'linthresh': 1., 'linscale': 0.5, 'vmin': -100., colorbarTicksDifference = [-100., -50., -20., -10., -5., -2., -1., 0., 1., 2., 5., 10., 20., 50., 100.] +[climatologyMapSoseTemperature] +## options related to plotting climatology maps of Antarctic +## potential temperature at various levels, including the sea floor against +## reference model results and SOSE reanalysis data + +# comparison grid(s) +# only the Antarctic really makes sense but lat-lon could technically work. 
+comparisonGrids = ['antarctic'] + +# Times for comparison times (Jan, Feb, Mar, Apr, May, Jun, Jul, Aug, Sep, Oct, +# Nov, Dec, JFM, AMJ, JAS, OND, ANN) +seasons = ['ANN'] + +# list of depths in meters (positive up) at which to analyze, 'top' for the +# sea surface, 'bot' for the sea floor +depths = ['top', -200, -400, -600, -800, 'bot'] + + +# colormap for model/observations +colormapNameResult = RdBu_r +# the type of norm used in the colormap +normTypeResult = linear +# A dictionary with keywords for the SemiLogNorm +normArgsResult = {'vmin': -2., 'vmax': 2.} +# place the ticks automatically by default +# colorbarTicksResult = numpy.linspace(-2., 2., 9) + +# colormap for differences +colormapNameDifference = RdBu_r +# the type of norm used in the colormap +normTypeDifference = linear +# A dictionary with keywords for the SemiLogNorm +normArgsDifference = {'vmin': -2., 'vmax': 2.} +# place the ticks automatically by default +# colorbarTicksDifference = numpy.linspace(-2., 2., 9) + +[climatologyMapSoseSalinity] +## options related to plotting climatology maps of Antarctic +## salinity at various levels, including the sea floor against +## reference model results and SOSE reanalysis data + +# comparison grid(s) +# only the Antarctic really makes sense but lat-lon could technically work. 
+comparisonGrids = ['antarctic'] + +# Times for comparison times (Jan, Feb, Mar, Apr, May, Jun, Jul, Aug, Sep, Oct, +# Nov, Dec, JFM, AMJ, JAS, OND, ANN) +seasons = ['ANN'] + +# list of depths in meters (positive up) at which to analyze, 'top' for the +# sea surface, 'bot' for the sea floor +depths = ['top', -200, -400, -600, -800, 'bot'] + +# colormap for model/observations +colormapNameResult = BuOr +# the type of norm used in the colormap +normTypeResult = linear +# A dictionary with keywords for the SemiLogNorm +normArgsResult = {'vmin': 33.8, 'vmax': 35.0} +# place the ticks automatically by default +# colorbarTicksResult = numpy.linspace(34.2, 35.2, 9) + +# colormap for differences +colormapNameDifference = RdBu_r +# the type of norm used in the colormap +normTypeDifference = linear +# A dictionary with keywords for the SemiLogNorm +normArgsDifference = {'vmin': -0.5, 'vmax': 0.5} +# place the ticks automatically by default +# colorbarTicksDifference = numpy.linspace(-0.5, 0.5, 9) + [climatologyMapSeaIceConcNH] ## options related to plotting horizontally remapped climatologies of ## sea ice concentration against reference model results and observations diff --git a/mpas_analysis/ocean/__init__.py b/mpas_analysis/ocean/__init__.py index ce6960b1a..a9465cc98 100644 --- a/mpas_analysis/ocean/__init__.py +++ b/mpas_analysis/ocean/__init__.py @@ -2,6 +2,8 @@ from .climatology_map_mld import ClimatologyMapMLD from .climatology_map_sss import ClimatologyMapSSS from .climatology_map_antarctic_melt import ClimatologyMapAntarcticMelt +from .climatology_map_sose import ClimatologyMapSoseTemperature, \ + ClimatologyMapSoseSalinity from .time_series_ohc import TimeSeriesOHC from .time_series_sst import TimeSeriesSST diff --git a/mpas_analysis/ocean/climatology_map_antarctic_melt.py b/mpas_analysis/ocean/climatology_map_antarctic_melt.py index acc2b9706..37a0b2f8b 100644 --- a/mpas_analysis/ocean/climatology_map_antarctic_melt.py +++ 
b/mpas_analysis/ocean/climatology_map_antarctic_melt.py @@ -225,7 +225,7 @@ class RemapObservedAntarcticMeltClimatology(RemapObservedClimatologySubtask): Authors ------- - Luke Van Roekel, Xylar Asay-Davis, Milena Veneziani + Xylar Asay-Davis """ def get_observation_descriptor(self, fileName): # {{{ diff --git a/mpas_analysis/ocean/climatology_map_sose.py b/mpas_analysis/ocean/climatology_map_sose.py new file mode 100644 index 000000000..5fa894443 --- /dev/null +++ b/mpas_analysis/ocean/climatology_map_sose.py @@ -0,0 +1,409 @@ +''' +Analysis tasks for comparing Antarctic climatology maps against observations +and reanalysis data. + +Authors +------- +Xylar Asay-Davis +''' + +import xarray as xr + +from ..shared import AnalysisTask + +from .remap_depth_slices_subtask import RemapDepthSlicesSubtask + +from .plot_climatology_map_subtask import PlotClimatologyMapSubtask + +from ..shared.io.utility import build_config_full_path + +from ..shared.climatology import RemapObservedClimatologySubtask, \ + get_antarctic_stereographic_projection + +from ..shared.grid import ProjectionGridDescriptor + +from ..shared.mpas_xarray import mpas_xarray + + +class ClimatologyMapSoseTemperature(AnalysisTask): # {{{ + """ + An analysis task for comparison of antarctic temperature against SOSE + fields + + Authors + ------- + Xylar Asay-Davis + """ + + def __init__(self, config, mpasClimatologyTask): # {{{ + """ + Construct the analysis task. 
+ + Parameters + ---------- + config : instance of MpasAnalysisConfigParser + Contains configuration options + + mpasClimatologyTask : ``MpasClimatologyTask`` + The task that produced the climatology to be remapped and plotted + + Authors + ------- + Xylar Asay-Davis + """ + fieldName = 'temperature' + # call the constructor from the base class (AnalysisTask) + super(ClimatologyMapSoseTemperature, self).__init__( + config=config, taskName='climatologyMapSoseTemperature', + componentName='ocean', + tags=['climatology', 'horizontalMap', 'sose', fieldName]) + + sectionName = self.taskName + + mpasFieldName = 'timeMonthly_avg_activeTracers_temperature' + iselValues = None + + observationTitleLabel = 'State Estimate (SOSE)' + + observationsDirectory = build_config_full_path( + config, 'oceanObservations', 'soseSubdirectory') + + obsFileName = \ + '{}/SOSE_2005-2010_monthly_pot_temp_6000.0x' \ + '6000.0km_10.0km_Antarctic_stereo.nc'.format(observationsDirectory) + obsFieldName = 'theta' + + # read in what seasons we want to plot + seasons = config.getExpression(sectionName, 'seasons') + + if len(seasons) == 0: + raise ValueError('config section {} does not contain valid list ' + 'of seasons'.format(sectionName)) + + comparisonGridNames = config.getExpression(sectionName, + 'comparisonGrids') + + if len(comparisonGridNames) == 0: + raise ValueError('config section {} does not contain valid list ' + 'of comparison grids'.format(sectionName)) + + depths = config.getExpression(sectionName, 'depths') + + if len(depths) == 0: + raise ValueError('config section {} does not contain valid ' + 'list of depths'.format(sectionName)) + + # the variable 'timeMonthly_avg_landIceFreshwaterFlux' will be added to + # mpasClimatologyTask along with the seasons. 
+ remapClimatologySubtask = RemapDepthSlicesSubtask( + mpasClimatologyTask=mpasClimatologyTask, + parentTask=self, + climatologyName=fieldName, + variableList=[mpasFieldName], + seasons=seasons, + depths=depths, + comparisonGridNames=comparisonGridNames, + iselValues=iselValues) + + remapObservationsSubtask = RemapSoseClimatology( + parentTask=self, seasons=seasons, fileName=obsFileName, + outFilePrefix=obsFieldName, + fieldName=obsFieldName, + botFieldName='botTheta', + depths=depths, + comparisonGridNames=comparisonGridNames) + self.add_subtask(remapObservationsSubtask) + for comparisonGridName in comparisonGridNames: + for season in seasons: + for depth in depths: + subtask = PlotClimatologyMapSubtask( + parentTask=self, + season=season, + comparisonGridName=comparisonGridName, + remapMpasClimatologySubtask=remapClimatologySubtask, + remapObsClimatologySubtask=remapObservationsSubtask, + depth=depth) + + subtask.set_plot_info( + outFileLabel='tempSOSE', + fieldNameInTitle='Temperature', + mpasFieldName=mpasFieldName, + obsFieldName=obsFieldName, + observationTitleLabel=observationTitleLabel, + diffTitleLabel='Model - State Estimate', + unitsLabel=r'$^\circ$C', + imageCaption='Temperature', + galleryGroup='Temperature', + groupSubtitle=None, + groupLink='temp', + galleryName='State Estimate: SOSE') + + self.add_subtask(subtask) + # }}} + + # }}} + + +class ClimatologyMapSoseSalinity(AnalysisTask): # {{{ + """ + An analysis task for comparison of antarctic salinity against SOSE + fields + + Authors + ------- + Xylar Asay-Davis + """ + + def __init__(self, config, mpasClimatologyTask): # {{{ + """ + Construct the analysis task. 
+ + Parameters + ---------- + config : instance of MpasAnalysisConfigParser + Contains configuration options + + mpasClimatologyTask : ``MpasClimatologyTask`` + The task that produced the climatology to be remapped and plotted + + Authors + ------- + Xylar Asay-Davis + """ + fieldName = 'salinity' + # call the constructor from the base class (AnalysisTask) + super(ClimatologyMapSoseSalinity, self).__init__( + config=config, taskName='climatologyMapSoseSalinity', + componentName='ocean', + tags=['climatology', 'horizontalMap', 'sose', fieldName]) + + sectionName = self.taskName + + mpasFieldName = 'timeMonthly_avg_activeTracers_salinity' + iselValues = None + + observationTitleLabel = 'State Estimate (SOSE)' + + observationsDirectory = build_config_full_path( + config, 'oceanObservations', 'soseSubdirectory') + + obsFileName = \ + '{}/SOSE_2005-2010_monthly_salinity_6000.0x' \ + '6000.0km_10.0km_Antarctic_stereo.nc'.format(observationsDirectory) + obsFieldName = 'salinity' + + # read in what seasons we want to plot + seasons = config.getExpression(sectionName, 'seasons') + + if len(seasons) == 0: + raise ValueError('config section {} does not contain valid list ' + 'of seasons'.format(sectionName)) + + comparisonGridNames = config.getExpression(sectionName, + 'comparisonGrids') + + if len(comparisonGridNames) == 0: + raise ValueError('config section {} does not contain valid list ' + 'of comparison grids'.format(sectionName)) + + depths = config.getExpression(sectionName, 'depths') + + if len(depths) == 0: + raise ValueError('config section {} does not contain valid ' + 'list of depths'.format(sectionName)) + + # the variable 'timeMonthly_avg_landIceFreshwaterFlux' will be added to + # mpasClimatologyTask along with the seasons. 
+ remapClimatologySubtask = RemapDepthSlicesSubtask( + mpasClimatologyTask=mpasClimatologyTask, + parentTask=self, + climatologyName=fieldName, + variableList=[mpasFieldName], + seasons=seasons, + depths=depths, + comparisonGridNames=comparisonGridNames, + iselValues=iselValues) + + remapObservationsSubtask = RemapSoseClimatology( + parentTask=self, seasons=seasons, fileName=obsFileName, + outFilePrefix=obsFieldName, + fieldName=obsFieldName, + botFieldName='botSalinity', + depths=depths, + comparisonGridNames=comparisonGridNames) + self.add_subtask(remapObservationsSubtask) + for comparisonGridName in comparisonGridNames: + for season in seasons: + for depth in depths: + subtask = PlotClimatologyMapSubtask( + parentTask=self, + season=season, + comparisonGridName=comparisonGridName, + remapMpasClimatologySubtask=remapClimatologySubtask, + remapObsClimatologySubtask=remapObservationsSubtask, + depth=depth) + + subtask.set_plot_info( + outFileLabel='salinSOSE', + fieldNameInTitle='Salinity', + mpasFieldName=mpasFieldName, + obsFieldName=obsFieldName, + observationTitleLabel=observationTitleLabel, + diffTitleLabel='Model - State Estimate', + unitsLabel=r'PSU', + imageCaption='Salinity', + galleryGroup='Salinity', + groupSubtitle=None, + groupLink='salin', + galleryName='State Estimate: SOSE') + + self.add_subtask(subtask) + # }}} + + # }}} + + +class RemapSoseClimatology(RemapObservedClimatologySubtask): + # {{{ + """ + A subtask for reading and remapping SOSE fields to the comparison grid + + Authors + ------- + Xylar Asay-Davis + """ + + def __init__(self, parentTask, seasons, fileName, outFilePrefix, + fieldName, botFieldName, depths, + comparisonGridNames=['latlon'], + subtaskName='remapObservations'): + # {{{ + ''' + Construct one analysis subtask for each plot (i.e. each season and + comparison grid) and a subtask for computing climatologies. 
+ + Parameters + ---------- + parentTask : ``AnalysisTask`` + The parent (master) task for this subtask + + seasons : list of str + A list of seasons (keys in ``constants.monthDictionary``) over + which the climatology should be computed. + + fileName : str + The name of the observation file + + outFilePrefix : str + The prefix in front of output files and mapping files, typically + the name of the field being remapped + + fieldName : str + The name of the 3D field to remap + + botFieldName : str + The name of the same field as ``fieldName`` but sampled at the + sea floor + + depths : list of {None, float, 'top', 'bot'} + A list of depths at which the climatology will be sliced in the + vertical. + + comparisonGridNames : list of {'latlon', 'antarctic'}, optional + The name(s) of the comparison grid to use for remapping. + + subtaskName : str, optional + The name of the subtask + + Authors + ------- + Xylar Asay-Davis + + ''' + + self.fieldName = fieldName + self.botFieldName = botFieldName + self.depths = depths + + # call the constructor from the base class (AnalysisTask) + super(RemapSoseClimatology, self).__init__( + parentTask, seasons, fileName, outFilePrefix, + comparisonGridNames, subtaskName) + # }}} + + def get_observation_descriptor(self, fileName): # {{{ + ''' + get a MeshDescriptor for the observation grid + + Parameters + ---------- + fileName : str + observation file name describing the source grid + + Returns + ------- + obsDescriptor : ``MeshDescriptor`` + The descriptor for the observation grid + + Authors + ------- + Xylar Asay-Davis + ''' + + # create a descriptor of the observation grid using the x/y polar + # stereographic coordinates + projection = get_antarctic_stereographic_projection() + obsDescriptor = ProjectionGridDescriptor.read( + projection, fileName=fileName, xVarName='x', yVarName='y') + return obsDescriptor # }}} + + def build_observational_dataset(self, fileName): # {{{ + ''' + read in the data sets for observations, and possibly 
rename some + variables and dimensions + + Parameters + ---------- + fileName : str + observation file name + + Returns + ------- + dsObs : ``xarray.Dataset`` + The observational dataset + + Authors + ------- + Xylar Asay-Davis + ''' + + # Load MLD observational data + dsObs = xr.open_dataset(fileName) + + dsObs = mpas_xarray.subset_variables(dsObs, [self.fieldName, + self.botFieldName, + 'month', 'year']) + slices = [] + field = dsObs[self.fieldName] + botField = dsObs[self.botFieldName] + for depth in self.depths: + if depth == 'top': + slices.append(field.sel(method='nearest', depth=0.).drop( + 'depth')) + elif depth == 'bot': + slices.append(botField) + else: + slices.append(field.sel(method='nearest', depth=depth).drop( + 'depth')) + + depthNames = [str(depth) for depth in self.depths] + field = xr.concat(slices, dim='depthSlice') + + dsObs = xr.Dataset(data_vars={self.fieldName: field}, + coords={'depthSlice': depthNames}) + + return dsObs # }}} + + # }}} + +# vim: foldmethod=marker ai ts=4 sts=4 et sw=4 ft=python diff --git a/mpas_analysis/ocean/plot_climatology_map_subtask.py b/mpas_analysis/ocean/plot_climatology_map_subtask.py index 61c420a9f..dd717f522 100644 --- a/mpas_analysis/ocean/plot_climatology_map_subtask.py +++ b/mpas_analysis/ocean/plot_climatology_map_subtask.py @@ -65,6 +65,9 @@ class PlotClimatologyMapSubtask(AnalysisTask): # {{{ observationTitleLabel : str the title of the observations subplot + diffTitleLabel : str, optional + the title of the difference subplot + unitsLabel : str the units of the plotted field, to be displayed on color bars @@ -85,13 +88,18 @@ class PlotClimatologyMapSubtask(AnalysisTask): # {{{ galleryName : str the name of the gallery in which this plot belongs + depth : {None, float, 'top', 'bot'} + Depth at which to perform the comparison, 'top' for the sea surface + 'bot' for the sea floor + Authors ------- Luke Van Roekel, Xylar Asay-Davis, Milena Veneziani """ def __init__(self, parentTask, season, 
comparisonGridName, - remapMpasClimatologySubtask, remapObsClimatologySubtask): + remapMpasClimatologySubtask, remapObsClimatologySubtask, + depth=None): # {{{ ''' Construct one analysis subtask for each plot (i.e. each season and @@ -117,6 +125,10 @@ def __init__(self, parentTask, season, comparisonGridName, The subtask for remapping the observational climatology that this subtask will plot + depth : {float, 'top', 'bot'}, optional + Depth the data is being plotted, 'top' for the sea surface + 'bot' for the sea floor + Authors ------- Xylar Asay-Davis @@ -124,10 +136,18 @@ def __init__(self, parentTask, season, comparisonGridName, ''' self.season = season + self.depth = depth self.comparisonGridName = comparisonGridName self.remapMpasClimatologySubtask = remapMpasClimatologySubtask self.remapObsClimatologySubtask = remapObsClimatologySubtask subtaskName = 'plot{}_{}'.format(season, comparisonGridName) + + if depth is None: + self.depthSuffix = '' + else: + self.depthSuffix = 'depth_{}'.format(depth) + subtaskName = '{}_{}'.format(subtaskName, self.depthSuffix) + config = parentTask.config taskName = parentTask.taskName tags = parentTask.tags @@ -146,7 +166,8 @@ def __init__(self, parentTask, season, comparisonGridName, def set_plot_info(self, outFileLabel, fieldNameInTitle, mpasFieldName, obsFieldName, observationTitleLabel, unitsLabel, imageCaption, galleryGroup, groupSubtitle, groupLink, - galleryName): # {{{ + galleryName, diffTitleLabel='Model - Observations'): + # {{{ """ Store attributes related to plots, plot file names and HTML output. 
@@ -187,6 +208,9 @@ def set_plot_info(self, outFileLabel, fieldNameInTitle, mpasFieldName, galleryName : str the name of the gallery in which this plot belongs + diffTitleLabel : str, optional + the title of the difference subplot + Authors ------- Xylar Asay-Davis @@ -196,6 +220,7 @@ def set_plot_info(self, outFileLabel, fieldNameInTitle, mpasFieldName, self.mpasFieldName = mpasFieldName self.obsFieldName = obsFieldName self.observationTitleLabel = observationTitleLabel + self.diffTitleLabel = diffTitleLabel self.unitsLabel = unitsLabel # xml/html related variables @@ -204,6 +229,22 @@ def set_plot_info(self, outFileLabel, fieldNameInTitle, mpasFieldName, self.groupSubtitle = groupSubtitle self.groupLink = groupLink self.galleryName = galleryName + + season = self.season + depth = self.depth + if depth is None: + self.fieldNameInTitle = fieldNameInTitle + self.thumbnailDescription = season + elif depth == 'top': + self.fieldNameInTitle = 'Sea Surface {}'.format(fieldNameInTitle) + self.thumbnailDescription = '{} surface'.format(season) + elif depth == 'bot': + self.fieldNameInTitle = 'Sea Floor {}'.format(fieldNameInTitle) + self.thumbnailDescription = '{} floor'.format(season) + else: + self.fieldNameInTitle = '{} at z={} m'.format(fieldNameInTitle, + depth) + self.thumbnailDescription = '{} z={} m'.format(season, depth) # }}} def setup_and_check(self): # {{{ @@ -230,16 +271,18 @@ def setup_and_check(self): # {{{ mainRunName = config.get('runs', 'mainRunName') self.xmlFileNames = [] - self.filePrefixes = {} - if self.comparisonGridName == 'latlon': - self.filePrefix = '{}_{}_{}_years{:04d}-{:04d}'.format( - self.outFileLabel, mainRunName, - self.season, self.startYear, self.endYear) - else: - self.filePrefix = '{}_{}_{}_{}_years{:04d}-{:04d}'.format( - self.outFileLabel, self.comparisonGridName, - mainRunName, self.season, self.startYear, self.endYear) + prefixPieces = [self.outFileLabel] + if self.comparisonGridName != 'latlon': + 
prefixPieces.append(self.comparisonGridName) + prefixPieces.append(mainRunName) + if self.depth is not None: + prefixPieces.append(self.depthSuffix) + years = 'years{:04d}-{:04d}'.format(self.startYear, self.endYear) + prefixPieces.extend([self.season, years]) + + self.filePrefix = '_'.join(prefixPieces) + self.xmlFileNames.append('{}/{}.xml'.format(self.plotsDirectory, self.filePrefix)) # }}} @@ -254,6 +297,7 @@ def run_task(self): # {{{ """ season = self.season + depth = self.depth comparisonGridName = self.comparisonGridName self.logger.info("\nPlotting 2-d maps of {} climatologies for {} on " "the {} grid...".format(self.fieldNameInTitle, @@ -266,6 +310,16 @@ def run_task(self): # {{{ remappedModelClimatology = xr.open_dataset(remappedFileName) + if depth is not None: + if str(depth) not in remappedModelClimatology.depthSlice.values: + raise KeyError('The climatology you are attempting to perform ' + 'depth slices of was originally created\n' + 'without depth {}. You will need to delete and ' + 'regenerate the climatology'.format(depth)) + + remappedModelClimatology = remappedModelClimatology.sel( + depthSlice=str(depth), drop=True) + # now the observations remappedFileName = self.remapObsClimatologySubtask.get_file_name( stage='remapped', season=season, @@ -273,6 +327,16 @@ def run_task(self): # {{{ remappedObsClimatology = xr.open_dataset(remappedFileName) + if depth is not None: + if str(depth) not in remappedObsClimatology.depthSlice.values: + raise KeyError('The climatology you are attempting to perform ' + 'depth slices of was originally created\n' + 'without depth {}. 
You will need to delete and ' + 'regenerate the climatology'.format(depth)) + + remappedObsClimatology = remappedObsClimatology.sel( + depthSlice=str(depth), drop=True) + if self.comparisonGridName == 'latlon': self._plot_latlon(remappedModelClimatology, remappedObsClimatology) elif self.comparisonGridName == 'antarctic': @@ -327,7 +391,7 @@ def _plot_latlon(self, remappedModelClimatology, remappedObsClimatology): title=title, modelTitle='{}'.format(mainRunName), obsTitle=self.observationTitleLabel, - diffTitle='Model-Observations', + diffTitle=self.diffTitleLabel, cbarlabel=self.unitsLabel) caption = '{} {}'.format(season, self.imageCaption) @@ -340,7 +404,7 @@ def _plot_latlon(self, remappedModelClimatology, remappedObsClimatology): groupSubtitle=self.groupSubtitle, groupLink=self.groupLink, gallery=self.galleryName, - thumbnailDescription=season, + thumbnailDescription=self.thumbnailDescription, imageDescription=caption, imageCaption=caption) @@ -402,7 +466,7 @@ def _plot_antarctic(self, remappedModelClimatology, title=title, modelTitle='{}'.format(mainRunName), obsTitle=self.observationTitleLabel, - diffTitle='Model - Observations', + diffTitle=self.diffTitleLabel, cbarlabel=self.unitsLabel) upperGridName = comparisonGridName[0].upper() + comparisonGridName[1:] @@ -417,7 +481,7 @@ def _plot_antarctic(self, remappedModelClimatology, groupSubtitle=self.groupSubtitle, groupLink=self.groupLink, gallery=self.galleryName, - thumbnailDescription=season, + thumbnailDescription=self.thumbnailDescription, imageDescription=caption, imageCaption=caption) diff --git a/mpas_analysis/ocean/remap_depth_slices_subtask.py b/mpas_analysis/ocean/remap_depth_slices_subtask.py new file mode 100644 index 000000000..e15fa20fa --- /dev/null +++ b/mpas_analysis/ocean/remap_depth_slices_subtask.py @@ -0,0 +1,225 @@ +from __future__ import absolute_import, division, print_function, \ + unicode_literals + +import xarray as xr +import numpy as np + +from ..shared.climatology import 
RemapMpasClimatologySubtask + +from ..shared.mpas_xarray import mpas_xarray + +from .utility import compute_zmid + + +class RemapDepthSlicesSubtask(RemapMpasClimatologySubtask): # {{{ + """ + A task for creating and remapping climatologies of MPAS fields sliced + at a given set of depths + + Attributes + ---------- + depths : list of {None, float, 'top', 'bot'} + A list of depths at which the climatology will be sliced in the + vertical. + + maxLevelCell : xarray.DataArray + The vertical index of the bottom cell in MPAS results + + verticalIndices : xarray.DataArray + The vertical indices of slice to be plotted + + Authors + ------- + Xylar Asay-Davis + """ + + def __init__(self, mpasClimatologyTask, parentTask, climatologyName, + variableList, seasons, depths, comparisonGridNames=['latlon'], + iselValues=None): + # {{{ + ''' + Construct the analysis task and adds it as a subtask of the + ``parentTask``. + + Parameters + ---------- + mpasClimatologyTask : ``MpasClimatologyTask`` + The task that produced the climatology to be remapped + + parentTask : ``AnalysisTask`` + The parent task, used to get the ``taskName``, ``config`` and + ``componentName`` + + climatologyName : str + A name that describes the climatology (e.g. a short version of + the important field(s) in the climatology) used to name the + subdirectories for each stage of the climatology + + variableList : list of str + A list of variable names in ``timeSeriesStatsMonthly`` to be + included in the climatologies + + seasons : list of str, optional + A list of seasons (keys in ``shared.constants.monthDictionary``) + to be computed or ['none'] (not ``None``) if only monthly + climatologies are needed. + + depths : list of {None, float, 'top', 'bot'} + A list of depths at which the climatology will be sliced in the + vertical. + + comparisonGridNames : list of {'latlon', 'antarctic'}, optional + The name(s) of the comparison grid to use for remapping. 
+ + iselValues : dict, optional + A dictionary of dimensions and indices (or ``None``) used to + extract a slice of the MPAS field(s). + + Authors + ------- + Xylar Asay-Davis + ''' + + self.depths = depths + + # call the constructor from the base class + # (RemapMpasClimatologySubtask) + super(RemapDepthSlicesSubtask, self).__init__( + mpasClimatologyTask, parentTask, climatologyName, variableList, + seasons, comparisonGridNames, iselValues) + + + def run_task(self): # {{{ + """ + Compute climatologies of T or S from ACME/MPAS output + + This function has been overridden to load ``maxLevelCell`` from a + restart file for later use in indexing bottom T and S. + ``verticalIndex`` is also computed for later indexing of + the model level. It then simply calls the run function from + ClimatologyMapOcean. + + Authors + ------- + Xylar Asay-Davis + """ + + # first, load the land-ice mask from the restart file + ds = xr.open_dataset(self.restartFileName) + ds = mpas_xarray.subset_variables(ds, ['maxLevelCell', + 'bottomDepth', + 'layerThickness']) + ds = ds.isel(Time=0) + + self.maxLevelCell = ds.maxLevelCell - 1 + + depthNames = [str(depth) for depth in self.depths] + + zMid = compute_zmid(ds.bottomDepth, ds.maxLevelCell, + ds.layerThickness) + + nVertLevels = zMid.shape[1] + zMid.coords['verticalIndex'] = \ + ('nVertLevels', + np.arange(nVertLevels)) + + zTop = zMid.isel(nVertLevels=0) + # Each vertical layer has at most one non-NaN value so the "sum" + # over the vertical is used to collapse the array in the vertical + # dimension + zBot = zMid.where(zMid.verticalIndex == self.maxLevelCell).sum( + dim='nVertLevels') + + verticalIndices = np.zeros((len(self.depths), ds.dims['nCells']), int) + + mask = np.zeros(verticalIndices.shape, bool) + + for depthIndex, depth in enumerate(self.depths): + depth = self.depths[depthIndex] + if depth == 'top': + # switch to zero-based index + verticalIndices[depthIndex, :] = 0 + mask[depthIndex, :] = self.maxLevelCell.values >= 0 + 
elif depth == 'bot': + # switch to zero-based index + verticalIndices[depthIndex, :] = self.maxLevelCell.values + mask[depthIndex, :] = self.maxLevelCell.values >= 0 + else: + + verticalIndex = np.argmin(np.abs(zMid-depth), axis=1) + + verticalIndices[depthIndex, :] = verticalIndex.values + mask[depthIndex, :] = np.logical_and(depth <= zTop, + depth >= zBot).values + + self.verticalIndices = \ + xr.DataArray.from_dict({'dims': ('depthSlice', 'nCells'), + 'coords': {'depthSlice': + {'dims': ('depthSlice',), + 'data': depthNames}}, + 'data': verticalIndices}) + self.verticalIndexMask = \ + xr.DataArray.from_dict({'dims': ('depthSlice', 'nCells'), + 'coords': {'depthSlice': + {'dims': ('depthSlice',), + 'data': depthNames}}, + 'data': mask}) + + # then, call run from the base class (RemapMpasClimatologySubtask), + # which will perform the main function of the task + super(RemapDepthSlicesSubtask, self).run_task() + + def customize_masked_climatology(self, climatology): # {{{ + """ + Uses ``verticalIndex`` to slice the 3D climatology field at each + requested depth. The resulting field has the depth appended to + the variable name. 
+ + Parameters + ---------- + climatology : ``xarray.Dataset`` object + the climatology data set + + Returns + ------- + climatology : ``xarray.Dataset`` object + the modified climatology data set + + Authors + ------- + Xylar Asay-Davis + """ + + if self.depths is None: + return climatology + + climatology.coords['verticalIndex'] = \ + ('nVertLevels', + np.arange(climatology.dims['nVertLevels'])) + + depthNames = [str(depth) for depth in self.depths] + + climatology.coords['depthSlice'] = ('depthSlice', depthNames) + + for variableName in self.variableList: + if 'nVertLevels' not in climatology[variableName].dims: + continue + + # mask only the values with the right vertical index + da = climatology[variableName].where( + climatology.verticalIndex == self.verticalIndices) + + # Each vertical layer has at most one non-NaN value so the "sum" + # over the vertical is used to collapse the array in the vertical + # dimension + climatology[variableName] = \ + da.sum(dim='nVertLevels').where(self.verticalIndexMask) + + climatology = climatology.drop('verticalIndex') + + return climatology # }}} + + # }}} + + +# vim: foldmethod=marker ai ts=4 sts=4 et sw=4 ft=python diff --git a/mpas_analysis/ocean/utility.py b/mpas_analysis/ocean/utility.py new file mode 100644 index 000000000..8600977ef --- /dev/null +++ b/mpas_analysis/ocean/utility.py @@ -0,0 +1,62 @@ +""" +A utility for computing common ocean fields (e.g. 
zMid) from datasets + +Authors +------- +Xylar Asay-Davis +""" + +from __future__ import absolute_import, division, print_function, \ + unicode_literals + +import numpy +import xarray + + +def compute_zmid(bottomDepth, maxLevelCell, layerThickness): # {{{ + """ + Computes zMid given data arrays for bottomDepth, maxLevelCell and + layerThickness + + Parameters + ---------- + bottomDepth : ``xarray.DataArray`` + the depth of the ocean bottom (positive) + + maxLevelCell : ``xarray.DataArray`` + the 1-based vertical index of the bottom of the ocean + + layerThickness : ``xarray.DataArray`` + the thickness of MPAS-Ocean layers (possibly as a function of time) + + Returns + ------- + zMid : ``xarray.DataArray`` + the vertical coordinate defining the middle of each layer, masked below + the bathymetry + + Authors + ------- + Xylar Asay-Davis + """ + + nVertLevels = \ + layerThickness.shape[layerThickness.dims.index('nVertLevels')] + + vertIndex = \ + xarray.DataArray.from_dict({'dims': ('nVertLevels',), + 'data': numpy.arange(nVertLevels)}) + + layerThickness = layerThickness.where(vertIndex < maxLevelCell) + + thicknessSum = layerThickness.sum(dim='nVertLevels') + thicknessCumSum = layerThickness.cumsum(dim='nVertLevels') + zSurface = -bottomDepth+thicknessSum + + zLayerBot = zSurface - thicknessCumSum + + zMid = zLayerBot + 0.5*layerThickness + + return zMid # }}} + +# vim: foldmethod=marker ai ts=4 sts=4 et sw=4 ft=python diff --git a/mpas_analysis/shared/plot/plotting.py b/mpas_analysis/shared/plot/plotting.py index fd86777c8..78d72dbb4 100644 --- a/mpas_analysis/shared/plot/plotting.py +++ b/mpas_analysis/shared/plot/plotting.py @@ -716,10 +716,19 @@ def plot_panel(ax, title, array, cmap, norm, ticks): (cmapDiff, normDiff) = _setup_colormap_and_norm( config, colorMapSectionName, suffix='Difference') - colorbarTicksResult = config.getExpression(colorMapSectionName, - 'colorbarTicksResult') - colorbarTicksDifference = config.getExpression( - colorMapSectionName, 
'colorbarTicksDifference') + if config.has_option(colorMapSectionName, 'colorbarTicksResult'): + colorbarTicksResult = config.getExpression(colorMapSectionName, + 'colorbarTicksResult', + usenumpyfunc=True) + else: + colorbarTicksResult = None + if config.has_option(colorMapSectionName, 'colorbarTicksDifference'): + colorbarTicksDifference = config.getExpression( + colorMapSectionName, 'colorbarTicksDifference', + usenumpyfunc=True) + else: + colorbarTicksDifference = None + + elif colorMapType == 'indexed': (cmapModelObs, colorbarTicksResult) = setup_colormap( diff --git a/preprocess_observations/README.md b/preprocess_observations/README.md new file mode 100644 index 000000000..ba0dc5c90 --- /dev/null +++ b/preprocess_observations/README.md @@ -0,0 +1,8 @@ +# Observations Preprocessing Scripts + +This directory contains scripts for preprocessing observations. To run the +scripts, copy them to the base directory (`..`), change the paths to the +original observational data sets (see comments in each script for sources +of the relevant data) and run the script to perform preprocessing. The +scripts typically compute climatologies and/or perform remapping to a +comparison grid. 
\ No newline at end of file diff --git a/preprocess_observations/mds.py b/preprocess_observations/mds.py new file mode 100644 index 000000000..7e81bfa13 --- /dev/null +++ b/preprocess_observations/mds.py @@ -0,0 +1,605 @@ +import sys +import re +import glob +import numpy as np +from operator import mul + +debug = False + +################################################################################ +# metafile parsing + +# for python2.5 +try: next +except NameError: + def next ( obj ): return obj.next() + +_currentline = '' + +class ParseError(ValueError): + def __str__(self): + metafile = self.args[0] + lines = self.args[1:] + try: + name = metafile.name + except AttributeError: + name = metafile + + return '\n'.join(('in metafile: '+name,) + + lines + + ('in: ' + _currentline,)) + + +# these deal with comments in the metafile + +_comment_pattern = re.compile( + r'//.*?$|/\*.*?\*/|\'(?:\\.|[^\\\'])*\'|"(?:\\.|[^\\"])*"', + re.DOTALL | re.MULTILINE + ) + +def _comment_replacer(match): + s = match.group(0) + if s.startswith('/'): + return "" + else: + return s + +def strip_comments(text): + """ strips C and C++ style comments from text """ + return re.sub(_comment_pattern, _comment_replacer, text) + + +_string_pattern = re.compile(r"'(.*)'$") + +def parse1(s): + """ convert one item to appropriate type """ + m = _string_pattern.match(s) + if m: + s = m.group(1) + # unquote quotes + s = re.sub(r"''","'",s) + return s + + if '.' 
in s or 'e' in s.lower(): + return float(s) + else: + try: + return int(s) + except ValueError: + raise ParseError("Cannot parse value: " + s) + + +_closing = {'[':']', + '{':'}', + } + +def parsemeta(metafile): + """ parses metafile (file object or filename) into a dictionary of lists + of floats, ints or strings + """ + global _currentline + + try: + lines = open(metafile) + except TypeError: + lines = iter(metafile) + + d = {} + for line in lines: + line = strip_comments(line) + # skip empty lines + if re.match(r'\s*$', line): + continue + + m = re.match(r' *(\w*) *= *(.*?) *$', line) + if m: + key,line = m.groups() + else: + raise ParseError(metafile,line) + + # look for the opening delimiter ('[' or '{') + opening = line[0] + try: + closing = _closing[opening] + except KeyError: + raise ParseError(metafile,line,'Values must be enclosed in [] or {}.') + + # read more lines until a matching closing delimiter is found + while closing not in line: + try: + nextline = next(lines) + except StopIteration: + raise ParseError(metafile,line,'No closing ' + closing + ' found.') + + line += ' ' + strip_comments(nextline).rstrip() + + if line[-2:] != closing + ';': + raise ParseError(metafile,line, + 'Values must be enclosed in "[ ];" or "{ };".') + + # remove delimiters + line = line[1:-2].strip(" ,") + _currentline = line + + if opening == '[': + # [] can contain any type of values, separated by commas + val = [ parse1(s) for s in re.split(r',? 
*',line) ] + else: + # {} can only contain single quote-delimited strings separated by space + val = [ s.rstrip() for s in re.split(r"' *'", line.strip("'")) ] + + d[key] = val + + return d + +################################################################################ + +def message(*args): + sys.stdout.write(' '.join([str(s) for s in args]) + '\n') + + +def warning(*args): + sys.stderr.write(' '.join([str(s) for s in args]) + '\n') + + +def aslist(i): + """ if iterable, turn into list, otherwise put into list """ + try: + res = list(i) + except TypeError: + res = [i] + return res + + +def fromfileshape(filename,dtype,shape=None,**kwargs): + return np.fromfile(filename, dtype, **kwargs).reshape(shape) + + +def scanforfiles(fname): + """ return list of iteration numbers for which metafiles with base fname exist """ + import glob + allfiles = glob.glob(fname + '.' + 10*'[0-9]' + '.001.001.meta') + if len(allfiles) == 0: + allfiles = glob.glob(fname + '.' + 10*'[0-9]' + '.meta') + off = -5 + else: + off = -13 + + itrs = [ int(s[off-10:off]) for s in allfiles ] + itrs.sort() + return itrs + + +def readmeta(f): + """ read meta file and extract tile/timestep-specific parameters """ + meta = parsemeta(f) + dimList = meta.pop('dimList') + # pythonize + gdims = tuple(dimList[-3::-3]) + i0s = [ i-1 for i in dimList[-2::-3] ] + ies = dimList[-1::-3] + # remove file-specific parameters + timeInterval = meta.pop('timeInterval', None) + timeStepNumber = meta.pop('timeStepNumber', None) + map2gl = meta.pop('map2glob', None) + # put back only global dimensions + meta['dimList'] = list(gdims[::-1]) + return gdims,i0s,ies,timeStepNumber,timeInterval,map2gl,meta + + +_typeprefixes = {'ieee-be':'>', + 'b' :'>', + '>' :'>', + 'ieee-le':'<', + 'l' :'<', + '<' :'<', + } +_typesuffixes = {'float32':'f4', + 'float64':'f8', + } + +def rdmds(fnamearg,itrs=-1,machineformat='b',rec=None,fill_value=0, + returnmeta=False,astype=float,region=None,lev=(), + 
usememmap=False,mm=False,squeeze=True,verbose=False): + """ a = rdmds(fname,...) + a = rdmds(fname,itrs,...) + a,its,meta = rdmds(fname,...,returnmeta=True) + + Read meta-data files as written by MITgcm. + + Without itrs, will try to read + + fname.meta or fname.001.001.meta, ... + + If itrs is a list of integers of an integer, it will read the corresponding + + fname.000000iter.meta, ... + + If itrs is NaN, it will read all iterations for which files are found. + If itrs is Inf, it will read the highest iteration found. + + fname may contain shell wildcards, which is useful for tile files organized + into directories, e.g., + + T = rdmds('prefix*/T', 2880) + + will read prefix0000/T.0000002880.*, prefix0001/T.0000002880.*, ... + (and any others that match the wildcard, so be careful how you name things!) + + Returns: + + a :: numpy array of the data read + its :: list of iteration numbers read (only if itrs=NaN or Inf) + meta :: dictionary of metadata (only if returnmeta=True) + + Keyword arguments: + + machineformat :: endianness ('b' or 'l', default 'b') + rec :: list of records to read (default all) + useful for pickups and multi-field diagnostics files + fill_value :: fill value for missing (blank) tiles (default 0) + astype :: data type to return (default: double precision) + None: keep data type/precision of file + region :: (x0,x1,y0,y1) read only this region (default (0,nx,0,ny)) + lev :: list of levels to read, or, for multiple dimensions + (excluding x,y), tuple(!) 
of lists (see examples below) + usememmap :: if True, use a memory map for reading data (default False) + recommended when using lev, or region with global files + to save memory and, possibly, time + + Examples: + + XC = rdmds('XC') + XC = rdmds('res_*/XC') + T = rdmds('T.0000002880') + T = rdmds('T',2880) + T2 = rdmds('T',[2880,5760]) + T,its = rdmds('T',numpy.Inf) + VVEL = rdmds('pickup',2880,rec=range(50,100)) + a5 = rdmds('diags',2880,rec=0,lev=[5]) + a = rdmds('diags',2880,rec=0,lev=([0],[0,1,5,6,7])) + from numpy import r_ + a = rdmds('diags',2880,rec=0,lev=([0],r_[:2,5:8])) # same as previous + a = rdmds('diags',2880,rec=0)[0, [0,1,5,6,7], ...] # same, but less efficient + a = rdmds('diags',2880)[0, 0, [0,1,5,6,7], ...] # even less efficient + """ + import functools + usememmap = usememmap or mm + if usememmap: + readdata = np.memmap + else: + readdata = fromfileshape + + # add iteration number to file name unless itrs is -1 + additrs = itrs != -1 + if itrs is np.nan: + # all iterations + itrs = scanforfiles(fnamearg) + if verbose: warning('Reading {0} time levels: '.format(len(itrs)), *itrs) + returnits = True + itrsislist = True + elif itrs is np.inf: + # last iteration + itrs = scanforfiles(fnamearg) + if len(itrs): + if verbose: warning('Found {0} time levels, reading'.format(len(itrs)), itrs[-1]) + else: + if verbose: warning('Found 0 time levels for {}'.format(fnamearg)) + itrs = itrs[-1:] + returnits = True + itrsislist = False + else: + returnits = False + itrsislist = np.iterable(itrs) + + # always make itrs a list + itrs = aslist(itrs) + + allrec = rec is None + reclist = aslist(rec) + if not isinstance(lev,tuple): + lev = (lev,) + levs = tuple( aslist(l) for l in lev ) + levdims = tuple(len(l) for l in levs) + levinds = np.ix_(*levs) + nlev = len(levdims) + + if usememmap: + recsatonce = True + readdata = np.memmap + else: + recsatonce = allrec + readdata = fromfileshape + + try: + typepre = _typeprefixes[machineformat] + except KeyError: + raise 
ValueError('Allowed machineformats: ' + ' '.join(_typeprefixes)) + + arr = None + metaref = {} + timeStepNumbers = [] + timeIntervals = [] + for iit,it in enumerate(itrs): + if additrs: + fname = fnamearg + '.{0:010d}'.format(int(it)) + else: + fname = fnamearg + + metafiles = glob.glob(fname + 2*('.'+3*'[0-9]') + '.meta') or glob.glob(fname+'.meta') + if len(metafiles) == 0: + raise IOError('No files found for ' + fname + '.meta') + + if verbose: warning(metafiles[0]) + + if debug: warning('Found',len(metafiles),'metafiles for iteration',it) + + for metafile in metafiles: + gdims,i0s,ies,timestep,timeinterval,map2gl,meta = readmeta(metafile) + if arr is None: + # initialize, allocate + try: + dataprec, = meta['dataprec'] + except KeyError: + dataprec, = meta['format'] + tp = typepre + _typesuffixes[dataprec] + size = np.dtype(tp).itemsize + if astype is None: astype = tp + recshape = tuple( ie-i0 for i0,ie in zip(i0s,ies) ) + count = functools.reduce(mul, recshape) + nrecords, = meta['nrecords'] + tileshape = (nrecords,) + recshape + if allrec: + reclist = range(nrecords) + recinds = np.s_[:,] + levinds + else: + recinds = np.ix_(reclist, *levs) + + if region is None: + ri0,rie,rj0,rje = 0,gdims[-1],0,gdims[-2] + else: + ri0,rie,rj0,rje = region + if ri0 < 0: ri0 += gdims[-1] + if rie < 0: rie += gdims[-1] + if rj0 < 0: rj0 += gdims[-2] + if rje < 0: rje += gdims[-2] + + assert nlev+2 <= len(gdims) + rdims = levdims + gdims[len(levdims):-2] + (rje-rj0,rie-ri0) + # always include itrs and rec dimensions and squeeze later + arr = np.empty((len(itrs),len(reclist))+rdims, astype) + arr[...] 
= fill_value + metaref = meta + else: + if meta != metaref: + raise ValueError('Meta files not compatible') + + datafile = metafile[:-4] + 'data' + + if region is not None: + if map2gl is None: + # overlap of tile with region: + i0 = min(rie, max(ri0, i0s[-1])) + ie = min(rie, max(ri0, ies[-1])) + j0 = min(rje, max(rj0, i0s[-2])) + je = min(rje, max(rj0, ies[-2])) + # source indices + I0 = i0 - i0s[-1] + Ie = ie - i0s[-1] + J0 = j0 - i0s[-2] + Je = je - i0s[-2] + # target indices + i0s[-1] = i0 - ri0 + ies[-1] = ie - ri0 + i0s[-2] = j0 - rj0 + ies[-2] = je - rj0 + else: + raise NotImplementedError('Region selection is not implemented for map2glob != [0,1]') + + sl = tuple( slice(i0,ie) for i0,ie in zip(i0s,ies) ) + if map2gl is None: + # part of arr that will receive tile (all records) + arrtile = arr[(iit,slice(None))+sl] + else: + ny,nx = arr.shape[-2:] + i0 = i0s[-1] + j0 = i0s[-2] + ie = ies[-1] + je = ies[-2] + # "flat" stride for j + jstride = map2gl[1]*nx + map2gl[0] + n = (je-j0)*jstride + # start of a jstride by je-j0 block that contains this tile + ii0 = min(i0+nx*j0, nx*ny-n) + # tile starts at ioff+i0 + ioff = nx*j0 - ii0 + # flatten x,y dimensions + arrflat = arr.reshape(arr.shape[:-2]+(nx*ny,)) + # extract tile + arrmap = arrflat[...,ii0:ii0+n].reshape(arr.shape[:-2]+(je-j0,jstride))[...,:,ioff+i0:ioff+ie] + # slice non-x,y dimensions (except records) + arrtile = arrmap[(iit,slice(None))+sl[:-2]] + del arrflat,arrmap + + if recsatonce: + if region is None: + arrtile[...] = readdata(datafile, tp, shape=tileshape)[recinds] + else: + if Ie > I0 and Je > J0: + if debug: message(datafile, I0,Ie,J0,Je) + arrtile[...] 
= readdata(datafile, tp, shape=tileshape)[recinds + np.s_[...,J0:Je,I0:Ie]] + else: + f = open(datafile) + for irec,recnum in enumerate(reclist): + if recnum < 0: recnum += nrecords + f.seek(recnum*count*size) + if region is None: + arrtile[irec] = np.fromfile(f, tp, count=count).reshape(recshape)[levinds] + else: + if Ie > I0 and Je > J0: + if debug: message(datafile, I0,Ie,J0,Je) + tilerec = np.fromfile(f, tp, count=count).reshape(recshape) + arrtile[irec] = tilerec[levinds + np.s_[...,J0:Je,I0:Ie]] + f.close() + + if timestep is not None: + timeStepNumbers.extend(timestep) + + if timeinterval is not None: + timeIntervals.append(timeinterval) + + # put list of iteration numbers back into metadata dictionary + if len(timeStepNumbers): + metaref['timeStepNumber'] = timeStepNumbers + + if len(timeIntervals): + metaref['timeInterval'] = timeIntervals + + if arr is None: + arr = np.array([]) + else: + # squeeze singleton iteration, record and level dimensions like matlab version + dims = (len(itrs),len(reclist)) + levdims + if squeeze: + # squeeze all singleton dimensions + squeezed = tuple( d for d in dims if d > 1 ) + else: + # squeeze all that came from scalar arguments + keepers = [itrsislist, np.iterable(rec)] + [np.iterable(l) for l in lev] + squeezed = tuple( d for d,keep in zip(dims, keepers) if keep ) + + arr = arr.reshape(squeezed+arr.shape[2+nlev:]) + + if returnmeta: + meta = dict((k.lower(),v) for k,v in metaref.items()) + return arr,itrs,meta +# elif returnits: +# return arr,itrs + else: + return arr + + +def wrmds(fbase, arr, itr=None, dataprec='float32', ndims=None, nrecords=None, + times=None, fields=None, simulation=None, machineformat='b', + deltat=None, dimlist=None): + ''' wrmds(fbase, arr, itr=None, ...) + + Write array arr to an mds meta/data file set. If itr is given, + the files will be named fbase.0000000itr.data and fbase.0000000itr.meta, + otherwise just fbase.data and fbase.meta. 
+ + Parameters + ---------- + dataprec :: precision of resulting file ('float32' or 'float64') + ndims :: number of non-record dimensions; extra (leading) dimensions + will be folded into 1 record dimension + nrecords :: number of records; will fold as many leading dimensions as + necessary (has to match shape!) + times :: times to write into meta file. Either a single float or a list + of two for a time interval + fields :: list of fields + simulation :: string describing the simulation + machineformat :: 'b' or 'l' for big or little endian + deltat :: time step; provide in place of either times or itr to have one + computed from the other + dimlist :: dimensions as will be stored in file (only useful when passing + meta data from an existing file to wrmds as **kwargs) + ''' + if type(dataprec) == type([]): dataprec, = dataprec + if type(ndims) == type([]): ndims, = ndims + if type(nrecords) == type([]): nrecords, = nrecords + if type(simulation) == type([]): simulation, = simulation + if type(machineformat) == type([]): machineformat, = machineformat + if type(deltat) == type([]): deltat, = deltat + + tp = _typeprefixes[machineformat] + try: + tp = tp + _typesuffixes[dataprec] + except KeyError: + raise ValueError("dataprec must be 'float32' or 'float64'.") + + if ndims is None: + if nrecords is None: + ndims = min(3,len(arr.shape)) + else: + # see how many leading dims we need to make up nrecords + dims = list(arr.shape[::-1]) + n = 1 + while n < nrecords: + n *= dims.pop() + + assert n == nrecords + ndims = len(dims) + + dims = arr.shape[-1:-ndims-1:-1] + nrec = np.prod(arr.shape[:-ndims], dtype=int) + if nrecords is not None and nrecords != nrec: + raise ValueError('Shape/nrecords mismatch') + if dimlist is not None and tuple(dimlist) != dims: + raise ValueError('Shape/dimlist mismatch: {} vs {}'.format(dims, dimlist)) + + if arr.ndim > ndims + 1: + sys.stderr.write("Warning: folding several dimensions into record dimension.\n") + +# arr = 
arr.reshape((-1,)+arr.shape[-ndims:]) + + if times is not None: + try: + iter(times) + except TypeError: + times = [ times ] + + if deltat is not None: + if itr is None: + itr = int(times[-1]//deltat) + elif times is None: + times = [ deltat*itr ] + else: + sys.stderr.write('Warning: discarding deltat.\n') + + if itr is not None: + fbase = fbase + '.{:010d}'.format(itr) + + with open(fbase + '.meta', 'w') as f: + if simulation is not None: + f.write(" simulation = { '" + simulation + "' };\n") + + f.write(" nDims = [ {:3d} ];\n".format(ndims)) + + if max(dims) < 10000: + fmt = '{:5d}' + else: + fmt = '{:10d}' + + fmt = fmt + ',' + fmt + ',' + fmt + + f.write(" dimList = [\n " + + ",\n ".join(fmt.format(d,1,d) for d in dims) + + "\n ];\n") + + # skipping m2gl + + f.write(" dataprec = [ '" + dataprec + "' ];\n") + + f.write(" nrecords = [ {:5d} ];\n".format(nrec)) + + if itr is not None: + f.write(" timeStepNumber = [ {:10d} ];\n".format(itr)) + + if times is not None: + f.write(" timeInterval = [" + + "".join("{:20.12E}".format(t) for t in times) + + " ];\n") + + if fields is not None: + nflds = len(fields) + f.write(" nFlds = [ {:4d} ];\n".format(nflds)) + f.write(" fldList = {\n") + for row in range((nflds+19)//20): + for field in fields[20*row:20*(row+1)]: + f.write(" '{:<8s}'".format(field)) + f.write("\n") + f.write(" };\n") + + arr.astype(tp).tofile(fbase + '.data') + diff --git a/preprocess_observations/remap_SOSE_T_S.py b/preprocess_observations/remap_SOSE_T_S.py new file mode 100644 index 000000000..17587af55 --- /dev/null +++ b/preprocess_observations/remap_SOSE_T_S.py @@ -0,0 +1,211 @@ +import numpy +import xarray +import matplotlib.pyplot as plt +import matplotlib.colors as colors +import sys +from scipy.io import loadmat +import os + +from mpas_analysis.shared.interpolation import Remapper +from mpas_analysis.shared.grid import LatLonGridDescriptor +from mpas_analysis.shared.climatology.climatology \ + import 
def get_bottom_indices(cellFraction):
    """
    Find the index of the deepest wet z-level in each water column.

    Parameters
    ----------
    cellFraction : numpy.ndarray
        An (nx, ny, nz) array of wet-cell fractions; a level is
        considered wet where the fraction is > 0

    Returns
    -------
    botIndices : numpy.ndarray
        An (nx, ny) integer array holding the deepest wet z-index at
        each horizontal location, or -1 where the column is entirely dry
    """
    nx, ny, nz = cellFraction.shape
    botIndices = -1*numpy.ones((nx, ny), int)
    for zIndex in range(nz):
        mask = cellFraction[:, :, zIndex] > 0.
        # deeper levels overwrite shallower ones, so after the loop each
        # entry holds the deepest wet index
        botIndices[mask] = zIndex
    return botIndices


def get_monthly_average(filePrefix):
    """
    Compute monthly climatologies (and the corresponding sea-floor
    fields) by averaging the monthly SOSE records read with ``rdmds``.

    NOTE(review): relies on the module-level variables ``cellFraction``
    and ``botIndices`` being defined before this function is called
    (they are set up in the caching block of this script).

    Parameters
    ----------
    filePrefix : str
        Path prefix of the MITgcm MDS file to read with ``rdmds``

    Returns
    -------
    monthlyClimatologies : numpy.ma.MaskedArray
        A (12, ny, nx, nz) masked array of monthly climatologies
    botMonthlyClimatologies : numpy.ma.MaskedArray
        A (12, ny, nx) masked array of the climatologies at the sea floor
    """
    field, itrs, metadata = rdmds(filePrefix, rec=[0], returnmeta=True)
    nz, ny, nx = field.shape
    # integer division so yearCount is an int under both python 2 and 3
    # (true division would make range(yearCount) fail under python 3)
    yearCount = metadata['nrecords'][0]//12
    dims = [12, nx, ny, nz]

    mask3D = cellFraction <= 0.
    mask2D = botIndices == -1
    xIndices, yIndices = numpy.meshgrid(numpy.arange(nx), numpy.arange(ny),
                                        indexing='ij')
    monthlyClimatologies = numpy.ma.masked_all(dims)
    botMonthlyClimatologies = numpy.ma.masked_all((12, nx, ny))
    for month in range(12):
        first = True
        for year in range(yearCount):
            # print() call so this works under python 3 as well as 2
            print('{:04d}-{:02d}'.format(year+2005, month+1))
            recordIndex = year*12 + month
            field = rdmds(filePrefix, rec=[recordIndex])
            field = field.transpose(2, 1, 0)

            field = numpy.ma.masked_array(field, mask=mask3D)
            if first:
                monthlyClimatologies[month, :, :, :] = field/float(yearCount)
                first = False
            else:
                monthlyClimatologies[month, :, :, :] = \
                    monthlyClimatologies[month, :, :, :] + \
                    field/float(yearCount)
        # Bug fix: extract the sea-floor values from the *averaged*
        # climatology.  The original assigned them inside the year loop
        # from the current year's field, so the bottom field only ever
        # reflected the final year, not the climatology.
        climatology = monthlyClimatologies[month, :, :, :]
        botMonthlyClimatologies[month, :, :] = \
            numpy.ma.masked_array(climatology[xIndices, yIndices, botIndices],
                                  mask=mask2D)

    monthlyClimatologies = monthlyClimatologies.transpose(0, 2, 1, 3)
    botMonthlyClimatologies = botMonthlyClimatologies.transpose(0, 2, 1)
    return monthlyClimatologies, botMonthlyClimatologies
# ---------------------------------------------------------------------
# remap_SOSE_T_S.py: script body -- cache monthly SOSE T/S climatologies
# on the native lat/lon grid, then remap them to the Antarctic
# stereographic comparison grid
# ---------------------------------------------------------------------

inGridName = 'SouthernOcean_0.167x0.167degree'

inTFileName = '/media/xylar/extra_data/data_overflow/observations/' \
    'SouthernOcean/SOSE/monthly/THETA_mnthlyBar.0000000100'
inSFileName = '/media/xylar/extra_data/data_overflow/observations/' \
    'SouthernOcean/SOSE/monthly/SALT_mnthlyBar.0000000100'
inGridFileName = '/media/xylar/extra_data/data_overflow/observations/' \
    'SouthernOcean/SOSE/grid.mat'

prefix = 'SOSE_2005-2010_monthly_'

config = MpasAnalysisConfigParser()
config.read('config.default')

# Bug fix: the comparison descriptor (and thus outGridName) must be
# computed *before* it is used to build the output file names; the
# original referenced outGridName here before it was defined, which
# raises NameError at runtime.
outDescriptor = get_antarctic_stereographic_comparison_descriptor(config)
outGridName = outDescriptor.meshName

cacheTFileName = '{}_pot_temp_{}.nc'.format(prefix, inGridName)
cacheSFileName = '{}_salinity_{}.nc'.format(prefix, inGridName)
outTFileName = '{}_pot_temp_{}.nc'.format(prefix, outGridName)
outSFileName = '{}_salinity_{}.nc'.format(prefix, outGridName)

if not os.path.exists(cacheTFileName) or not os.path.exists(cacheSFileName):
    # the grid is only needed when (re)building one of the cache files
    matGrid = loadmat(inGridFileName)
    # lat/lon is a tensor grid so we can use 1-D arrays
    lon = matGrid['XC'][:, 0]
    lat = matGrid['YC'][0, :]
    z = matGrid['RC'][:, 0]
    cellFraction = matGrid['hFacC']

    botIndices = get_bottom_indices(cellFraction)

if os.path.exists(cacheTFileName):
    dsT = xarray.open_dataset(cacheTFileName)
else:
    field, botField = get_monthly_average(inTFileName)

    description = 'Monthly potential temperature climatologies from ' \
        '2005-2010 average of the Southern Ocean State Estimate ' \
        '(SOSE)'
    botDescription = 'Monthly potential temperature climatologies at sea ' \
        'floor from 2005-2010 average from SOSE'
    dictionary = {'dims': ['Time', 'lon', 'lat', 'depth'],
                  'coords': {'month': {'dims': ('Time'),
                                       'data': range(1, 13),
                                       'attrs': {'units': 'months'}},
                             'year': {'dims': ('Time'),
                                      'data': numpy.ones(12),
                                      'attrs': {'units': 'years'}},
                             'lon': {'dims': ('lon'),
                                     'data': lon,
                                     'attrs': {'units': 'degrees'}},
                             'lat': {'dims': ('lat'),
                                     'data': lat,
                                     'attrs': {'units': 'degrees'}},
                             'depth': {'dims': ('depth'),
                                       'data': z,
                                       'attrs': {'units': 'm'}}},
                  'data_vars': {'theta':
                                {'dims': ('Time', 'lat', 'lon', 'depth'),
                                 'data': field,
                                 'attrs': {'units': r'$^\circ$C',
                                           'description': description}},
                                'botTheta':
                                {'dims': ('Time', 'lat', 'lon'),
                                 'data': botField,
                                 'attrs': {'units': r'$^\circ$C',
                                           'description': botDescription}}}}

    dsT = xarray.Dataset.from_dict(dictionary)
    dsT.to_netcdf(cacheTFileName)

if os.path.exists(cacheSFileName):
    dsS = xarray.open_dataset(cacheSFileName)
else:
    field, botField = get_monthly_average(inSFileName)

    description = 'Monthly salinity climatologies from 2005-2010 ' \
        'average of the Southern Ocean State Estimate (SOSE)'
    botDescription = 'Monthly salinity climatologies at sea floor ' \
        'from 2005-2010 average from SOSE'
    dictionary = {'dims': ['Time', 'lon', 'lat', 'depth'],
                  'coords': {'month': {'dims': ('Time'),
                                       'data': range(1, 13),
                                       'attrs': {'units': 'months'}},
                             'year': {'dims': ('Time'),
                                      'data': numpy.ones(12),
                                      'attrs': {'units': 'years'}},
                             'lon': {'dims': ('lon'),
                                     'data': lon,
                                     'attrs': {'units': 'degrees'}},
                             'lat': {'dims': ('lat'),
                                     'data': lat,
                                     'attrs': {'units': 'degrees'}},
                             'depth': {'dims': ('depth'),
                                       'data': z,
                                       'attrs': {'units': 'm'}}},
                  'data_vars': {'salinity':
                                {'dims': ('Time', 'lat', 'lon', 'depth'),
                                 'data': field,
                                 'attrs': {'units': 'PSU',
                                           'description': description}},
                                'botSalinity':
                                {'dims': ('Time', 'lat', 'lon'),
                                 'data': botField,
                                 'attrs': {'units': 'PSU',
                                           'description': botDescription}}}}

    dsS = xarray.Dataset.from_dict(dictionary)
    dsS.to_netcdf(cacheSFileName)

# read lat/lon from the (now guaranteed to exist) temperature cache file
# (the original also created a dead LatLonGridDescriptor() earlier that
# was never used; it has been removed)
inDescriptor = LatLonGridDescriptor.read(cacheTFileName, latVarName='lat',
                                         lonVarName='lon')

mappingFileName = 'map_{}_to_{}.nc'.format(inGridName, outGridName)

remapper = Remapper(inDescriptor, outDescriptor, mappingFileName)

remapper.build_mapping_file(method='bilinear')

remappedT = remapper.remap(dsT, renormalizationThreshold=0.01)

remappedT.attrs['history'] = ' '.join(sys.argv)
remappedT.to_netcdf(outTFileName)

remappedS = remapper.remap(dsS, renormalizationThreshold=0.01)

remappedS.attrs['history'] = ' '.join(sys.argv)
remappedS.to_netcdf(outSFileName)

# quick-look sanity plots of the remapped sea-floor fields
normT = colors.Normalize(vmin=-2.0, vmax=2.0)
normS = colors.Normalize(vmin=33.0, vmax=35.0)

plt.figure()
plt.imshow(remappedT.botTheta.values[0, :, :], origin='lower', cmap='RdBu_r',
           norm=normT)
plt.colorbar()
plt.figure()
plt.imshow(remappedS.botSalinity.values[0, :, :], origin='lower',
           cmap='RdBu_r', norm=normS)
plt.colorbar()

plt.show()


# ---------------------------------------------------------------------
# remap_rignot.py: remap the Rignot et al. (2013) Antarctic ice-shelf
# melt rates to the Antarctic stereographic comparison grid
# ---------------------------------------------------------------------

import numpy
import xarray
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import pyproj
import sys

from mpas_analysis.shared.interpolation import Remapper
from mpas_analysis.shared.grid import ProjectionGridDescriptor
from mpas_analysis.shared.mpas_xarray.mpas_xarray import subset_variables
# NOTE(review): this helper is spelled get_Antarctic_... here but
# get_antarctic_... in remap_SOSE_T_S.py -- one of the two spellings is
# almost certainly wrong; confirm against mpas_analysis.shared.climatology
from mpas_analysis.shared.climatology \
    import get_Antarctic_stereographic_comparison_descriptor
from mpas_analysis.configuration.MpasAnalysisConfigParser \
    import MpasAnalysisConfigParser

inFileName = '/media/xylar/extra_data/data_overflow/observations/Antarctica/' \
    'Rignot_et_al._2013/Ant_MeltingRate.nc'

config = MpasAnalysisConfigParser()
config.read('config.default')

ds = xarray.open_dataset(inFileName)
ds = subset_variables(ds, ['melt_actual', 'xaxis', 'yaxis'])
# grid extent in km (the axes are in m, hence the 1e-3 factor)
lx = numpy.abs(1e-3*(ds.xaxis.values[-1]-ds.xaxis.values[0]))
ly = numpy.abs(1e-3*(ds.yaxis.values[-1]-ds.yaxis.values[0]))

# mask out exact zeros, which mark locations with no melt-rate data
maskedMeltRate = numpy.ma.masked_array(ds.melt_actual,
                                       mask=(ds.melt_actual.values == 0.))

ds['meltRate'] = xarray.DataArray(maskedMeltRate, dims=ds.melt_actual.dims,
                                  coords=ds.melt_actual.coords,
                                  attrs=ds.melt_actual.attrs)

ds = ds.drop('melt_actual')

inGridName = '{}x{}km_1.0km_Antarctic_stereo'.format(lx, ly)

# Antarctic polar stereographic projection matching the source data set
projection = pyproj.Proj('+proj=stere +lat_ts=-71.0 +lat_0=-90 +lon_0=0.0 '
                         '+k_0=1.0 +x_0=0.0 +y_0=0.0 +ellps=WGS84')

inDescriptor = ProjectionGridDescriptor(projection)

inDescriptor.read(inFileName, xVarName='xaxis', yVarName='yaxis',
                  meshName=inGridName)

outDescriptor = get_Antarctic_stereographic_comparison_descriptor(config)
outGridName = outDescriptor.meshName

outFileName = 'Rignot_2013_melt_rates_{}.nc'.format(outGridName)

mappingFileName = 'map_{}_to_{}.nc'.format(inGridName, outGridName)

remapper = Remapper(inDescriptor, outDescriptor, mappingFileName)

remapper.build_mapping_file(method='bilinear')

remappedDataset = remapper.remap(ds, renormalizationThreshold=0.01)

remappedDataset.attrs['history'] = ' '.join(sys.argv)
remappedDataset.to_netcdf(outFileName)

# symmetric log scale: melt rates span orders of magnitude in both signs
norm = colors.SymLogNorm(linthresh=1, linscale=1, vmin=-100.0, vmax=100.0)

plt.figure()
plt.imshow(maskedMeltRate, origin='upper', norm=norm)
plt.colorbar()
plt.figure()
plt.imshow(remappedDataset.meltRate.values, origin='lower', norm=norm)
plt.colorbar()

plt.show()