#######################################################
# Files to create when the simulation is done
#######################################################
import os
import sys
import numpy as np
import shutil
import netCDF4 as nc
from .grid import Grid
from .file_io import NCfile, netcdf_time, find_time_index, read_netcdf, read_iceprod
from .timeseries import calc_timeseries, calc_special_timeseries, set_parameters
from .utils import real_dir, days_per_month, str_is_int, mask_3d, mask_except_ice, mask_land, mask_land_ice, select_top, select_bottom, mask_outside_box, var_min_max, add_time_dim, apply_mask, convert_ismr, mask_2d_to_3d, xy_to_xyz, z_to_xyz, depth_of_isoline
from .constants import deg_string, region_names
from .calculus import area_average, vertical_average
from .diagnostics import density, thermocline, adv_heat_wrt_freezing, potential_density
from .interpolation import interp_grid
# Helper function to build lists of output files in a directory.
# If unravelled=True, it will look for filenames of the form 1979.nc, etc. If unravelled=False, it will look for filenames of the form output_001.nc, etc.
def build_file_list (output_dir, unravelled=False):
output_files = []
for file in os.listdir(output_dir):
if (unravelled and file[0] in ['1', '2'] and file.endswith('.nc')) or (not unravelled and file.startswith('output_') and file.endswith('.nc')):
output_files.append(output_dir+file)
# Make sure in chronological order
output_files.sort()
return output_files
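# Example (hypothetical directory; a sketch only):
#   build_file_list('../output/')  # e.g. ['../output/output_001.nc', '../output/output_002.nc', ...]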
# Helper function to get all the output directories from a coupled model, one per segment, in order.
def get_segment_dir (output_dir):
segment_dir = []
for name in os.listdir(output_dir):
# Look for directories composed of numbers (date codes)
if os.path.isdir(output_dir+name) and str_is_int(name):
segment_dir.append(name)
# Make sure in chronological order
segment_dir.sort()
return segment_dir
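# Example (hypothetical layout; a sketch only):
#   get_segment_dir('../output/')  # e.g. ['199201', '199301', ...] for date-coded subdirectories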
# Either call get_segment_dir (if segment_dir is None), or make sure segment_dir is a list rather than a single string (i.e. wrap one entry in a list).
def check_segment_dir (output_dir, segment_dir):
if segment_dir is None:
segment_dir = get_segment_dir(output_dir)
else:
if isinstance(segment_dir, str):
segment_dir = [segment_dir]
return segment_dir
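# Example: a single string is wrapped into a one-element list
#   check_segment_dir('../output/', '199201')  # returns ['199201']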
# Get the path to the MITgcm output file for each segment.
def segment_file_paths (output_dir, segment_dir=None, file_name='output.nc'):
if segment_dir is None:
segment_dir = get_segment_dir(output_dir)
file_paths = []
for sdir in segment_dir:
file_paths.append(output_dir + sdir + '/MITgcm/' + file_name)
return file_paths
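# Example (hypothetical layout; a sketch only):
#   segment_file_paths('../output/')  # e.g. ['../output/199201/MITgcm/output.nc', ...]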
# Make a bunch of plots when the simulation is done.
# This will keep evolving over time!
# Optional keyword arguments:
# output_dir: path to directory containing output NetCDF files (assumed to be in one file per segment a la scripts/convert_netcdf.py)
# timeseries_file: filename created by precompute_timeseries, within output_dir
# grid_path: path to binary grid directory, or NetCDF file containing grid variables
# fig_dir: path to directory to save figures in
# file_path: specific output file to analyse for non-time-dependent plots (default the most recent segment)
# monthly: as in function netcdf_time
# unravelled: set to True if the simulation is done and you've run netcdf_finalise.sh, so the files are 1979.nc, 1980.nc, etc. instead of output_001.nc, output_002.nc, etc.
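# date_string, time_index, time_average: which time record(s) of file_path to plot (time_index is ignored if time_average=True)
# key: configuration key ('WSS', 'WSK', 'WSFRIS', 'FRIS', or 'PAS') controlling which variables and regions are plotted
# hovmoller_file: path to file created by precompute_hovmoller (used when key='PAS')
# ctd_file: path to the CTD database .mat file for cast comparisons (used when key='PAS')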
def plot_everything (output_dir='./', timeseries_file='timeseries.nc', grid_path=None, fig_dir='.', file_path=None, monthly=True, date_string=None, time_index=-1, time_average=True, unravelled=False, key='WSFRIS', hovmoller_file='hovmoller.nc', ctd_file='../../ctddatabase.mat'):
from .plot_1d import read_plot_timeseries, read_plot_timeseries_multi
from .plot_latlon import read_plot_latlon
from .plot_slices import read_plot_ts_slice
from .plot_misc import read_plot_hovmoller_ts
from .plot_misc import ctd_cast_compare, amundsen_rignot_comparison
if time_average:
time_index = None
# Make sure proper directories
output_dir = real_dir(output_dir)
fig_dir = real_dir(fig_dir)
# Build the list of output files in this directory (use them all for timeseries)
if key in ['WSFRIS', 'FRIS']:
# Coupled
output_files = segment_file_paths(output_dir)
else:
# Uncoupled
output_files = build_file_list(output_dir, unravelled=unravelled)
if file_path is None:
# Select the last file for single-timestep analysis
file_path = output_files[-1]
# Build the grid
if grid_path is None:
grid_path = file_path
grid = Grid(grid_path)
# Timeseries
if key == 'WSS':
var_names = ['fris_mass_balance', 'eta_avg', 'seaice_area', 'fris_temp', 'fris_salt', 'fris_age']
elif key == 'WSK':
var_names = ['fris_mass_balance', 'hice_corner', 'mld_ewed', 'eta_avg', 'seaice_area', 'fris_temp', 'fris_salt']
elif key == 'WSFRIS':
var_names = ['fris_mass_balance', 'fris_massloss', 'fris_temp', 'fris_salt', 'fris_density', 'sws_shelf_temp', 'sws_shelf_salt', 'sws_shelf_density', 'filchner_trough_temp', 'filchner_trough_salt', 'filchner_trough_density', 'wdw_core_temp', 'wdw_core_salt', 'wdw_core_density', 'seaice_area', 'wed_gyre_trans', 'filchner_trans', 'sws_shelf_iceprod'] #['fris_mass_balance', 'hice_corner', 'mld_ewed', 'fris_temp', 'fris_salt', 'ocean_vol', 'eta_avg', 'seaice_area']
elif key == 'FRIS':
var_names = ['fris_mass_balance', 'fris_temp', 'fris_salt', 'ocean_vol', 'eta_avg', 'seaice_area']
elif key == 'PAS':
melt_names = ['getz_melting', 'dotson_crosson_melting', 'thwaites_melting', 'pig_melting', 'cosgrove_melting', 'abbot_melting', 'venable_melting']
read_plot_timeseries_multi(melt_names, output_dir+timeseries_file, precomputed=True, fig_name=fig_dir+'timeseries_multi_melt.png', monthly=monthly)
var_names = ['eta_avg', 'seaice_area']
for var in var_names:
read_plot_timeseries(var, output_dir+timeseries_file, precomputed=True, fig_name=fig_dir+'timeseries_'+var+'.png', monthly=monthly)
# Hovmoller plots, CTD casts, and melt rate comparisons
if key == 'PAS':
for loc in ['pine_island_bay', 'dotson_bay']:
read_plot_hovmoller_ts(hovmoller_file, loc, grid, tmax=1.5, smin=34, t_contours=[0,1], s_contours=[34.5, 34.7], fig_name=fig_dir+'hovmoller_ts_'+loc+'.png', monthly=monthly, smooth=12)
ctd_cast_compare(loc, hovmoller_file, ctd_file, grid, fig_name=fig_dir+'casts_'+loc+'.png')
amundsen_rignot_comparison(output_dir+timeseries_file, precomputed=True, fig_name=fig_dir+'rignot.png')
# Lat-lon plots
var_names = ['ismr', 'bwtemp', 'bwsalt', 'sst', 'sss', 'aice', 'hice', 'eta', 'vel', 'velice']
if key in ['WSS', 'WSK', 'FRIS', 'WSFRIS']:
var_names += ['hsnow', 'mld', 'saltflx', 'psi', 'iceprod']
if key in ['WSS', 'WSK']:
var_names += ['bwage']
for var in var_names:
# Customise bounds and zooming
vmin = None
vmax = None
zoom_fris = False
ymax = None
chunk = None
fig_name = fig_dir + var + '.png'
if key == 'PAS' and var in ['bwsalt', 'bwtemp', 'hice', 'ismr', 'vel', 'velice']:
ymax = -70
if var == 'bwtemp':
if key == 'WSS':
vmin = -2.5
vmax = -1.5
elif key in ['WSK', 'WSFRIS']:
vmax = 1
if var == 'bwsalt':
if key == 'PAS':
vmin = 34.1
else:
vmin = 34.3
if var == 'bwage':
vmin = 0
if key == 'WSS':
vmax = 12
if var == 'eta':
vmin = -2.5
if var == 'hice':
if key == 'PAS':
vmax = 2
else:
vmax = 4
if var == 'saltflx':
vmin = -0.001
vmax = 0.001
if var == 'iceprod':
vmin = 0
vmax = 5
if var == 'psi' and key in ['WSS', 'FRIS']:
vmin = -0.5
vmax = 0.5
if var in ['vel', 'velice'] and key=='WSS':
chunk = 6
if not zoom_fris and key in ['WSK', 'WSFRIS']:
figsize = (10,6)
elif key == 'PAS':
if ymax == -70:
figsize = (14,5)
else:
figsize = (12,6)
else:
figsize = (8,6)
# Plot
read_plot_latlon(var, file_path, grid=grid, time_index=time_index, time_average=time_average, vmin=vmin, vmax=vmax, zoom_fris=zoom_fris, ymax=ymax, fig_name=fig_name, date_string=date_string, figsize=figsize, chunk=chunk)
# Make additional plots if needed
if key in ['WSK', 'WSFRIS'] and var in ['ismr', 'vel', 'bwtemp', 'bwsalt', 'psi', 'bwage']:
# Make another plot zoomed into FRIS
figsize = (8,6)
# First adjust bounds
if var == 'bwtemp':
vmax = -1.5
if var == 'bwage':
vmax = 10
if var == 'psi':
vmax = 0.5
read_plot_latlon(var, file_path, grid=grid, time_index=time_index, time_average=time_average, vmin=vmin, vmax=vmax, zoom_fris=True, fig_name=fig_dir+var+'_zoom.png', date_string=date_string, figsize=figsize)
if var == 'vel':
# Call the other options for vertical transformations
if key in ['WSK', 'WSFRIS']:
figsize = (10,6)
for vel_option in ['sfc', 'bottom']:
read_plot_latlon(var, file_path, grid=grid, time_index=time_index, time_average=time_average, vel_option=vel_option, vmin=vmin, vmax=vmax, zoom_fris=zoom_fris, ymax=ymax, fig_name=fig_dir+var+'_'+vel_option+'.png', date_string=date_string, figsize=figsize, chunk=chunk)
if var in ['eta', 'hice']:
# Make another plot with unbounded colour bar
read_plot_latlon(var, file_path, grid=grid, time_index=time_index, time_average=time_average, zoom_fris=zoom_fris, ymax=ymax, fig_name=fig_dir + var + '_unbound.png', date_string=date_string, figsize=figsize)
# Slice plots
if key in ['WSK', 'WSS', 'WSFRIS', 'FRIS']:
read_plot_ts_slice(file_path, grid=grid, lon0=-40, hmax=-75, zmin=-1450, time_index=time_index, time_average=time_average, fig_name=fig_dir+'ts_slice_filchner.png', date_string=date_string)
read_plot_ts_slice(file_path, grid=grid, lon0=-55, hmax=-72, time_index=time_index, time_average=time_average, fig_name=fig_dir+'ts_slice_ronne.png', date_string=date_string)
if key in ['WSK', 'WSFRIS']:
read_plot_ts_slice(file_path, grid=grid, lon0=0, time_index=time_index, time_average=time_average, fig_name=fig_dir+'ts_slice_eweddell.png', date_string=date_string)
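# Example call (hypothetical paths; a sketch only):
#   plot_everything(output_dir='../output/', fig_dir='../figures/', key='PAS', unravelled=False)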
# Given lists of files from two simulations, find the file and time indices corresponding to the last year (if option='last_year') or last month/timestep (if option='last_month') in the shortest simulation.
def select_common_time (output_files_1, output_files_2, option='last_year', monthly=True, check_match=True):
if not monthly and option == 'last_year':
print('Error (select_common_time): need monthly output to correctly select the last year.')
sys.exit()
# Concatenate the time arrays from all files
time_1 = calc_timeseries(output_files_1, option='time')
time_2 = calc_timeseries(output_files_2, option='time')
# Find the last time index in the shortest simulation
time_index = min(time_1.size, time_2.size) - 1
file_path_1, time_index_1 = find_time_index(output_files_1, time_index)
file_path_2, time_index_2 = find_time_index(output_files_2, time_index)
if check_match:
# Make sure we got this right
if netcdf_time(file_path_1, monthly=monthly)[time_index_1] != netcdf_time(file_path_2, monthly=monthly)[time_index_2]:
print('Error (select_common_time): something went wrong when matching time indices between the two files.')
sys.exit()
if option == 'last_year':
# Add 1 to get the upper bound on the time range we care about
t_end_1 = time_index_1 + 1
t_end_2 = time_index_2 + 1
# Find the index 12 before that
t_start_1 = t_end_1 - 12
t_start_2 = t_end_2 - 12
# Make sure it's still contained within one file
if t_start_1 < 0 or t_start_2 < 0:
print("Error (select_common_time): option last_year doesn't work if that year isn't contained within one file, for both simulations.")
sys.exit()
# Set the other options
time_index_1 = None
time_index_2 = None
time_average = True
elif option == 'last_month':
# Set the other options
t_start_1 = None
t_start_2 = None
t_end_1 = None
t_end_2 = None
time_average = False
else:
print(('Error (select_common_time): invalid option ' + option))
sys.exit()
return file_path_1, file_path_2, time_index_1, time_index_2, t_start_1, t_start_2, t_end_1, t_end_2, time_average
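# Example (sketch only): average over the last year common to both simulations
#   file_path_1, file_path_2, time_index_1, time_index_2, t_start_1, t_start_2, t_end_1, t_end_2, time_average = select_common_time(output_files_1, output_files_2, option='last_year')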
# Compare one simulation to another. Assumes the simulations have monthly averaged output. They don't need to be the same length.
# Keyword arguments:
# output_dir: as in function plot_everything
# baseline_dir: like output_dir, but for the simulation you want to use as a baseline. All the plots will show results from output_dir minus baseline_dir. This is the only non-optional argument; it is deliberately a keyword argument with no default, so the user cannot mix up which simulation is which.
# timeseries_file: filename created by precompute_timeseries, within output_dir and baseline_dir
# grid_path: as in function plot_everything
# fig_dir: as in function plot_everything
# option: either 'last_year' (averages over the last 12 months of the overlapping period of the simulations) or 'last_month' (just considers the last month of the overlapping period).
# unravelled: as in function plot_everything
# file_name: name of a file containing a single time index, present in both directories. If set, it is used instead of selecting a common time with select_common_time.
def plot_everything_diff (output_dir='./', baseline_dir=None, timeseries_file='timeseries.nc', grid_path=None, fig_dir='.', option='last_year', unravelled=False, monthly=True, key='WSFRIS', hovmoller_file='hovmoller.nc', file_name=None):
from .plot_1d import read_plot_timeseries, read_plot_timeseries_multi
from .plot_latlon import read_plot_latlon_diff
from .plot_slices import read_plot_ts_slice_diff
from .plot_misc import read_plot_hovmoller_ts_diff
from .plot_utils.labels import parse_date
# Check that baseline_dir is set
# It's a keyword argument on purpose so that the user can't mix up which simulation is which.
if baseline_dir is None:
print('Error (plot_everything_diff): must set baseline_dir')
sys.exit()
# Make sure proper directories, and rename so 1=baseline, 2=current
output_dir_1 = real_dir(baseline_dir)
output_dir_2 = real_dir(output_dir)
fig_dir = real_dir(fig_dir)
# Build lists of output files in each directory
coupled = key in ['WSFRIS', 'FRIS']
if coupled:
output_files_1 = segment_file_paths(output_dir_1)
output_files_2 = segment_file_paths(output_dir_2)
else:
output_files_1 = build_file_list(output_dir_1, unravelled=unravelled)
output_files_2 = build_file_list(output_dir_2, unravelled=unravelled)
# Now figure out which time indices to use for plots with no time dependence
if file_name is None:
file_path_1, file_path_2, time_index_1, time_index_2, t_start_1, t_start_2, t_end_1, t_end_2, time_average = select_common_time(output_files_1, output_files_2, option=option, monthly=monthly, check_match=False)
# Set date string
if option == 'last_year':
date_string = 'year beginning ' + parse_date(file_path=file_path_1, time_index=t_start_1)
elif option == 'last_month':
date_string = parse_date(file_path=file_path_1, time_index=time_index_1)
else:
file_path_1 = output_dir_1 + file_name
file_path_2 = output_dir_2 + file_name
time_index_1 = 0
time_index_2 = 0
t_start_1 = None
t_start_2 = None
t_end_1 = None
t_end_2 = None
time_average = False
date_string = ''
# Build the grid
if grid_path is None:
grid_path = file_path_1
grid = Grid(grid_path)
# Timeseries through the entire simulation
if key == 'WSS':
var_names = ['fris_mass_balance', 'eta_avg', 'seaice_area', 'fris_temp', 'fris_salt', 'fris_age']
elif key == 'WSK':
var_names = ['fris_mass_balance', 'hice_corner', 'mld_ewed', 'eta_avg', 'seaice_area', 'fris_temp', 'fris_salt']
elif key == 'WSFRIS':
var_names = ['fris_mass_balance', 'fris_massloss', 'fris_temp', 'fris_salt', 'fris_density', 'sws_shelf_temp', 'sws_shelf_salt', 'sws_shelf_density', 'filchner_trough_temp', 'filchner_trough_salt', 'filchner_trough_density', 'wdw_core_temp', 'wdw_core_salt', 'wdw_core_density', 'seaice_area', 'wed_gyre_trans', 'filchner_trans', 'sws_shelf_iceprod'] #['fris_mass_balance', 'hice_corner', 'mld_ewed', 'fris_temp', 'fris_salt', 'ocean_vol', 'eta_avg', 'seaice_area']
elif key == 'FRIS':
var_names = ['fris_mass_balance', 'fris_temp', 'fris_salt', 'ocean_vol', 'eta_avg', 'seaice_area']
elif key == 'PAS':
melt_names = ['getz_melting', 'dotson_crosson_melting', 'thwaites_melting', 'pig_melting', 'cosgrove_melting', 'abbot_melting', 'venable_melting']
read_plot_timeseries_multi(melt_names, [output_dir_1+timeseries_file, output_dir_2+timeseries_file], diff=True, precomputed=True, fig_name=fig_dir+'timeseries_multi_melt_diff.png', monthly=monthly)
var_names = ['eta_avg', 'seaice_area']
for var in var_names:
read_plot_timeseries(var, [output_dir_1+timeseries_file, output_dir_2+timeseries_file], diff=True, precomputed=True, fig_name=fig_dir+'timeseries_'+var+'_diff.png', monthly=monthly)
# Hovmoller plots
if key == 'PAS':
for loc in ['pine_island_bay', 'dotson_bay']:
read_plot_hovmoller_ts_diff(output_dir_1+hovmoller_file, output_dir_2+hovmoller_file, loc, grid, fig_name=fig_dir+'hovmoller_ts_'+loc+'_diff.png', monthly=monthly, smooth=12)
# Now make lat-lon plots
var_names = ['ismr', 'bwtemp', 'bwsalt', 'sst', 'sss', 'aice', 'hice', 'hsnow', 'eta', 'vel', 'velice']
if key in ['WSK', 'WSS', 'WSFRIS', 'FRIS']:
var_names += ['iceprod', 'mld']
if key in ['WSK', 'WSS']:
var_names += ['bwage']
for var in var_names:
if var == 'iceprod':
vmin = -2
vmax = 2
else:
vmin = None
vmax = None
ymax = None
if key == 'PAS' and var in ['bwsalt', 'bwtemp', 'hice', 'ismr', 'vel', 'velice']:
ymax = -70
if key in ['WSK', 'WSFRIS']:
figsize = (10, 6)
elif key == 'PAS':
if ymax == -70:
figsize = (14, 5)
else:
figsize = (12, 6)
else:
figsize = (8, 6)
read_plot_latlon_diff(var, file_path_1, file_path_2, grid=grid, time_index=time_index_1, t_start=t_start_1, t_end=t_end_1, time_average=time_average, time_index_2=time_index_2, t_start_2=t_start_2, t_end_2=t_end_2, date_string=date_string, ymax=ymax, fig_name=fig_dir+var+'_diff.png', figsize=figsize, vmin=vmin, vmax=vmax, coupled=coupled)
# Zoom into some variables
        if key in ['WSK', 'WSFRIS'] and var in ['ismr', 'bwtemp', 'bwsalt', 'vel', 'bwage']:
if var == 'bwage':
vmin = -5
vmax = None
else:
vmin = None
vmax = None
read_plot_latlon_diff(var, file_path_1, file_path_2, grid=grid, time_index=time_index_1, t_start=t_start_1, t_end=t_end_1, time_average=time_average, time_index_2=time_index_2, t_start_2=t_start_2, t_end_2=t_end_2, zoom_fris=True, date_string=date_string, fig_name=fig_dir+var+'_zoom_diff.png', vmin=vmin, vmax=vmax, coupled=coupled)
if var == 'vel':
# Call the other options for vertical transformations
for vel_option in ['sfc', 'bottom']:
read_plot_latlon_diff(var, file_path_1, file_path_2, grid=grid, time_index=time_index_1, t_start=t_start_1, t_end=t_end_1, time_average=time_average, time_index_2=time_index_2, t_start_2=t_start_2, t_end_2=t_end_2, vel_option=vel_option, date_string=date_string, fig_name=fig_dir+var+'_'+vel_option+'_diff.png', coupled=coupled)
# Slice plots
if key in ['WSK', 'WSS', 'WSFRIS', 'FRIS']:
read_plot_ts_slice_diff(file_path_1, file_path_2, grid=grid, lon0=-40, hmax=-75, zmin=-1450, time_index=time_index_1, t_start=t_start_1, t_end=t_end_1, time_average=time_average, time_index_2=time_index_2, t_start_2=t_start_2, t_end_2=t_end_2, date_string=date_string, fig_name=fig_dir+'ts_slice_filchner_diff.png', coupled=coupled)
read_plot_ts_slice_diff(file_path_1, file_path_2, grid=grid, lon0=-55, hmax=-72, time_index=time_index_1, t_start=t_start_1, t_end=t_end_1, time_average=time_average, time_index_2=time_index_2, t_start_2=t_start_2, t_end_2=t_end_2, date_string=date_string, fig_name=fig_dir+'ts_slice_ronne_diff.png', coupled=coupled)
if key in ['WSK', 'WSFRIS']:
read_plot_ts_slice_diff(file_path_1, file_path_2, grid=grid, lon0=0, zmin=-2000, time_index=time_index_1, t_start=t_start_1, t_end=t_end_1, time_average=time_average, time_index_2=time_index_2, t_start_2=t_start_2, t_end_2=t_end_2, date_string=date_string, fig_name=fig_dir+'ts_slice_eweddell_diff.png', coupled=coupled)
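# Example call (hypothetical paths; a sketch only):
#   plot_everything_diff(output_dir='../perturbed/output/', baseline_dir='../baseline/output/', fig_dir='../figures/', key='PAS')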
# Plot the sea ice annual min and max for each year of the simulation. First you have to concatenate the sea ice area into a single file, such as:
# ncrcat -v SIarea output_*.nc aice_tot.nc
# Arguments:
# file_path: path to concatenated NetCDF file with sea ice area for the entire simulation
# Optional keyword arguments:
# grid_path: as in function plot_everything
# fig_dir: path to directory to save figures in
# monthly: as in function netcdf_time
def plot_seaice_annual (file_path, grid_path='../grid/', fig_dir='.', monthly=True):
from .plot_latlon import plot_aice_minmax
fig_dir = real_dir(fig_dir)
grid = Grid(grid_path)
time = netcdf_time(file_path, monthly=monthly)
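    # Skip the first year if it starts after February (incomplete summer minimum) and the last year if it ends before August (incomplete winter maximum)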
first_year = time[0].year
if time[0].month > 2:
first_year += 1
last_year = time[-1].year
if time[-1].month < 8:
last_year -= 1
for year in range(first_year, last_year+1):
plot_aice_minmax(file_path, year, grid=grid, fig_name=fig_dir+'aice_minmax_'+str(year)+'.png')
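# Example (after the ncrcat step above; hypothetical paths):
#   plot_seaice_annual('aice_tot.nc', grid_path='../grid/', fig_dir='../figures/')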
# Helper functions for precompute_timeseries and precompute_hovmoller:
# Check if the precomputed file already exists, and either open it or create it.
def set_update_file (precomputed_file, grid, dimensions, rho=None):
if os.path.isfile(precomputed_file):
# Open it
id = nc.Dataset(precomputed_file, 'a')
if (rho is not None) and not (id.variables['rho'][:] == rho).all():
print('Error (set_update_file): the density axis has changed.')
sys.exit()
return id
else:
# Create it
return NCfile(precomputed_file, grid, dimensions, rho=rho)
# Define or update the time axis.
def set_update_time (id, mit_file, monthly=True, time_average=False):
# Read the time array from the MITgcm file, and its units
time, time_units, calendar = netcdf_time(mit_file, return_units=True, monthly=monthly)
if time_average:
# Only save the first one
time = np.array([time[0]])
if isinstance(id, nc.Dataset):
# File is being updated
# Update the units to match the old time array
time_units = id.variables['time'].units
# Also figure out how many time indices are in the file so far
num_time = id.variables['time'].size
# Convert to numeric values
time = nc.date2num(time, time_units, calendar=calendar)
# Append to file
id.variables['time'][num_time:] = time
return num_time
else:
# File is new
# Add the time variable to the file
id.add_time(time, units=time_units, calendar=calendar)
return 0
# Define or update non-time variables.
def set_update_var (id, num_time, data, dimensions, var_name, title, units):
if isinstance(id, nc.Dataset):
# File is being updated
# Append to file
id.variables[var_name][num_time:] = data
elif isinstance(id, NCfile):
# File is new
# Add the variable to the file
id.add_variable(var_name, data, dimensions, long_name=title, units=units)
else:
print('Error (set_update_var): unknown id type')
sys.exit()
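# Example workflow for these three helpers (a sketch; the variable name, title, and units are illustrative):
#   id = set_update_file('timeseries.nc', grid, 't')
#   num_time = set_update_time(id, 'output.nc', monthly=True)
#   set_update_var(id, num_time, data, 't', 'fris_temp', 'Average temperature in FRIS cavity', 'degC')
#   id.close()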
# Precompute timeseries and save them in a NetCDF file, which is appended to after each simulation segment.
# Arguments:
# mit_file: path to a single NetCDF file output by MITgcm
# timeseries_file: path to a NetCDF file for saving timeseries. If it exists, it will be appended to; if it doesn't exist, it will be created.
# Optional keyword arguments:
# timeseries_types: list of timeseries types to compute (subset of the options from set_parameters). If None, a default set will be used.
# lon0, lat0: if timeseries_types includes 'temp_polynya' and/or 'salt_polynya', use this point as the centre.
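# key: configuration key used to choose the default timeseries_types
# eosType, rhoConst, Tref, Sref, tAlpha, sBeta: equation of state parameters, passed to the density function if any density timeseries are requested
# time_average: collapse the file's time records into a single average before computing
# grid: Grid object, if one already exists (otherwise it is built from mit_file)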
def precompute_timeseries (mit_file, timeseries_file, timeseries_types=None, monthly=True, lon0=None, lat0=None, key='PAS', eosType='MDJWF', rhoConst=None, Tref=None, Sref=None, tAlpha=None, sBeta=None, time_average=False, grid=None):
# Timeseries to compute
if timeseries_types is None:
if key == 'WSS':
timeseries_types = ['fris_mass_balance', 'eta_avg', 'seaice_area', 'fris_temp', 'fris_salt', 'fris_age']
elif key == 'WSK':
timeseries_types = ['fris_mass_balance', 'hice_corner', 'mld_ewed', 'eta_avg', 'seaice_area', 'fris_temp', 'fris_salt']
elif key == 'PAS':
timeseries_types = ['dotson_crosson_melting', 'thwaites_melting', 'pig_melting', 'getz_melting', 'cosgrove_melting', 'abbot_melting', 'venable_melting', 'eta_avg', 'hice_max', 'pine_island_bay_temp_btw_200_700m', 'pine_island_bay_salt_btw_200_700m', 'dotson_bay_temp_btw_200_700m', 'dotson_bay_salt_btw_200_700m', 'inner_amundsen_shelf_temp_btw_200_700m', 'inner_amundsen_shelf_salt_btw_200_700m', 'amundsen_shelf_break_uwind_avg', 'dotson_massloss', 'pig_massloss', 'getz_massloss', 'inner_amundsen_shelf_sss_avg']
# Build the grid
if grid is None:
grid = Grid(mit_file)
    if any(['density' in s for s in timeseries_types]):
# Precompute density so we don't have to re-calculate it for each density variable. If there's only one density variable, this won't make a difference.
temp = read_netcdf(mit_file, 'THETA', time_average=time_average)
salt = read_netcdf(mit_file, 'SALT', time_average=time_average)
rho = density(eosType, salt, temp, 0, rhoConst=rhoConst, Tref=Tref, Sref=Sref, tAlpha=tAlpha, sBeta=sBeta)
else:
rho = None
# Set up or update the file and time axis
id = set_update_file(timeseries_file, grid, 't')
num_time = set_update_time(id, mit_file, monthly=monthly, time_average=time_average)
# Now process all the timeseries
for ts_name in timeseries_types:
print(('Processing ' + ts_name))
# Get information about the variable; only care about title and units
title, units = set_parameters(ts_name)[2:4]
if ts_name == 'fris_mass_balance':
melt, freeze = calc_special_timeseries(ts_name, mit_file, grid=grid, monthly=monthly, time_average=time_average)[1:]
# We need two titles now
title_melt = 'Total melting beneath FRIS'
title_freeze = 'Total refreezing beneath FRIS'
# Update two variables
set_update_var(id, num_time, melt, 't', 'fris_total_melt', title_melt, units)
set_update_var(id, num_time, freeze, 't', 'fris_total_freeze', title_freeze, units)
else:
data = calc_special_timeseries(ts_name, mit_file, grid=grid, lon0=lon0, lat0=lat0, monthly=monthly, rho=rho, time_average=time_average)[1]
set_update_var(id, num_time, data, 't', ts_name, title, units)
id.close()
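# Example (hypothetical paths; a sketch only):
#   precompute_timeseries('output_001.nc', 'timeseries.nc', key='PAS')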
# Precompute ocean timeseries from a coupled UaMITgcm simulation.
# Optional keyword arguments:
# output_dir: path to master output directory for experiment. Default the current directory.
# timeseries_file: as in precompute_timeseries. Default 'timeseries.nc'.
# file_name: name of the output NetCDF file within the output/XXXXXX/MITgcm/ directories. Default 'output.nc'.
# segment_dir: list of date codes, in chronological order, corresponding to the subdirectories within output_dir. This must be specified if timeseries_file already exists. If it is not specified, all available subdirectories of output_dir will be used.
# timeseries_types: as in precompute_timeseries
# time_average: Average each year to create an annually averaged timeseries
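# hovmoller_file: as in timeseries_file, but for Hovmoller output. Default 'hovmoller.nc'.
# hovmoller_loc: list of locations to pass to precompute_hovmoller. Default depends on key.
# key: configuration key used to choose the default timeseries_types and hovmoller_loc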
def precompute_timeseries_coupled (output_dir='./', timeseries_file='timeseries.nc', hovmoller_file='hovmoller.nc', file_name='output.nc', segment_dir=None, timeseries_types=None, hovmoller_loc=None, key='PAS', time_average=False):
if timeseries_types is None:
if key == 'WSFRIS':
timeseries_types = ['fris_mass_balance', 'fris_massloss', 'fris_temp', 'fris_salt', 'fris_density', 'sws_shelf_temp', 'sws_shelf_salt', 'sws_shelf_density', 'filchner_trough_temp', 'filchner_trough_salt', 'filchner_trough_density', 'wdw_core_temp', 'wdw_core_salt', 'wdw_core_density', 'seaice_area', 'wed_gyre_trans', 'filchner_trans', 'sws_shelf_iceprod']
#timeseries_types = ['fris_mass_balance', 'hice_corner', 'mld_ewed', 'fris_temp', 'fris_salt', 'ocean_vol', 'eta_avg', 'seaice_area']
elif key == 'WSFRIS_pt2':
timeseries_types = ['fris_massloss', 'fris_temp', 'filchner_trough_temp', 'filchner_trough_salt', 'ronne_depression_temp', 'ronne_depression_salt', 'ronne_depression_iceprod', 'ronne_depression_atemp_avg', 'ronne_depression_wind_avg', 'ronne_depression_sst_avg', 'ronne_depression_sss_avg', 'sws_shelf_temp', 'sws_shelf_salt', 'sws_shelf_iceprod', 'sws_shelf_atemp_avg', 'sws_shelf_wind_avg', 'sws_shelf_sst_avg', 'sws_shelf_sss_avg', 'sws_shelf_pminuse']
elif key == 'WSFRIS_diag':
timeseries_types = ['sws_shelf_salt_adv', 'sws_shelf_salt_dif', 'sws_shelf_salt_sfc', 'sws_shelf_salt_sfc_corr', 'sws_shelf_salt_tend', 'sws_shelf_seaice_melt', 'sws_shelf_seaice_freeze', 'sws_shelf_pmepr', 'fris_age']
elif key == 'FRIS':
timeseries_types = ['fris_mass_balance', 'fris_temp', 'fris_salt', 'ocean_vol', 'eta_avg', 'seaice_area']
elif key == 'PAS':
timeseries_types = ['dotson_crosson_melting', 'thwaites_melting', 'pig_melting', 'getz_melting', 'cosgrove_melting', 'abbot_melting', 'venable_melting', 'eta_avg', 'hice_max', 'pine_island_bay_temp_below_500m', 'pine_island_bay_salt_below_500m', 'dotson_bay_temp_below_500m', 'dotson_bay_salt_below_500m', 'inner_amundsen_shelf_temp_below_500m', 'inner_amundsen_shelf_salt_below_500m', 'amundsen_shelf_break_uwind_avg', 'dotson_massloss', 'pig_massloss', 'getz_massloss', 'inner_amundsen_shelf_sss_avg', 'amundsen_shelf_break_adv_heat_ns_300_1500m']
if hovmoller_loc is None:
if key == 'PAS':
hovmoller_loc = ['pine_island_bay', 'dotson_bay']
else:
hovmoller_loc = []
output_dir = real_dir(output_dir)
if segment_dir is None and os.path.isfile(output_dir+timeseries_file):
print(('Error (precompute_timeseries_coupled): since ' + timeseries_file + ' exists, you must specify segment_dir'))
sys.exit()
segment_dir = check_segment_dir(output_dir, segment_dir)
file_paths = segment_file_paths(output_dir, segment_dir, file_name)
# Call precompute_timeseries for each segment
for file_path in file_paths:
print(('Processing ' + file_path))
precompute_timeseries(file_path, output_dir+timeseries_file, timeseries_types=timeseries_types, monthly=True, time_average=time_average)
        if len(hovmoller_loc) > 0:
precompute_hovmoller(file_path, output_dir+hovmoller_file, loc=hovmoller_loc)
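# Example (sketch only; the date code is hypothetical):
#   precompute_timeseries_coupled(output_dir='./', key='PAS')  # first call: processes all segments
#   precompute_timeseries_coupled(output_dir='./', segment_dir='199201', key='PAS')  # later call: appends one new segment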
# Make animations of lat-lon variables throughout a coupled UaMITgcm simulation, and also images of the first and last frames.
# Currently supported: ismr, bwtemp, bwsalt, draft, aice, hice, mld, eta, psi.
def animate_latlon_coupled (var, output_dir='./', file_name='output.nc', segment_dir=None, vmin=None, vmax=None, change_points=None, mov_name=None, fig_name_beg=None, fig_name_end=None, figsize=(8,6), zoom_fris=False):
import matplotlib.animation as animation
import matplotlib.pyplot as plt
from .plot_latlon import latlon_plot
from .plot_utils.labels import parse_date
from .plot_utils.colours import get_extend
output_dir = real_dir(output_dir)
segment_dir = check_segment_dir(output_dir, segment_dir)
file_paths = segment_file_paths(output_dir, segment_dir, file_name)
# Inner function to read and process data from a single file
def read_process_data (file_path, var_name, grid, mask_option='3d', gtype='t', lev_option=None, ismr=False, psi=False):
data = read_netcdf(file_path, var_name)
if mask_option == '3d':
data = mask_3d(data, grid, gtype=gtype, time_dependent=True)
elif mask_option == 'except_ice':
data = mask_except_ice(data, grid, gtype=gtype, time_dependent=True)
elif mask_option == 'land':
data = mask_land(data, grid, gtype=gtype, time_dependent=True)
elif mask_option == 'land_ice':
data = mask_land_ice(data, grid, gtype=gtype, time_dependent=True)
else:
print(('Error (read_process_data): invalid mask_option ' + mask_option))
sys.exit()
if lev_option is not None:
if lev_option == 'top':
data = select_top(data)
elif lev_option == 'bottom':
data = select_bottom(data)
else:
print(('Error (read_process_data): invalid lev_option ' + lev_option))
sys.exit()
if ismr:
data = convert_ismr(data)
if psi:
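            # Vertically sum and convert from m^3/s to Sv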
data = np.sum(data, axis=-3)*1e-6
return data
all_data = []
all_grids = []
all_dates = []
# Loop over segments
for file_path in file_paths:
print(('Processing ' + file_path))
# Build the grid
grid = Grid(file_path)
# Read and process the variable we need
ctype = 'basic'
gtype = 't'
include_shelf = var not in ['aice', 'hice', 'mld']
if var == 'ismr':
data = read_process_data(file_path, 'SHIfwFlx', grid, mask_option='except_ice', ismr=True)
title = 'Ice shelf melt rate (m/y)'
ctype = 'ismr'
elif var == 'bwtemp':
data = read_process_data(file_path, 'THETA', grid, lev_option='bottom')
title = 'Bottom water temperature ('+deg_string+'C)'
elif var == 'bwsalt':
data = read_process_data(file_path, 'SALT', grid, lev_option='bottom')
title = 'Bottom water salinity (psu)'
elif var == 'draft':
data = mask_except_ice(grid.draft, grid)
title = 'Ice shelf draft (m)'
elif var == 'aice':
data = read_process_data(file_path, 'SIarea', grid, mask_option='land_ice')
title = 'Sea ice concentration'
elif var == 'hice':
data = read_process_data(file_path, 'SIheff', grid, mask_option='land_ice')
title = 'Sea ice thickness (m)'
elif var == 'mld':
data = read_process_data(file_path, 'MXLDEPTH', grid, mask_option='land_ice')
title = 'Mixed layer depth (m)'
elif var == 'eta':
data = read_process_data(file_path, 'ETAN', grid, mask_option='land')
title = 'Free surface (m)'
elif var == 'psi':
data = read_process_data(file_path, 'PsiVEL', grid, psi=True)
title = 'Vertically integrated streamfunction (Sv)'
ctype = 'plusminus'
else:
            print(('Error (animate_latlon_coupled): invalid var ' + var))
sys.exit()
# Loop over timesteps
if var == 'draft':
# Just one timestep
all_data.append(data)
all_grids.append(grid)
all_dates.append(parse_date(file_path=file_path, time_index=0))
else:
for t in range(data.shape[0]):
# Extract the data from this timestep
# Save it and the grid to the long lists
all_data.append(data[t,:])
all_grids.append(grid)
all_dates.append(parse_date(file_path=file_path, time_index=t))
extend = get_extend(vmin=vmin, vmax=vmax)
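    # Initialise the colour bounds to the opposite extremes of the last segment's data; the loop below widens them to the global min/max over all frames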
vmin_tmp = np.amax(data)
vmax_tmp = np.amin(data)
for elm in all_data:
vmin_2, vmax_2 = var_min_max(elm, grid, zoom_fris=zoom_fris)
vmin_tmp = min(vmin_tmp, vmin_2)
vmax_tmp = max(vmax_tmp, vmax_2)
if vmin is None:
vmin = vmin_tmp
if vmax is None:
vmax = vmax_tmp
num_frames = len(all_data)
# Make the first and last frames as stills
tsteps = [0, -1]
fig_names = [fig_name_beg, fig_name_end]
for t in range(2):
latlon_plot(all_data[tsteps[t]], all_grids[tsteps[t]], gtype=gtype, ctype=ctype, vmin=vmin, vmax=vmax, change_points=change_points, title=title, date_string=all_dates[tsteps[t]], figsize=figsize, fig_name=fig_names[t], zoom_fris=zoom_fris)
# Now make the animation
fig, ax = plt.subplots(figsize=figsize)
# Inner function to plot a frame
def plot_one_frame (t):
img = latlon_plot(all_data[t], all_grids[t], ax=ax, gtype=gtype, ctype=ctype, vmin=vmin, vmax=vmax, change_points=change_points, title=title+'\n'+all_dates[t], make_cbar=False, zoom_fris=zoom_fris)
if t==0:
return img
# First frame
img = plot_one_frame(0)
plt.colorbar(img, extend=extend)
# Function to update figure with the given frame
def animate(t):
print(('Frame ' + str(t) + ' of ' + str(num_frames)))
ax.cla()
plot_one_frame(t)
# Call this for each frame
anim = animation.FuncAnimation(fig, func=animate, frames=list(range(num_frames)))
writer = animation.FFMpegWriter(bitrate=500, fps=12)
if mov_name is not None:
print(('Saving ' + mov_name))
anim.save(mov_name, writer=writer)
else:
plt.show()
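# Example (hypothetical paths; ffmpeg must be available for the animation):
#   animate_latlon_coupled('bwtemp', output_dir='./', mov_name='bwtemp.mp4', fig_name_beg='bwtemp_first.png', fig_name_end='bwtemp_last.png')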
# When the model crashes, convert its crash-dump to a NetCDF file.
# Arguments:
# crash_dir: directory including all the state*crash.* files. The NetCDF file will be saved here too, with the name crash.nc.
# grid_path: as in function plot_everything
def crash_to_netcdf (crash_dir, grid_path):
from MITgcmutils import rdmds
# Make sure crash_dir is a proper directory
crash_dir = real_dir(crash_dir)
# Read the grid
grid = Grid(grid_path)
# Initialise the NetCDF file
ncfile = NCfile(crash_dir+'crash.nc', grid, 'xyz')
# Find all the crash files
for file in os.listdir(crash_dir):
if file.startswith('stateThetacrash') and file.endswith('.data'):
# Found temperature
# Read it from binary
temp = rdmds(crash_dir + file.replace('.data', ''))
# Write it to NetCDF
ncfile.add_variable('THETA', temp, 'xyz', units='C')
if file.startswith('stateSaltcrash') and file.endswith('.data'):
salt = rdmds(crash_dir + file.replace('.data', ''))
ncfile.add_variable('SALT', salt, 'xyz', units='psu')
if file.startswith('stateUvelcrash') and file.endswith('.data'):
u = rdmds(crash_dir + file.replace('.data', ''))
ncfile.add_variable('UVEL', u, 'xyz', gtype='u', units='m/s')
if file.startswith('stateVvelcrash') and file.endswith('.data'):
v = rdmds(crash_dir + file.replace('.data', ''))
ncfile.add_variable('VVEL', v, 'xyz', gtype='v', units='m/s')
if file.startswith('stateWvelcrash') and file.endswith('.data'):
w = rdmds(crash_dir + file.replace('.data', ''))
ncfile.add_variable('WVEL', w, 'xyz', gtype='w', units='m/s')
if file.startswith('stateEtacrash') and file.endswith('.data'):
eta = rdmds(crash_dir + file.replace('.data', ''))
ncfile.add_variable('ETAN', eta, 'xy', units='m')
if file.startswith('stateAreacrash') and file.endswith('.data'):
area = rdmds(crash_dir + file.replace('.data', ''))
ncfile.add_variable('SIarea', area, 'xy', units='fraction')
if file.startswith('stateHeffcrash') and file.endswith('.data'):
heff = rdmds(crash_dir + file.replace('.data', ''))
ncfile.add_variable('SIheff', heff, 'xy', units='m')
if file.startswith('stateUicecrash') and file.endswith('.data'):
uice = rdmds(crash_dir + file.replace('.data', ''))
ncfile.add_variable('SIuice', uice, 'xy', gtype='u', units='m/s')
if file.startswith('stateVicecrash') and file.endswith('.data'):
vice = rdmds(crash_dir + file.replace('.data', ''))
ncfile.add_variable('SIvice', vice, 'xy', gtype='v', units='m/s')
if file.startswith('stateQnetcrash') and file.endswith('.data'):
qnet = rdmds(crash_dir + file.replace('.data', ''))
ncfile.add_variable('Qnet', qnet, 'xy', units='W/m^2')
if file.startswith('stateMxlcrash') and file.endswith('.data'):
mld = rdmds(crash_dir + file.replace('.data', ''))
ncfile.add_variable('MXLDEPTH', mld, 'xy', units='m')
if file.startswith('stateEmpmrcrash') and file.endswith('.data'):
empmr = rdmds(crash_dir + file.replace('.data', ''))
ncfile.add_variable('Empmr', empmr, 'xy', units='kg/m^2/s')
ncfile.close()
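# Example (hypothetical paths; requires MITgcmutils):
#   crash_to_netcdf('../run/', '../grid/')  # writes ../run/crash.nc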
# Helper function for average_monthly_files and make_climatology
# Find all the time-dependent variables in a NetCDF file (not counting 1D time-dependent variables such as 'time' and 'iters') and return a list of their names.
def time_dependent_variables (file_name):
var_names = []
id = nc.Dataset(file_name, 'r')
for var in id.variables:
if 'time' in id.variables[var].dimensions and len(id.variables[var].shape) > 1:
var_names.append(var)
id.close()
return var_names
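# Example:
#   time_dependent_variables('output_001.nc')  # e.g. ['THETA', 'SALT', 'UVEL', ...]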
# Do a proper time-average of files with monthly output, where each month is weighted with the number of days it represents. Make sure you load NCO before calling this function.
# Arguments:
# input_files: list of paths to filenames to time-average over. They will all be collapsed into a single record.
# output_file: path to filename to save the time-average.
# Optional keyword arguments:
# t_start: index (0-based) of the time record to start the average in input_files[0].
# t_end: index (0-based) of the time record to end the average in input_files[-1]. In python convention, this is the first index to ignore.
# leap_years: boolean (default true) for whether to consider leap years
# For example, to average from month 7 (index 6) of the first file to month 10 (index 9) of the last file, do
# average_monthly_files(input_files, output_file, t_start=6, t_end=10)
def average_monthly_files (input_files, output_file, t_start=0, t_end=None, leap_years=True):
from nco import Nco
from nco.custom import Limit
if isinstance(input_files, str):
# Only one file
input_files = [input_files]
# Extract the first time record from the first file
# This will make a skeleton file with 1 time record and all the right metadata; later we will overwrite the values of all the time-dependent variables with the weighted time-averages.
print(('Initialising ' + output_file))
nco = Nco()
nco.ncks(input=input_files[0], output=output_file, options=[Limit('time', t_start, t_start)])
# Get the starting date
time0 = netcdf_time(output_file)
year0 = time0[0].year
month0 = time0[0].month
# Find all the time-dependent variables
var_names = time_dependent_variables(output_file)
# Time-average each variable
id_out = nc.Dataset(output_file, 'a')
for var in var_names:
print(('Processing ' + var))
# Reset time
year = year0
month = month0
# Set up accumulation array of the right dimension for this variable
shape = id_out.variables[var].shape[1:]
data = np.zeros(shape)
# Also accumulate total number of days
total_days = 0
# Loop over input files
for i in range(len(input_files)):
file_name = input_files[i]
print(('...' + file_name))
# Figure out how many time indices there are
id_in = nc.Dataset(file_name, 'r')
num_time = id_in.variables[var].shape[0]
# Choose which indices we will loop over: special cases for first and last files, if t_start or t_end are set
if i == 0:
t_start_curr = t_start
else:
t_start_curr = 0
if i == len(input_files)-1 and t_end is not None:
t_end_curr = t_end
else:
t_end_curr = num_time
# Now loop over time indices
for t in range(t_start_curr, t_end_curr):
# Integrate
ndays = days_per_month(month, year)
if month==2 and not leap_years:
# Remove any leap day in February
ndays = 28
data += id_in.variables[var][t,:]*ndays
total_days += ndays
# Increment month (and year if needed)
month += 1
if month == 13:
month = 1
year += 1
id_in.close()
# Now convert from integral to average
data /= total_days
# Overwrite this variable in the output file
id_out.variables[var][0,:] = data
id_out.close()
# Do a basic average with no weighting of months.
def simple_average_files (input_files, output_file, t_start=0, t_end=None):
from nco import Nco
from nco.custom import Limit
nco = Nco()
if isinstance(input_files, str):
input_files = [input_files]
# Extract partial first and last files, if needed
num_time_start = netcdf_time(input_files[0]).size
num_time_end = netcdf_time(input_files[-1]).size
if t_end is None:
t_end = num_time_end
tmp_file_start = None
tmp_file_end = None
if t_start > 0:
tmp_file_start = input_files[0][:-3] + '_tmp.nc'
nco.ncks(input=input_files[0], output=tmp_file_start, options=[Limit('time', t_start, num_time_start-1)])
input_files[0] = tmp_file_start
if t_end < num_time_end:
tmp_file_end = input_files[-1][:-3] + '_tmp.nc'
        nco.ncks(input=input_files[-1], output=tmp_file_end, options=[Limit('time', 0, t_end-1)])
        # Use the truncated copy, as with tmp_file_start above
        input_files[-1] = tmp_file_end
# Now average
nco.ncra(input=input_files, output=output_file)
# Remove temporary files
if tmp_file_start is not None:
os.remove(tmp_file_start)
if tmp_file_end is not None:
os.remove(tmp_file_end)
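# Example (sketch only): unweighted average over the first 12 months of one file
#   simple_average_files('output_001.nc', 'avg_year1.nc', t_start=0, t_end=12)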
# Call average_monthly_files for each year in the simulation. Make sure you load NCO before calling this function.
# Optional keyword arguments:
# in_dir: path to directory containing output_*.nc files
# out_dir: path to directory to save the annually averaged files
def make_annual_averages (in_dir='./', out_dir='./'):
in_dir = real_dir(in_dir)
out_dir = real_dir(out_dir)
# Find all the files of the form output_*.nc
file_names = build_file_list(in_dir)
num_files = len(file_names)
# Make sure their names go from 1 to n where n is the number of files
if '001' not in file_names[0] or '{0:03d}'.format(num_files) not in file_names[-1]:
        print('Error (make_annual_averages): based on filenames, you seem to be missing some files.')
sys.exit()
# Get the starting date
time0 = netcdf_time(file_names[0])[0]
if time0.month != 1:
print("Error (make_annual_average): this simulation doesn't start in January.")
sys.exit()
year0 = time0.year
# Save the number of months in each file
num_months = []
for file in file_names:
id = nc.Dataset(file)
num_months.append(id.variables['time'].size)
id.close()
# Now the work starts
year = year0
i = 0 # file number
t = 0 # the next time index that needs to be dealt with
files_to_average = [] # list of files containing timesteps from the current year
t_start = None # time index of files_to_average[0] to start the averaging from
# New iteration of loop each time we process a chunk of time from a file.
while True:
if len(files_to_average)==0 and t+12 <= num_months[i]:
# Option 1: Average a full year
files_to_average.append(file_names[i])
t_start = t
t_end = t+12
print(('Processing all of ' + str(year) + ' from ' + file_names[i] + ', indices ' + str(t_start) + ' to ' + str(t_end-1)))
average_monthly_files(files_to_average, out_dir+str(year)+'_avg.nc', t_start=t_start, t_end=t_end)
files_to_average = []