17 changes: 11 additions & 6 deletions pygmt/__init__.py
@@ -14,14 +14,19 @@
# Import modules to make the high-level GMT Python API
from pygmt import datasets
from pygmt.figure import Figure
from pygmt.filtering import blockmedian
from pygmt.gridding import surface
from pygmt.gridops import grdcut, grdfilter
from pygmt.mathops import makecpt
from pygmt.modules import GMTDataArrayAccessor, config, grdinfo, info, which
from pygmt.sampling import grdtrack
from pygmt.modules import GMTDataArrayAccessor, config, grdinfo
from pygmt.session_management import begin as _begin
from pygmt.session_management import end as _end
from pygmt.src import (
blockmedian,
grdcut,
grdfilter,
grdtrack,
info,
makecpt,
surface,
which,
)
from pygmt.x2sys import x2sys_cross, x2sys_init

# Get semantic version through setuptools-scm
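Since pygmt/__init__.py now simply re-exports these functions from pygmt.src, the public namespace should be unchanged by the refactor. A quick sketch of that expectation:

```python
import pygmt

# The wrapped modules are still reachable at the package top level;
# only their defining files moved under pygmt/src/.
for name in ("blockmedian", "grdcut", "grdfilter", "grdtrack",
             "info", "makecpt", "surface", "which"):
    assert hasattr(pygmt, name)
```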
3 changes: 1 addition & 2 deletions pygmt/datasets/earth_relief.py
@@ -6,9 +6,8 @@
"""
import xarray as xr
from pygmt.exceptions import GMTInvalidInput
from pygmt.gridops import grdcut
from pygmt.helpers import kwargs_to_strings
from pygmt.modules import which
from pygmt.src import grdcut, which


@kwargs_to_strings(region="sequence")
2 changes: 1 addition & 1 deletion pygmt/datasets/tutorial.py
@@ -2,7 +2,7 @@
Functions to load sample data from the GMT tutorials.
"""
import pandas as pd
from pygmt.modules import which
from pygmt.src import which


def load_japan_quakes():
136 changes: 0 additions & 136 deletions pygmt/modules.py
@@ -1,7 +1,6 @@
"""
Non-plot GMT modules.
"""
import numpy as np
import xarray as xr
from pygmt.clib import Session
from pygmt.exceptions import GMTInvalidInput
@@ -55,141 +54,6 @@ def grdinfo(grid, **kwargs):
return result


@fmt_docstring
@use_alias(C="per_column", I="spacing", T="nearest_multiple", V="verbose")
def info(table, **kwargs):
"""
Get information about data tables.

Reads from files and finds the extreme values in each of the columns
reported as min/max pairs. It recognizes NaNs and will print warnings if
the number of columns varies from record to record. As an option, it will
find the extent of the first two columns rounded up and down to the nearest
multiple of the supplied increments given by *spacing*. Such output will be
in a numpy.ndarray form ``[w, e, s, n]``, which can be used directly as the
*region* argument for other modules (hence only dx and dy are needed). If
the *per_column* option is combined with *spacing*, then the numpy.ndarray
output will be rounded up/down for as many columns as there are increments
provided in *spacing*. A similar option, *nearest_multiple*, will
provide a numpy.ndarray in the form of ``[zmin, zmax, dz]`` for makecpt.

Full option list at :gmt-docs:`gmtinfo.html`

{aliases}

Parameters
----------
table : str or np.ndarray or pandas.DataFrame or xarray.Dataset
Pass in either a file name to an ASCII data table, a 1D/2D numpy array,
a pandas dataframe, or an xarray dataset made up of 1D xarray.DataArray
data variables.
per_column : bool
Report the min/max values per column in separate columns.
spacing : str
``'[b|p|f|s]dx[/dy[/dz...]]'``.
Report the min/max of the first n columns to the nearest multiple of
the provided increments and output results in the form
``[w, e, s, n]``.
nearest_multiple : str
``'dz[+ccol]'``
Report the min/max of the first (0'th) column to the nearest multiple
of dz and output this in the form ``[zmin, zmax, dz]``.

{V}

Returns
-------
output : np.ndarray or str
Return type depends on whether any of the 'per_column', 'spacing', or
'nearest_multiple' parameters are set.

- np.ndarray if either of the above parameters are used.
- str if none of the above parameters are used.
"""
kind = data_kind(table)
with Session() as lib:
if kind == "file":
file_context = dummy_context(table)
elif kind == "matrix":
try:
# pandas.DataFrame and xarray.Dataset types
arrays = [array for _, array in table.items()]
except AttributeError:
# Python lists, tuples, and numpy ndarray types
arrays = np.atleast_2d(np.asanyarray(table).T)
file_context = lib.virtualfile_from_vectors(*arrays)
else:
raise GMTInvalidInput(f"Unrecognized data type: {type(table)}")

with GMTTempFile() as tmpfile:
with file_context as fname:
arg_str = " ".join(
[fname, build_arg_string(kwargs), "->" + tmpfile.name]
)
lib.call_module("info", arg_str)
result = tmpfile.read()

if any(arg in kwargs for arg in ["C", "I", "T"]):
# Converts certain output types into a numpy array
# instead of a raw string that is less useful.
if result.startswith(("-R", "-T")): # e.g. -R0/1/2/3 or -T0/9/1
result = result[2:].replace("/", " ")
result = np.loadtxt(result.splitlines())

return result
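The body of info moves verbatim to pygmt/src/info.py and remains exposed as pygmt.info. A minimal usage sketch of the *spacing* behaviour described in the docstring, assuming a small in-memory table (the values are illustrative only):

```python
import numpy as np
import pygmt

# Hypothetical two-column (x, y) table, for illustration only.
table = np.array([[1.2, 5.6], [2.8, 7.1], [3.4, 6.3]])

# Default: a plain min/max report is returned as a string.
report = pygmt.info(table=table)

# With spacing, the result is a numpy array [w, e, s, n] rounded to the
# nearest multiple of the increment, usable directly as a region argument.
region = pygmt.info(table=table, spacing=1)
```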


@fmt_docstring
@use_alias(G="download", V="verbose")
def which(fname, **kwargs):
"""
Find the full path to specified files.

Reports the full paths to the files given through *fname*. We look for
the file in (1) the current directory, (2) in $GMT_USERDIR (if defined),
(3) in $GMT_DATADIR (if defined), or (4) in $GMT_CACHEDIR (if defined).

*fname* can also be a downloadable file (either a full URL, a
`@file` special file for downloading from the GMT Site Cache, or
`@earth_relief_*` topography grids). In these cases, use option *download*
to set the desired behavior. If *download* is not used (or False), the file
will not be found.

Full option list at :gmt-docs:`gmtwhich.html`

{aliases}

Parameters
----------
fname : str
The file name that you want to check.
download : bool or str
If the file is downloadable and not found, we will try to download
it. Use True or 'l' (default) to download to the current directory. Use
'c' to place it in the user cache directory or 'u' for the user data
directory instead.
{V}

Returns
-------
path : str
The path of the file, depending on the options used.

Raises
------
FileNotFoundError
If the file is not found.
"""
with GMTTempFile() as tmpfile:
arg_str = " ".join([fname, build_arg_string(kwargs), "->" + tmpfile.name])
with Session() as lib:
lib.call_module("which", arg_str)
path = tmpfile.read().strip()
if not path:
raise FileNotFoundError("File '{}' not found.".format(fname))
return path
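Likewise, which moves to pygmt/src/which.py without behavioural changes. A short usage sketch, using one of the remote "@" cache files referenced in the PyGMT tutorials (the file names are examples only):

```python
import pygmt

# Resolve a remote cache file; with download=True it is fetched to the
# current directory if it is not already available locally.
path = pygmt.which(fname="@tut_quakes.ngdc", download=True)
print(path)

# A file that cannot be found raises FileNotFoundError.
try:
    pygmt.which(fname="no_such_file.txt")
except FileNotFoundError as err:
    print(err)
```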


class config: # pylint: disable=invalid-name
"""
Set GMT defaults globally or locally.
8 changes: 8 additions & 0 deletions pygmt/src/__init__.py
@@ -2,5 +2,13 @@
Source code for PyGMT modules.
"""
# pylint: disable=import-outside-toplevel
from pygmt.src.blockmedian import blockmedian
from pygmt.src.grdcut import grdcut
from pygmt.src.grdfilter import grdfilter
from pygmt.src.grdtrack import grdtrack
from pygmt.src.info import info
from pygmt.src.inset import inset
from pygmt.src.makecpt import makecpt
from pygmt.src.meca import meca
from pygmt.src.surface import surface
from pygmt.src.which import which
2 changes: 1 addition & 1 deletion pygmt/filtering.py → pygmt/src/blockmedian.py
@@ -1,5 +1,5 @@
"""
GMT modules for Filtering of 1-D and 2-D Data.
blockmedian - Block average (x,y,z) data tables by median estimation.
"""
import pandas as pd
from pygmt.clib import Session
116 changes: 116 additions & 0 deletions pygmt/src/grdcut.py
@@ -0,0 +1,116 @@
"""
grdcut - Extract subregion from a grid.
"""

import xarray as xr
from pygmt.clib import Session
from pygmt.exceptions import GMTInvalidInput
from pygmt.helpers import (
GMTTempFile,
build_arg_string,
data_kind,
dummy_context,
fmt_docstring,
kwargs_to_strings,
use_alias,
)


@fmt_docstring
@use_alias(
G="outgrid",
R="region",
J="projection",
N="extend",
S="circ_subregion",
V="verbose",
Z="z_subregion",
)
@kwargs_to_strings(R="sequence")
def grdcut(grid, **kwargs):
"""
Extract subregion from a grid.

Produce a new *outgrid* file which is a subregion of *grid*. The
subregion is specified with *region*; the specified range must not exceed
the range of *grid* (but see *extend*). If in doubt, run
:meth:`pygmt.grdinfo` to check range. Alternatively, define the subregion
indirectly via a range check on the node values or via distances from a
given point. Finally, you can give *projection* for oblique projections to
determine the corresponding rectangular *region* setting that will give a
grid that fully covers the oblique domain.

Full option list at :gmt-docs:`grdcut.html`

{aliases}

Parameters
----------
grid : str or xarray.DataArray
The file name of the input grid or the grid loaded as a DataArray.
outgrid : str or None
The name of the output netCDF file with extension .nc to store the grid
in.
{J}
{R}
extend : bool or int or float
Allow grid to be extended if new *region* exceeds existing boundaries.
Give a value to initialize nodes outside current region.
circ_subregion : str
``'lon/lat/radius[unit][+n]'``.
Specify an origin (*lon* and *lat*) and *radius*; append a distance
*unit* and we determine the corresponding rectangular region so that
all grid nodes on or inside the circle are contained in the subset.
If **+n** is appended we set all nodes outside the circle to NaN.
z_subregion : str
``'[min/max][+n|N|r]'``.
Determine a new rectangular region so that all nodes outside this
region are also outside the given z-range [-inf/+inf]. To indicate no
limit on *min* or *max* only, specify a hyphen (-). Normally, any NaNs
encountered are simply skipped and not considered in the
range-decision. Append **+n** to consider a NaN to be outside the given
z-range. This means the new subset will be NaN-free. Alternatively,
append **+r** to consider NaNs to be within the data range. In this
case we stop shrinking the boundaries once a NaN is found [Default
simply skips NaNs when making the range decision]. Finally, if your
core subset grid is surrounded by rows and/or columns that are all
NaNs, append **+N** to strip off such columns before (optionally)
considering the range of the core subset for further reduction of the
area.

{V}

Returns
-------
ret: xarray.DataArray or None
Return type depends on whether the *outgrid* parameter is set:

- xarray.DataArray if *outgrid* is not set
- None if *outgrid* is set (grid output will be stored in *outgrid*)
"""
kind = data_kind(grid)

with GMTTempFile(suffix=".nc") as tmpfile:
with Session() as lib:
if kind == "file":
file_context = dummy_context(grid)
elif kind == "grid":
file_context = lib.virtualfile_from_grid(grid)
else:
raise GMTInvalidInput("Unrecognized data type: {}".format(type(grid)))

with file_context as infile:
if "G" not in kwargs.keys(): # if outgrid is unset, output to tempfile
kwargs.update({"G": tmpfile.name})
outgrid = kwargs["G"]
arg_str = " ".join([infile, build_arg_string(kwargs)])
lib.call_module("grdcut", arg_str)

if outgrid == tmpfile.name: # if user did not set outgrid, return DataArray
with xr.open_dataarray(outgrid) as dataarray:
result = dataarray.load()
_ = result.gmt # load GMTDataArray accessor information
else:
result = None # if user sets an outgrid, return None

return result
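A hedged usage sketch of the two return paths documented above, assuming the remote earth relief dataset is fetched through pygmt.datasets.load_earth_relief:

```python
import pygmt

# Load a coarse global relief grid as an xarray.DataArray.
grid = pygmt.datasets.load_earth_relief(resolution="01d")

# Without outgrid, the subset comes back as an xarray.DataArray.
subset = pygmt.grdcut(grid=grid, region=[0, 30, 0, 30])

# With outgrid, the subset is written to a netCDF file and None is returned.
result = pygmt.grdcut(grid=grid, region=[0, 30, 0, 30], outgrid="subset.nc")
assert result is None
```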