diff --git a/src/noisepy/seis/channelcatalog.py b/src/noisepy/seis/channelcatalog.py
index d688b611..b05f71d0 100644
--- a/src/noisepy/seis/channelcatalog.py
+++ b/src/noisepy/seis/channelcatalog.py
@@ -68,7 +68,13 @@ def __init__(self, xmlpath: str, path_format: str = "{network}_{name}.xml", stor
         """
         super().__init__()
         self.xmlpath = xmlpath
-        self.path_format = path_format
+        # NCEDC station XML files are named NET.STA.xml (dots rather than
+        # underscores), so override the default path format for NCEDC paths
+        if "ncedc" in xmlpath:
+            self.path_format = "{network}.{name}.xml"
+        else:
+            self.path_format = path_format
+
         self.fs = get_filesystem(xmlpath, storage_options=storage_options)
         if not self.fs.exists(self.xmlpath):
             raise Exception(f"The XML Station file directory '{xmlpath}' doesn't exist")
diff --git a/src/noisepy/seis/ncedc_s3store.py b/src/noisepy/seis/ncedc_s3store.py
new file mode 100644
index 00000000..1b986af5
--- /dev/null
+++ b/src/noisepy/seis/ncedc_s3store.py
@@ -0,0 +1,168 @@
+import logging
+import os
+import re
+from collections import defaultdict
+from concurrent.futures import ThreadPoolExecutor
+from datetime import datetime, timedelta, timezone
+from typing import Callable, List
+
+import obspy
+from datetimerange import DateTimeRange
+
+from noisepy.seis.channelcatalog import ChannelCatalog
+from noisepy.seis.stores import RawDataStore
+
+from .datatypes import Channel, ChannelData, ChannelType, Station
+from .utils import TimeLogger, fs_join, get_filesystem
+
+logger = logging.getLogger(__name__)
+
+
+def channel_filter(stations: List[str], ch_prefixes: str) -> Callable[[Channel], bool]:
+    """
+    Helper function for creating a channel filter to be used in the constructor of the store.
+    The filter accepts a channel if its station is in the list of allowed station names
+    (or the list is ["*"]) and its channel type starts with one of the comma-separated prefixes.
+    """
+    sta_set = set(stations)
+    prefixes = tuple(ch_prefixes.lower().split(","))
+
+    def filter(ch: Channel) -> bool:
+        if sta_set == {"*"}:
+            return ch.type.name.lower().startswith(prefixes)
+        else:
+            return ch.station.name in sta_set and ch.type.name.lower().startswith(prefixes)
+
+    return filter
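+
+# Usage sketch (hypothetical station and prefix choices): accept only
+# broadband channels from two named stations; prefixes are passed as a
+# comma-separated string:
+#
+#   accept = channel_filter(["KCT", "KRP"], "BH,HH")
+#   accept(some_channel)  # -> True or False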
+
+
+class NCEDCS3DataStore(RawDataStore):
+    """
+    A data store implementation to read from a directory of miniSEED files from the NCEDC S3 bucket.
+    Every directory is a day and each .ms file contains the data for a channel.
+    """
+
+    # TODO: Support reading directly from the S3 bucket
+    # for checking the filename has the form: AAS.NC.EHZ..D.2020.002
+    file_re = re.compile(r".*[0-9]{4}.*[0-9]{3}$", re.IGNORECASE)
+
+    def __init__(
+        self,
+        path: str,
+        chan_catalog: ChannelCatalog,
+        ch_filter: Callable[[Channel], bool] = lambda s: True,  # noqa: E731
+        date_range: DateTimeRange = None,
+        storage_options: dict = {},
+    ):
+        """
+        Parameters:
+            path: path to look for ms files. Can be a local file directory or an s3://... URL path
+            chan_catalog: ChannelCatalog to retrieve inventory information for the channels
+            ch_filter: Function to decide whether a channel should be used or not.
+                       If None, all channels are used.
+            date_range: Optional date range to restrict the data to
+            storage_options: Extra arguments passed to the underlying filesystem
+        """
+        super().__init__()
+        self.fs = get_filesystem(path, storage_options=storage_options)
+        self.channel_catalog = chan_catalog
+        self.path = path
+        self.paths = {}
+        # to store a dict of {timerange: list of channels}
+        self.channels = defaultdict(list)
+        self.ch_filter = ch_filter
+        if date_range is not None and date_range.start_datetime.tzinfo is None:
+            start_datetime = date_range.start_datetime.replace(tzinfo=timezone.utc)
+            end_datetime = date_range.end_datetime.replace(tzinfo=timezone.utc)
+            date_range = DateTimeRange(start_datetime, end_datetime)
+
+        self.date_range = date_range
+
+        if date_range is None:
+            self._load_channels(self.path, ch_filter)
+
+    def _load_channels(self, full_path: str, ch_filter: Callable[[Channel], bool]):
+        tlog = TimeLogger(logger=logger, level=logging.INFO)
+        msfiles = [f for f in self.fs.glob(fs_join(full_path, "*")) if self.file_re.match(f) is not None]
+        tlog.log(f"Loading {len(msfiles)} files from {full_path}")
+        for f in msfiles:
+            timespan = NCEDCS3DataStore._parse_timespan(f)
+            self.paths[timespan.start_datetime] = full_path
+            channel = NCEDCS3DataStore._parse_channel(os.path.basename(f))
+            if not ch_filter(channel):
+                continue
+            key = str(timespan)  # DateTimeRange is not hashable
+            self.channels[key].append(channel)
+        tlog.log(f"Init: {len(self.channels)} timespans and {sum(len(ch) for ch in self.channels.values())} channels")
+
+    def _ensure_channels_loaded(self, date_range: DateTimeRange):
+        key = str(date_range)
+        if key not in self.channels or date_range.start_datetime not in self.paths:
+            dt = date_range.end_datetime - date_range.start_datetime
+            for d in range(0, dt.days):
+                date = date_range.start_datetime + timedelta(days=d)
+                if self.date_range is None or date not in self.date_range:
+                    continue
+                date_path = f"{date.year}/{date.year}.{date.timetuple().tm_yday:03d}/"
+                full_path = fs_join(self.path, date_path)
+                self._load_channels(full_path, self.ch_filter)
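+
+    # Note: the NCEDC bucket organizes continuous data by Julian day, so for
+    # e.g. 2020-01-02 the method above builds the sub-path "2020/2020.002/"
+    # under the store's base path.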
+
+    def get_channels(self, date_range: DateTimeRange) -> List[Channel]:
+        self._ensure_channels_loaded(date_range)
+        tmp_channels = self.channels.get(str(date_range), [])
+        executor = ThreadPoolExecutor()
+        stations = set(map(lambda c: c.station, tmp_channels))
+        _ = list(executor.map(lambda s: self.channel_catalog.get_inventory(date_range, s), stations))
+        logger.info(f"Getting {len(tmp_channels)} channels for {date_range}: {tmp_channels}")
+        return list(executor.map(lambda c: self.channel_catalog.get_full_channel(date_range, c), tmp_channels))
+
+    def get_timespans(self) -> List[DateTimeRange]:
+        if self.date_range is not None:
+            days = (self.date_range.end_datetime - self.date_range.start_datetime).days
+            return [
+                DateTimeRange(
+                    self.date_range.start_datetime.replace(tzinfo=timezone.utc) + timedelta(days=d),
+                    self.date_range.start_datetime.replace(tzinfo=timezone.utc) + timedelta(days=d + 1),
+                )
+                for d in range(0, days)
+            ]
+        return [DateTimeRange.from_range_text(d) for d in sorted(self.channels.keys())]
+
+    def read_data(self, timespan: DateTimeRange, chan: Channel) -> ChannelData:
+        self._ensure_channels_loaded(timespan)
+        # reconstruct the file name from the channel parameters
+        chan_str = (
+            f"{chan.station.name}.{chan.station.network}.{chan.type.name}."
+            f"{chan.station.location}.D"
+        )
+        filename = fs_join(
+            self.paths[timespan.start_datetime], f"{chan_str}.{timespan.start_datetime.strftime('%Y.%j')}"
+        )
+        if not self.fs.exists(filename):
+            logger.warning(f"Could not find file {filename}")
+            return ChannelData.empty()
+
+        with self.fs.open(filename) as f:
+            stream = obspy.read(f)
+        data = ChannelData(stream)
+        return data
+
+    def get_inventory(self, timespan: DateTimeRange, station: Station) -> obspy.Inventory:
+        return self.channel_catalog.get_inventory(timespan, station)
+
+    @staticmethod
+    def _parse_timespan(filename: str) -> DateTimeRange:
+        # The SCEDC S3 bucket stores files in the form: CIGMR__LHN___2022002.ms
+        # The NCEDC S3 bucket stores files in the form: AAS.NC.EHZ..D.2020.002
+        year = int(filename[-8:-4])
+        day = int(filename[-3:])
+        jan1 = datetime(year, 1, 1, tzinfo=timezone.utc)
+        return DateTimeRange(jan1 + timedelta(days=day - 1), jan1 + timedelta(days=day))
+
+    @staticmethod
+    def _parse_channel(filename: str) -> Channel:
+        # e.g.
+        # AAS.NC.EHZ..D.2020.002
+        network = filename.split(".")[1]
+        station = filename.split(".")[0]
+        channel = filename.split(".")[2]
+        location = filename.split(".")[3]
+        if len(channel) > 3:
+            channel = channel[:3]
+        return Channel(
+            ChannelType(channel, location),
+            # lat/lon/elev will be populated later
+            Station(network, station, location=location),
+        )
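+
+
+# Parsing examples (values are what the helpers above produce):
+#   _parse_timespan("AAS.NC.EHZ..D.2020.002")
+#       -> Julian day 002 of 2020, i.e. 2020-01-02T00:00 to 2020-01-03T00:00 UTC
+#   _parse_channel("AAS.NC.EHZ..D.2020.002")
+#       -> channel EHZ, empty location code, at station NC.AAS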
diff --git a/src/noisepy/seis/noise_module.py b/src/noisepy/seis/noise_module.py
index d18ce90c..dfb170b7 100644
--- a/src/noisepy/seis/noise_module.py
+++ b/src/noisepy/seis/noise_module.py
@@ -220,6 +220,7 @@ def preprocess_raw(
         st[ii].data = np.float32(st[ii].data)
         st[ii].data = scipy.signal.detrend(st[ii].data, type="constant")
         st[ii].data = scipy.signal.detrend(st[ii].data, type="linear")
+        st[ii] = st[ii].taper(max_percentage=0.05)

     # merge, taper and filter the data
     if len(st) > 1:
diff --git a/tutorials/noisepy_ncedc_tutorial.ipynb b/tutorials/noisepy_ncedc_tutorial.ipynb
new file mode 100644
index 00000000..2f06e5bf
--- /dev/null
+++ b/tutorials/noisepy_ncedc_tutorial.ipynb
@@ -0,0 +1,500 @@
+{
+ "cells": [
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "PIA2IaqUOeOA"
+   },
+   "source": [
+    "# NoisePy NCEDC Tutorial\n",
+    "\n",
+    "NoisePy is a Python software package for processing ambient seismic noise cross correlations. This tutorial introduces the use of NoisePy on a toy problem with NCEDC data. It can be run locally or on the cloud.\n",
+    "\n",
+    "\n",
+    "The data is stored on AWS S3 as the NCEDC Data Set: https://ncedc.org/db/cloud/getstarted-pds.html\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "First, we install the noisepy-seis package."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Uncomment and run this line if the environment doesn't have noisepy already installed:\n",
+    "# ! pip install noisepy-seis "
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "__Warning__: NoisePy uses ```obspy``` as a core Python module to manipulate seismic data. If you use Google Colab, restart the runtime now for a proper installation of ```obspy``` on Colab."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "WtDb2_y3Oreg"
+   },
+   "source": [
+    "## Import necessary modules\n",
+    "\n",
+    "Then we import the basic modules."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "vceZgD83PnNc"
+   },
+   "outputs": [],
+   "source": [
+    "%load_ext autoreload\n",
+    "%autoreload 2\n",
+    "from noisepy.seis import cross_correlate, stack_cross_correlations, __version__  # noisepy core functions\n",
+    "from noisepy.seis.asdfstore import ASDFCCStore, ASDFStackStore  # Object to store ASDF data within noisepy\n",
+    "from noisepy.seis.ncedc_s3store import NCEDCS3DataStore, channel_filter  # Object to query NCEDC data on S3\n",
+    "from noisepy.seis.datatypes import CCMethod, ConfigParameters, FreqNorm, StackMethod, TimeNorm  # Main configuration object\n",
+    "from noisepy.seis.channelcatalog import XMLStationChannelCatalog  # Required stationXML handling object\n",
+    "import os\n",
+    "from datetime import datetime, timezone\n",
+    "from datetimerange import DateTimeRange\n",
+    "\n",
+    "\n",
+    "from noisepy.seis.plotting_modules import plot_all_moveout\n",
+    "\n",
+    "print(f\"Using NoisePy version {__version__}\")\n",
+    "\n",
+    "path = \"./ncedc_data\"\n",
+    "\n",
+    "os.makedirs(path, exist_ok=True)\n",
+    "cc_data_path = os.path.join(path, \"CCF\")\n",
+    "stack_data_path = os.path.join(path, \"STACK\")\n",
+    "S3_STORAGE_OPTIONS = {\"s3\": {\"anon\": True}}"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "pntYzIYGNTn8"
+   },
+   "source": [
+    "We will work with a single day's worth of data from NCEDC. The continuous data is organized as one miniSEED file per day and channel. For this example, you can choose any year since 1993. We will just cross correlate a single day; the optional listing sketch below shows the file naming convention."
+   ]
+  },
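+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As an optional sketch (assuming anonymous S3 access and the ```s3fs``` package, which the stores use under the hood), we can list one day's directory to see the naming convention. The day chosen below is illustrative:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Optional peek at the bucket layout (anonymous access); safe to skip\n",
+    "import s3fs\n",
+    "fs = s3fs.S3FileSystem(anon=True)\n",
+    "print(fs.ls(\"ncedc-pds/continuous_waveforms/NC/2012/2012.002/\")[:5])"
+   ]
+  },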
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "yojR0Z3ALm6K"
+   },
+   "outputs": [],
+   "source": [
+    "# NCEDC S3 bucket common URL characters for the NC network\n",
+    "S3_DATA = \"s3://ncedc-pds/continuous_waveforms/NC/\"\n",
+    "# timeframe for analysis\n",
+    "start = datetime(2012, 1, 1, tzinfo=timezone.utc)\n",
+    "end = datetime(2012, 1, 3, tzinfo=timezone.utc)\n",
+    "timerange = DateTimeRange(start, end)\n",
+    "print(timerange)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "E1CC-BljNzus"
+   },
+   "source": [
+    "The station information, including the instrumental response, is stored as stationXML in the following bucket:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "mhfgrMPALsYS"
+   },
+   "outputs": [],
+   "source": [
+    "S3_STATION_XML = \"s3://ncedc-pds/FDSNstationXML/NC/\"  # S3 storage of stationXML\n"
+   ]
+  },
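+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As a minimal sketch (assuming anonymous S3 access and the ```NET.STA.xml``` naming this store uses; the station file below is a hypothetical example), one stationXML can be fetched and inspected with ```obspy```:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Optional: inspect one station's metadata directly; safe to skip\n",
+    "import fsspec\n",
+    "import obspy\n",
+    "with fsspec.open(S3_STATION_XML + \"NC.KCT.xml\", anon=True) as f:\n",
+    "    inv = obspy.read_inventory(f)\n",
+    "print(inv)"
+   ]
+  },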
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "ssaL5fa5IhI7"
+   },
+   "source": [
+    "## Ambient Noise Project Configuration\n",
+    "\n",
+    "We prepare the configuration of the workflow by declaring and storing parameters into the ``ConfigParameters()`` object and/or editing the ``config.yml`` file.\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "dIjBD7riIfdJ"
+   },
+   "outputs": [],
+   "source": [
+    "# Initialize ambient noise workflow configuration\n",
+    "config = ConfigParameters()  # default config parameters which can be customized\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "Tsp7RfC8IwE-"
+   },
+   "source": [
+    "Customize the job parameters below:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "ByEiHRjmIAPB"
+   },
+   "outputs": [],
+   "source": [
+    "config.start_date = start\n",
+    "config.end_date = end\n",
+    "config.acorr_only = False  # only perform auto-correlation or not\n",
+    "config.xcorr_only = True  # only perform cross-correlation or not\n",
+    "\n",
+    "config.inc_hours = 24  # INC_HOURS is the time chunk (integer, in hours) over which\n",
+    "                       # the parallelization works. Each chunk is loaded in memory,\n",
+    "                       # so reduce inc_hours to limit memory use when there are\n",
+    "                       # over 400+ stations. At regional scale for NCEDC, we can\n",
+    "                       # afford 20 Hz data and inc_hours being a day of data.\n",
+    "\n",
+    "\n",
+    "# pre-processing parameters\n",
+    "config.samp_freq = 2  # (int) Sampling rate in Hz of desired processing (it can be different from the data sampling rate)\n",
+    "config.cc_len = 3600  # (float) basic unit of data length for fft (sec)\n",
+    "config.step = 1800.0  # (float) overlap between each cc_len (sec)\n",
+    "\n",
+    "config.ncomp = 3  # 1 or 3 component data (needed to decide whether to do rotation)\n",
+    "\n",
+    "config.stationxml = False  # station.XML file used to remove instrument response for SAC/miniSEED data.\n",
+    "                           # If True, the stationXML file is assumed to be provided.\n",
+    "config.rm_resp = \"inv\"  # select 'no' to not remove the response; use 'inv' with the stationXML, or 'spectrum'\n",
+    "\n",
+    "\n",
+    "############## NOISE PRE-PROCESSING ##################\n",
+    "config.freqmin, config.freqmax = 0.05, 2.0  # broadband filtering of the data before cross correlation\n",
+    "config.max_over_std = 10  # threshold to remove windows of bad signals: set it to 10*9 if you prefer not to remove them\n",
+    "\n",
+    "################### SPECTRAL NORMALIZATION ############\n",
+    "config.freq_norm = FreqNorm.RMA  # choose between \"rma\" for a soft whitening or \"no\" for no whitening.\n",
+    "                                 # Pure whitening is not implemented correctly at this point.\n",
+    "config.smoothspect_N = 10  # moving window length to smooth spectrum amplitude (points)\n",
+    "                           # here, choose smoothspect_N for the case of a strict whitening (e.g., phase_only)\n",
+    "\n",
+    "\n",
+    "#################### TEMPORAL NORMALIZATION ##########\n",
+    "config.time_norm = TimeNorm.NO  # 'no' for no normalization, or 'rma'/'one_bit' for normalization in the time domain\n",
+    "config.smooth_N = 10  # moving window length for time domain normalization if selected (points)\n",
+    "\n",
+    "\n",
+    "############ cross correlation ##############\n",
+    "\n",
+    "config.cc_method = CCMethod.XCORR  # 'xcorr' for pure cross correlation OR 'deconv' for deconvolution;\n",
+    "                                   # FOR \"COHERENCY\" PLEASE set freq_norm to \"rma\", time_norm to \"no\" and cc_method to \"xcorr\"\n",
+    "\n",
+    "# OUTPUTS:\n",
+    "config.substack = True  # True = smaller stacks within the time chunk. False = stack over the whole inc_hours\n",
+    "config.substack_len = config.cc_len  # how long to stack over (for monitoring purposes): needs to be a multiple of cc_len.\n",
+    "                                     # if substack=True and substack_len=2*cc_len, you pre-stack every 2 correlation windows;\n",
+    "                                     # substack=True with substack_len=cc_len keeps ALL of the correlations;\n",
+    "                                     # if substack=False, the cross correlations are stacked over the inc_hours window\n",
+    "\n",
+    "### For monitoring applications ####\n",
+    "## we recommend substacking every 2-4 cross correlations and storing the substacks,\n",
+    "# e.g.\n",
+    "# config.substack = True\n",
+    "# config.substack_len = 4 * config.cc_len\n",
+    "\n",
+    "config.maxlag = 300  # lags of cross-correlation to save (sec)\n",
+    "config.net_list = ['NC']\n"
+   ]
+  },
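+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "A quick sanity check (plain arithmetic, no extra NoisePy API assumed): how many ```inc_hours``` chunks will the chosen time range produce?"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Number of time chunks that will be cross correlated\n",
+    "n_chunks = (config.end_date - config.start_date).total_seconds() / 3600 / config.inc_hours\n",
+    "print(f\"{n_chunks:.0f} chunk(s) of {config.inc_hours} h\")"
+   ]
+  },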
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# For this tutorial, make sure the previous run is empty\n",
+    "os.system(f\"rm -rf {cc_data_path}\")\n",
+    "os.system(f\"rm -rf {stack_data_path}\")\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "Wwv1QCQhP_0Y"
+   },
+   "source": [
+    "## Step 1: Cross-correlation\n",
+    "\n",
+    "In this instance, we read the data directly from the NCEDC bucket into the cross correlation code. We are not attempting to recreate a data store. Therefore we go straight to step 1 of the cross correlations."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We first declare the data and cross correlation stores."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "jq2DKIS9Rl2H"
+   },
+   "outputs": [],
+   "source": [
+    "stations = \"KCT,KRP,KHMB\".split(\",\")  # filter to these stations\n",
+    "catalog = XMLStationChannelCatalog(S3_STATION_XML, storage_options=S3_STORAGE_OPTIONS)  # Station catalog\n",
+    "raw_store = NCEDCS3DataStore(S3_DATA, catalog, channel_filter(stations, \"HH\"), timerange, storage_options=S3_STORAGE_OPTIONS)  # Store for reading raw data from the S3 bucket\n",
+    "cc_store = ASDFCCStore(cc_data_path)  # Store for writing CC data"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Get the time range of the data in the data store inventory:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "span = raw_store.get_timespans()\n",
+    "print(span)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Get the channels available during a given time span:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "channel_list = raw_store.get_channels(span[0])\n",
+    "print(channel_list)\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "5qsPGkNp9Msx"
+   },
+   "source": [
+    "## Perform the cross correlation\n",
+    "The data will be pulled from NCEDC, cross correlated, and stored locally if this notebook is run locally.\n",
+    "If you are re-calculating, we recommend clearing the old ``cc_store``."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "49MnDXYp9Msx"
+   },
+   "outputs": [],
+   "source": [
+    "cross_correlate(raw_store, config, cc_store)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "GMeH9BslQSSJ"
+   },
+   "source": [
+    "The cross correlations are saved as a single file for each channel pair and each increment of inc_hours. We will now stack all the cross correlations over all time chunks and look at the results for all station pairs.\n",
+    "\n",
+    "## Step 2: Stack the cross correlation\n",
+    "\n",
+    "We now create the stack stores. Because this tutorial runs locally, we will use an ASDF stack store to output the data. ASDF is a data container in HDF5 that is used in full waveform modeling and inversion. H5 behaves well locally.\n",
+    "\n",
+    "Each station pair will have one H5 file with all components of the cross correlations. While this produces **many** H5 files, it remains the NoisePy team's preferred option because:\n",
+    "1. a thread-safe installation of HDF5 is not trivial, and\n",
+    "2. grouping station pairs within a single file is not flexible enough for a broad audience of users."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "cd32ntmAVx-z"
+   },
+   "outputs": [],
+   "source": [
+    "# open a new cc store in read-only mode since we will be doing parallel access for stacking\n",
+    "cc_store = ASDFCCStore(cc_data_path, mode=\"r\")\n",
+    "stack_store = ASDFStackStore(stack_data_path)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Configure the stacking\n",
+    "\n",
+    "There are various methods for optimal stacking. We refer to Yang et al. (2023) for a discussion and comparison of the methods:\n",
+    "\n",
+    "Yang X, Bryan J, Okubo K, Jiang C, Clements T, Denolle MA. Optimal stacking of noise cross-correlation functions. Geophysical Journal International. 2023 Mar;232(3):1600-18. https://doi.org/10.1093/gji/ggac410\n",
+    "\n",
+    "Users can choose between *linear*, phase-weighted stack *pws* (Schimmel et al., 1997), *robust* stacking (Yang et al., 2023), the *acf* autocovariance filter (Nakata et al., 2019), and *nroot* stacking, by entering the corresponding string in ``config.stack_method``.\n",
+    "\n",
+    "If *all* is chosen, the current code only outputs *linear*, *pws*, and *robust*, since *nroot* is less common and *acf* is computationally expensive.\n"
+   ]
+  },
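+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We use the linear stack here. As a sketch of selecting one of the alternatives discussed above (assuming the corresponding member exists in the ```StackMethod``` enum, which the listing two cells below prints), you could switch to, e.g., the phase-weighted stack:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# e.g., phase-weighted stacking instead of linear (commented out for this run):\n",
+    "# config.stack_method = StackMethod.PWS"
+   ]
+  },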
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "config.stack_method = StackMethod.LINEAR"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "method_list = [method for method in dir(StackMethod) if not method.startswith(\"__\")]\n",
+    "print(method_list)\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "cc_store.get_station_pairs()\n",
+    "config.stations = [\"*\"]\n",
+    "config.net_list = [\"*\"]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "stack_cross_correlations(cc_store, stack_store, config)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "jQ-ey7uX9Msx"
+   },
+   "source": [
+    "## QC_1 of the cross correlations for imaging\n",
+    "We now explore the quality of the cross correlations. We plot the moveout of the cross correlations, filtered in some frequency band."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "cc_store.get_station_pairs()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "pairs = stack_store.get_station_pairs()\n",
+    "print(f\"Found {len(pairs)} station pairs\")\n",
+    "sta_stacks = stack_store.read_bulk(None, pairs)  # no timestamp used in ASDFStackStore"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "id": "QKSeQpk7WKlW"
+   },
+   "outputs": [],
+   "source": [
+    "plot_all_moveout(sta_stacks, 'Allstack_linear', 0.1, 0.2, 'ZZ', 1)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "@webio": {
+   "lastCommId": null,
+   "lastKernelId": null
+  },
+  "colab": {
+   "provenance": []
+  },
+  "gpuClass": "standard",
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.13"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+}