Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Ion rework v2 #8

Merged
merged 11 commits into from
Nov 10, 2022
Merged
9 changes: 9 additions & 0 deletions irrad_control/analysis/constants.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,3 +8,12 @@

# nano prefix
nano = 1e-9

# Conversion factor for MeV/g to Mrad, 1 eV = 1.602e-19 J, 1 rad = 0.01 J/kg
# -> MeV / g = 1e6 * 1.602e-19 J / 1e-3 kg
# -> MeV / g = 1e9 * 1.602e-19 J / kg
# -> MeV / g = 1e9 * 1.602e-19 * 1e2 rad
# -> MeV / g = 1e11 * 1.602e-19 rad
# -> MeV / g = 1e5 * 1.602e-19 Mrad
# -> MeV / g = 1e5 * elementary_charge * Mrad
MEV_PER_GRAM_TO_MRAD = 1e5 * elementary_charge
41 changes: 22 additions & 19 deletions irrad_control/analysis/damage.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,14 +13,15 @@ def analyse_radiation_damage(data, config=None):
bins = (100, 100)

# Dict that holds results and error maps; bin centers
results = {r: None for r in ('proton', 'neq', 'tid')}
results = {r: None for r in ('primary', 'neq', 'tid')}
errors = {e: None for e in results}
bin_centers = {'x': None, 'y': None}

# We have a multipart irradiation with multiple data files
if config is None:

server = None # Only allow files with exactly one server for multipart to avoid adding unrelated fluence maps
ion_name = None

# Loop over generator and get partial data files
for nfile, data_part, config_part, session_basename in data:
Expand All @@ -35,28 +36,29 @@ def analyse_radiation_damage(data, config=None):
# Only allow one fixed server for multipart
if server is None:
server = server_config['name']
ion_name = server_config['daq']['ion']

if server not in data_part:
raise KeyError(f"Server '{server}' not present in file {session_basename}!")

# Initialize damage and error maps
if nfile == 0:

results['proton'], errors['proton'], bin_centers['x'], bin_centers['y'] = fluence.generate_fluence_map(beam_data=data_part[server]['Beam'],
results['primary'], errors['primary'], bin_centers['x'], bin_centers['y'] = fluence.generate_fluence_map(beam_data=data_part[server]['Beam'],
scan_data=data_part[server]['Scan'],
beam_sigma=beam_sigma,
bins=bins)
# Generate equivalent fluence map as well as TID map
if server_config['daq']['kappa'] is None:
del results['neq']
else:
results['neq'] = results['proton'] * server_config['daq']['kappa']['nominal']
results['neq'] = results['primary'] * server_config['daq']['kappa']['nominal']

print(server_config['daq']['stopping_power'], type(server_config['daq']['stopping_power']))
if server_config['daq']['stopping_power'] is None:
del results['tid']
else:
results['tid'] = formulas.tid_scan(proton_fluence=results['proton'], stopping_power=server_config['daq']['stopping_power'])
results['tid'] = formulas.tid_per_scan(primary_fluence=results['primary'], stopping_power=server_config['daq']['stopping_power'])

continue

Expand All @@ -65,38 +67,39 @@ def analyse_radiation_damage(data, config=None):
beam_sigma=beam_sigma,
bins=bins)
# Add to overall map
results['proton'] += fluence_map_part
errors['proton'] = (errors['proton']**2 + fluence_map_part_error**2)**.5
results['primary'] += fluence_map_part
errors['primary'] = (errors['primary']**2 + fluence_map_part_error**2)**.5

# Add to equivalent fluence map
if 'neq' in results:
results['neq'] += results['proton'] * server_config['daq']['kappa']['nominal']
errors['neq'] = ((server_config['daq']['kappa']['nominal'] * errors['proton'])**2 + (results['proton'] * server_config['daq']['kappa']['sigma'])**2)**0.5
results['neq'] += results['primary'] * server_config['daq']['kappa']['nominal']
errors['neq'] = ((server_config['daq']['kappa']['nominal'] * errors['primary'])**2 + (results['primary'] * server_config['daq']['kappa']['sigma'])**2)**0.5

if 'tid' in results:
results['tid'] += formulas.tid_scan(proton_fluence=results['proton'], stopping_power=server_config['daq']['stopping_power'])
errors['tid'] = formulas.tid_scan(proton_fluence=errors['proton'], stopping_power=server_config['daq']['stopping_power'])
results['tid'] += formulas.tid_per_scan(primary_fluence=results['primary'], stopping_power=server_config['daq']['stopping_power'])
errors['tid'] = formulas.tid_per_scan(primary_fluence=errors['primary'], stopping_power=server_config['daq']['stopping_power'])

else:

server = config['name']
ion_name = config['daq']['ion']

results['proton'], errors['proton'], bin_centers['x'], bin_centers['y'] = fluence.generate_fluence_map(beam_data=data[server]['Beam'],
results['primary'], errors['primary'], bin_centers['x'], bin_centers['y'] = fluence.generate_fluence_map(beam_data=data[server]['Beam'],
scan_data=data[server]['Scan'],
beam_sigma=beam_sigma,
bins=bins)
# Generate equivalent fluence map as well as TID map
if config['daq']['kappa'] is None:
del results['neq']
else:
results['neq'] = results['proton'] * config['daq']['kappa']['nominal']
errors['neq'] = ((config['daq']['kappa']['nominal'] * errors['proton'])**2 + (results['proton'] * config['daq']['kappa']['sigma'])**2)**.5
results['neq'] = results['primary'] * config['daq']['kappa']['nominal']
errors['neq'] = ((config['daq']['kappa']['nominal'] * errors['primary'])**2 + (results['primary'] * config['daq']['kappa']['sigma'])**2)**.5

if config['daq']['stopping_power'] is None:
del results['tid']
else:
results['tid'] = formulas.tid_scan(proton_fluence=results['proton'], stopping_power=config['daq']['stopping_power'])
errors['tid'] = formulas.tid_scan(proton_fluence=errors['proton'], stopping_power=config['daq']['stopping_power'])
results['tid'] = formulas.tid_per_scan(primary_fluence=results['primary'], stopping_power=config['daq']['stopping_power'])
errors['tid'] = formulas.tid_per_scan(primary_fluence=errors['primary'], stopping_power=config['daq']['stopping_power'])

if any(a is None for a in (list(bin_centers.values()) + list(results.values()))):
raise ValueError('Uninitialized values! Something went wrong - maybe files not found?')
Expand All @@ -123,16 +126,16 @@ def analyse_radiation_damage(data, config=None):

is_dut = damage_map.shape == dut_map.shape

fig, _ = plotting.plot_damage_map_3d(damage_map=damage_map, map_centers_x=centers_x, map_centers_y=centers_y, contour=not is_dut, damage=damage, server=server, dut=is_dut)
fig, _ = plotting.plot_damage_map_3d(damage_map=damage_map, map_centers_x=centers_x, map_centers_y=centers_y, contour=not is_dut, damage=damage, ion_name=ion_name, server=server, dut=is_dut)
figs.append(fig)

fig, _ = plotting.plot_damage_error_3d(damage_map=damage_map, error_map=errors[damage] if not is_dut else dut_error_map, map_centers_x=centers_x, map_centers_y=centers_y, contour=not is_dut, damage=damage, server=server, dut=is_dut)
fig, _ = plotting.plot_damage_error_3d(damage_map=damage_map, error_map=errors[damage] if not is_dut else dut_error_map, map_centers_x=centers_x, map_centers_y=centers_y, contour=not is_dut, damage=damage, ion_name=ion_name, server=server, dut=is_dut)
figs.append(fig)

fig, _ = plotting.plot_damage_map_2d(damage_map=damage_map, map_centers_x=centers_x, map_centers_y=centers_y, damage=damage, server=server, dut=is_dut)
fig, _ = plotting.plot_damage_map_2d(damage_map=damage_map, map_centers_x=centers_x, map_centers_y=centers_y, damage=damage, ion_name=ion_name, server=server, dut=is_dut)
figs.append(fig)

fig, _ = plotting.plot_damage_map_contourf(damage_map=damage_map, map_centers_x=centers_x, map_centers_y=centers_y, damage=damage, server=server, dut=is_dut)
fig, _ = plotting.plot_damage_map_contourf(damage_map=damage_map, map_centers_x=centers_x, map_centers_y=centers_y, damage=damage, ion_name=ion_name, server=server, dut=is_dut)
figs.append(fig)

logging.info("Finished plotting.")
Expand Down
22 changes: 11 additions & 11 deletions irrad_control/analysis/dtype.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,8 +37,8 @@
('row_stop_y', '<f4'), # y component of the stopping position of currently-scanned row [mm]
('row_mean_beam_current', '<f4'), # Mean of the beam current during scanning current row [A]
('row_mean_beam_current_error', '<f4'), # Error of the beam current; quadratic addition of std of beam current and measurement error [A]
('row_proton_fluence', '<f8'), # The proton fluence during scanning current row [protons/cm^2]
('row_proton_fluence_error', '<f8'), # Error of the proton fluence during scanning current row [protons/cm^2]
('row_primary_fluence', '<f8'), # The ion fluence during scanning current row [ions/cm^2]
('row_primary_fluence_error', '<f8'), # Error of the ion fluence during scanning current row [ions/cm^2]
('row_tid', '<f4'), # The TID during scanning current row [Mrad]
('row_tid_error', '<f4'), # Error of the tid [Mrad]
('row_scan_speed', '<f4'), # Speed with which the sample is scanned [mm/s]
Expand All @@ -48,8 +48,8 @@
_irrad_dtype = [('timestamp', '<f8'), # Posix-timestamp of init [s]
('row_separation', '<f4'), # Row separation e.g. step size of scan, spacing in between scanned rows [mm]
('n_rows', '<i2'), # Number of total rows in scan
('aim_damage', 'S4'), # Either NIEL or TID
('aim_value', '<f4'), # Nominal value of damage to be induced, either in neq/cm^2 or Mrad
('aim_damage', 'S8'), # Either neq, tid or primary
('aim_value', '<f4'), # Nominal value of damage to be induced, either in neq/cm^2, Mrad or ions / cm^2
('min_scan_current', '<f4'), # Minimum current for scanning [A]
('scan_origin_x', '<f4'), # x component of the scan origin from where the rel. coord. system is constructed [mm]
('scan_origin_y', '<f4'), # y component of the scan origin from where the rel. coord. system is constructed [mm]
Expand All @@ -61,18 +61,18 @@
# Damage data dtype; contains NIEL and TID damage data on a per-scan basis
_damage_dtype = [('timestamp', '<f8'), # Timestamp [s]
('scan', '<i2'), # Number of *completed* scans,
('scan_proton_fluence', '<f8'), # Proton fluence delivered in this scan [protons/cm^2]
('scan_proton_fluence_error', '<f8'), # Error of proton fluence delivered in this scan [protons/cm^2]
('scan_primary_fluence', '<f8'), # ion fluence delivered in this scan [ions/cm^2]
('scan_primary_fluence_error', '<f8'), # Error of ion fluence delivered in this scan [ions/cm^2]
('scan_tid', '<f8'), # Total-ionizing dose delivered in this scan [Mrad]
('scan_tid_error', '<f8')] # Error of total-ionizing dose delivered in this scan [Mrad]


# Result data type: contains proton as well as neutron fluence and scaling factor
# Result data type: contains ion as well as neutron fluence and scaling factor
_result_dtype = [('timestamp', '<f8'),
('proton_fluence', '<f8'),
('proton_fluence_error', '<f8'),
('neutron_fluence', '<f8'),
('neutron_fluence_error', '<f8'),
('primary_fluence', '<f8'),
('primary_fluence_error', '<f8'),
('neq_fluence', '<f8'),
('neq_fluence_error', '<f8'),
('tid', '<f4'),
('tid_error', '<f4')]

Expand Down
34 changes: 17 additions & 17 deletions irrad_control/analysis/fluence.py
Original file line number Diff line number Diff line change
Expand Up @@ -88,7 +88,7 @@ def generate_fluence_map(beam_data, scan_data, beam_sigma, bins=(100, 100)):
# Take sqrt of error map squared
fluence_map_error = np.sqrt(fluence_map_error)

# Scale from protons / mm² (intrinsic unit) to protons / cm²
# Scale from ions / mm² (intrinsic unit) to ions / cm²
fluence_map *= 100
fluence_map_error *= 100

Expand Down Expand Up @@ -403,7 +403,7 @@ def _process_row_wait(row_data, wait_beam_data, fluence_map, fluence_map_error,
wait_mu_y = row_data['row_start_y'] - scan_y_offset

# Add variation to the uncertainty
wait_protons_std = np.std(wait_beam_data['beam_current'])
wait_ions_std = np.std(wait_beam_data['beam_current'])

# Loop over currents and apply Gauss kernel at given position
for i in range(wait_beam_data.shape[0] - 1):
Expand All @@ -415,16 +415,16 @@ def _process_row_wait(row_data, wait_beam_data, fluence_map, fluence_map_error,
# Calculate how many seconds this current was present while waiting
wait_interval = wait_beam_data[i+1]['timestamp'] - wait_beam_data[i]['timestamp']

# Integrate over *wait_interval* to obtain number of protons induced
wait_protons = wait_current * wait_interval / elementary_charge
wait_protons_error = wait_current_error * wait_interval / elementary_charge
wait_protons_error = (wait_protons_error**2 + wait_protons_std**2)**.5
# Integrate over *wait_interval* to obtain number of ions induced
wait_ions = wait_current * wait_interval / elementary_charge
wait_ions_error = wait_current_error * wait_interval / elementary_charge
wait_ions_error = (wait_ions_error**2 + wait_ions_std**2)**.5

# Apply Gaussian kernel for protons
# Apply Gaussian kernel for ions
apply_gauss_2d_kernel(map_2d=fluence_map,
map_2d_error=fluence_map_error,
amplitude=wait_protons,
amplitude_error=wait_protons_error,
amplitude=wait_ions,
amplitude_error=wait_ions_error,
bin_centers_x=map_bin_centers_x,
bin_centers_y=map_bin_centers_y,
mu_x=wait_mu_x,
Expand Down Expand Up @@ -479,23 +479,23 @@ def _process_row_scan(row_data, row_beam_data, fluence_map, fluence_map_error, r
row_bin_center_currents = np.interp(row_bin_center_timestamps, row_beam_data['timestamp'], row_beam_data['beam_current'])
row_bin_center_current_errors = np.interp(row_bin_center_timestamps, row_beam_data['timestamp'], row_beam_data['beam_current_error'])

# Integrate the current measurements with the times spent in each bin to calculate the amount of protons in the bin
row_bin_center_protons = (row_bin_center_currents * row_bin_transit_times) / elementary_charge
row_bin_center_proton_errors = (row_bin_center_current_errors * row_bin_transit_times) / elementary_charge
row_bin_center_proton_errors = (row_bin_center_proton_errors**2 + np.std(row_bin_center_protons)**2)**.5
# Integrate the current measurements with the times spent in each bin to calculate the amount of ions in the bin
row_bin_center_ions = (row_bin_center_currents * row_bin_transit_times) / elementary_charge
row_bin_center_ion_errors = (row_bin_center_current_errors * row_bin_transit_times) / elementary_charge
row_bin_center_ion_errors = (row_bin_center_ion_errors**2 + np.std(row_bin_center_ions)**2)**.5

# Loop over row times
for i in range(row_bin_center_protons.shape[0]):
for i in range(row_bin_center_ions.shape[0]):

# Update mean location of the distribution
mu_x = map_bin_centers_x[(-(i+1) if row_data['row'] % 2 else i)]
mu_y = row_data['row_start_y'] - scan_y_offset

# Apply Gaussian kernel for protons
# Apply Gaussian kernel for ions
apply_gauss_2d_kernel(map_2d=fluence_map,
map_2d_error=fluence_map_error,
amplitude=row_bin_center_protons[i],
amplitude_error=row_bin_center_proton_errors[i],
amplitude=row_bin_center_ions[i],
amplitude_error=row_bin_center_ion_errors[i],
bin_centers_x=map_bin_centers_x,
bin_centers_y=map_bin_centers_y,
mu_x=mu_x,
Expand Down
Loading