
Commit 9a82367

add loop components to experiments folder (#7)

1 parent 3746155 commit 9a82367

File tree

5 files changed: +659 -0 lines changed

@@ -0,0 +1,2 @@
from .main_loop_pilot import new_experiment, Hab, Ephys, validate_selected_workflow
from .loop_workflow_widget import loop_workflow_widget
@@ -0,0 +1,138 @@
"""
May '23 OpenScope: Barcode stimuli
"""
import argparse
import json
import logging
import os
import time

import numpy as np
from psychopy import visual
from camstim import Foraging
from camstim import Stimulus_v2
from camstim import SweepStim_v2
from camstim import Warp, Window
from camstim.misc import wecanpicklethat  # used in package() below (import path assumed)

# get params ------------------------------------------------------------------
# stored in json file -
# path to json supplied by camstim via command line arg when this script is called

parser = argparse.ArgumentParser()
parser.add_argument(
    "params_path",
    nargs="?",
    type=str,
    default="",
)
args, _ = parser.parse_known_args()

with open(args.params_path, "r") as f:
    json_params = json.load(f)

# Create display window
# ----------------------------------------------------------------------------
window = Window(
    fullscr=True,
    monitor=json_params["monitor"],
    screen=0,
    warp=Warp.Spherical,
)

# patch the Stimulus_v2 class to allow for serializing without large arrays
# ----------------------------------------------------------------------------
class Stimulus_v2_MinusFrameArrays(Stimulus_v2):

    def __init__(self, *args, **kwargs):
        super(Stimulus_v2_MinusFrameArrays, self).__init__(*args, **kwargs)

    def package(self):
        """
        Package for serializing - minus large arrays of frame timing/order.
        """
        if not self.save_sweep_table:
            self.sweep_table = None
            self.sweep_params = self.sweep_params.keys()
        self_dict = self.__dict__
        del self_dict['sweep_frames']
        del self_dict['sweep_order']
        self_dict['stim'] = str(self_dict['stim'])
        return wecanpicklethat(self_dict)

# ----------------------------------------------------------------------------
# setup mapping stim
"""from mapping_script_v2.py"""

mapping_stimuli = []

# load common stimuli
gabor_path = json_params["gabor_path"]
flash_path = json_params["flash_path"]
gabor = Stimulus_v2_MinusFrameArrays.from_file(gabor_path, window)
flash = Stimulus_v2_MinusFrameArrays.from_file(flash_path, window)

gabor_duration_sec = json_params["default_gabor_duration_seconds"]
flash_duration_sec = json_params["default_flash_duration_seconds"]

original_duration_sec = gabor_duration_sec + flash_duration_sec

# if a max total duration is set and is less than the original movie length, cut down the display sequence:
max_mapping_duration_minutes = json_params[
    "max_total_duration_minutes"
]  # can be zero, in which case we use the full movie length
max_mapping_duration_sec = max_mapping_duration_minutes * 60
if 0 < max_mapping_duration_sec < original_duration_sec:
    logging.info("Mapping duration capped at %s minutes", max_mapping_duration_minutes)

    logging.info("original gabor duration: %s sec", gabor_duration_sec)
    logging.info("original flash duration: %s sec", flash_duration_sec)
    logging.info("max mapping duration: %s sec", max_mapping_duration_sec)

    # scale each stimulus in proportion to its share of the original duration
    gabor_duration_sec = (
        max_mapping_duration_sec * gabor_duration_sec
    ) / original_duration_sec
    flash_duration_sec = (
        max_mapping_duration_sec * flash_duration_sec
    ) / original_duration_sec

    logging.info("modified gabor duration: %s sec", gabor_duration_sec)
    logging.info("modified flash duration: %s sec", flash_duration_sec)

# setup timing
mapping_sequence_start_sec = 0  # if stims are daisy-chained within one script, this should be the end of the prev stim
gabor.set_display_sequence([(mapping_sequence_start_sec, gabor_duration_sec)])
flash.set_display_sequence(
    [(gabor_duration_sec, gabor_duration_sec + flash_duration_sec)]
)

mapping_stimuli = [gabor, flash]

mapping_sequence_end_sec = (
    gabor_duration_sec + flash_duration_sec
)  # if daisy-chained, the next stim in this script should start at this time

# create SweepStim_v2 instance for main stimulus
ss = SweepStim_v2(
    window,
    stimuli=mapping_stimuli,
    pre_blank_sec=json_params["pre_blank_screen_sec"],
    post_blank_sec=json_params["post_blank_screen_sec"],
    params=json_params["sweepstim"],
)

# add in foraging so we can track wheel, potentially give rewards, etc
f = Foraging(
    window=window,
    auto_update=False,
    params=json_params["sweepstim"],
    nidaq_tasks={
        "digital_input": ss.di,
        "digital_output": ss.do,
    },
)  # share di and do with SS

ss.add_item(f, "foraging")

ss.run()
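
For reference, here is a minimal sketch of the params JSON this script expects. The key names are copied from the json_params reads above; every value, path, and file name is an illustrative assumption, and the stimulus script's own file name is not shown in this diff.

# Illustrative sketch only - key names match the json_params[...] reads above;
# all values and paths below are hypothetical placeholders.
import json

example_params = {
    "monitor": "testMonitor",                 # assumed monitor config name
    "gabor_path": "gabor_stimulus.stim",      # hypothetical camstim stimulus file
    "flash_path": "flash_stimulus.stim",      # hypothetical camstim stimulus file
    "default_gabor_duration_seconds": 1200,
    "default_flash_duration_seconds": 300,
    "max_total_duration_minutes": 10,         # 0 = keep the full default durations
    "pre_blank_screen_sec": 10,
    "post_blank_screen_sec": 10,
    "sweepstim": {},                          # passed through to SweepStim_v2 and Foraging
}

with open("params.json", "w") as f:
    json.dump(example_params, f, indent=2)

# camstim would then launch the stimulus script with this path as its
# positional argument, e.g.: python <this_script>.py params.json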
@@ -0,0 +1,219 @@
# -*- coding: utf-8 -*-
"""
optotagging.py

runs optotagging code for ecephys pipeline experiments

(c) 2018 Allen Institute for Brain Science
"""
import camstim  # ensures "magic" gets set up properly by importing first
import logging  # must occur after camstim import for "magic"
from camstim.zro import agent

import numpy as np
from toolbox.IO.nidaq import AnalogOutput
from toolbox.IO.nidaq import DigitalOutput

import datetime
import time
import pickle as pkl


# %%


def run_optotagging(levels, conditions, waveforms, isis, sampleRate=10000.):

    sweep_on = np.array([0, 0, 1, 0, 0, 0, 0, 0], dtype=np.uint8)
    stim_on = np.array([0, 0, 1, 1, 0, 0, 0, 0], dtype=np.uint8)
    stim_off = np.array([0, 0, 1, 0, 0, 0, 0, 0], dtype=np.uint8)
    sweep_off = np.array([0, 0, 0, 0, 0, 0, 0, 0], dtype=np.uint8)

    # analog output plays the opto waveform; digital lines flag sweep/stim on/off
    ao = AnalogOutput('Dev1', channels=[1])
    ao.cfg_sample_clock(sampleRate)

    do = DigitalOutput('Dev1', 2)

    do.start()
    ao.start()

    do.write(sweep_on)
    time.sleep(5)

    for i, level in enumerate(levels):

        print(level)

        data = waveforms[conditions[i]]

        do.write(stim_on)
        ao.write(data * level)
        do.write(stim_off)
        time.sleep(isis[i])

    do.write(sweep_off)
    do.clear()
    ao.clear()

# %%


def generatePulseTrain(pulseWidth, pulseInterval, numRepeats, riseTime, sampleRate=10000.):
    # pulseWidth, pulseInterval and riseTime are in ms; sampleRate in Hz

    # build one second of output containing numRepeats pulses
    data = np.zeros((int(sampleRate),), dtype=np.float64)
    # rise_samples =

    rise_and_fall = (
        ((1 - np.cos(np.arange(sampleRate*riseTime/1000., dtype=np.float64)*2*np.pi/10))+1)-1)/2
    half_length = rise_and_fall.size // 2  # integer division so the slices below stay valid
    rise = rise_and_fall[:half_length]
    fall = rise_and_fall[half_length:]

    peak_samples = int(sampleRate*(pulseWidth-riseTime*2)/1000)
    peak = np.ones((peak_samples,))

    pulse = np.concatenate((rise,
                            peak,
                            fall))

    interval = int(pulseInterval*sampleRate/1000.)

    for i in range(0, numRepeats):
        data[i*interval:i*interval+pulse.size] = pulse

    return data


# %% create waveforms

def optotagging(mouseID, operation_mode='experiment', level_list=[1.15, 1.28, 1.345], genotype=None):

    sampleRate = 10000

    # 1 s cosine ramp:
    data_cosine = (((1 - np.cos(np.arange(sampleRate, dtype=np.float64)
                                * 2*np.pi/sampleRate)) + 1) - 1)/2  # create raised cosine waveform

    # 1 ms cosine ramp:
    rise_and_fall = (
        ((1 - np.cos(np.arange(sampleRate*0.001, dtype=np.float64)*2*np.pi/10))+1)-1)/2
    half_length = rise_and_fall.size // 2

    # pulses with cosine ramp:
    pulse_2ms = np.concatenate((rise_and_fall[:half_length], np.ones(
        (int(sampleRate*0.001),)), rise_and_fall[half_length:]))
    pulse_5ms = np.concatenate((rise_and_fall[:half_length], np.ones(
        (int(sampleRate*0.004),)), rise_and_fall[half_length:]))
    pulse_10ms = np.concatenate((rise_and_fall[:half_length], np.ones(
        (int(sampleRate*0.009),)), rise_and_fall[half_length:]))

    data_2ms_10Hz = np.zeros((sampleRate,), dtype=np.float64)

    for i in range(0, 10):
        interval = sampleRate // 10  # integer division so the slice indices stay ints
        data_2ms_10Hz[i*interval:i*interval+pulse_2ms.size] = pulse_2ms

    data_5ms = np.zeros((sampleRate,), dtype=np.float64)
    data_5ms[:pulse_5ms.size] = pulse_5ms

    data_10ms = np.zeros((sampleRate,), dtype=np.float64)
    data_10ms[:pulse_10ms.size] = pulse_10ms

    data_10s = np.zeros((sampleRate*10,), dtype=np.float64)
    data_10s[:-2] = 1

    # %% for experiment

    isi = 1.5
    isi_rand = 0.5
    numRepeats = 50

    condition_list = [2, 3]
    waveforms = [data_2ms_10Hz, data_5ms, data_10ms, data_cosine]

    opto_levels = np.array(level_list*numRepeats*len(condition_list))  # BLUE
    opto_conditions = condition_list*numRepeats*len(level_list)
    opto_conditions = np.sort(opto_conditions)
    opto_isis = np.random.random(opto_levels.shape) * isi_rand + isi

    p = np.random.permutation(len(opto_levels))

    # implement shuffle?
    opto_levels = opto_levels[p]
    opto_conditions = opto_conditions[p]

    # %% for testing

    if operation_mode == 'test_levels':
        isi = 2.0
        isi_rand = 0.0

        numRepeats = 2

        condition_list = [0]
        waveforms = [data_10s, data_10s]

        opto_levels = np.array(level_list*numRepeats *
                               len(condition_list))  # BLUE
        opto_conditions = condition_list*numRepeats*len(level_list)
        opto_conditions = np.sort(opto_conditions)
        opto_isis = np.random.random(opto_levels.shape) * isi_rand + isi

    elif operation_mode == 'pretest':
        numRepeats = 1

        condition_list = [0]
        data_2s = data_10s[-sampleRate*2:]
        waveforms = [data_2s]

        opto_levels = np.array(level_list*numRepeats *
                               len(condition_list))  # BLUE
        opto_conditions = condition_list*numRepeats*len(level_list)
        opto_conditions = np.sort(opto_conditions)
        opto_isis = [1]*len(opto_conditions)

    # %%

    outputDirectory = agent.OUTPUT_DIR
    fileDate = str(datetime.datetime.now()).replace(':', '').replace(
        '.', '').replace('-', '').replace(' ', '')[2:14]
    fileName = outputDirectory + "/" + fileDate + '_' + mouseID + '.opto.pkl'

    print('saving info to: ' + fileName)
    fl = open(fileName, 'wb')
    output = {}

    output['opto_levels'] = opto_levels
    output['opto_conditions'] = opto_conditions
    output['opto_ISIs'] = opto_isis
    output['opto_waveforms'] = waveforms

    pkl.dump(output, fl)
    fl.close()
    print('saved.')

    # %%
    run_optotagging(opto_levels, opto_conditions,
                    waveforms, opto_isis, float(sampleRate))


# %%
if __name__ == "__main__":
    import json
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('json_params', type=str)
    args, _ = parser.parse_known_args()

    with open(args.json_params, 'r') as f:
        json_params = json.load(f)

    logging.info('Optotagging with params: %s', json_params)
    optotagging(**json_params)
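
As a usage note, here is a minimal sketch of the JSON file the __main__ block above unpacks into optotagging(**json_params). The key names come from the optotagging() signature; the mouse ID and values are illustrative assumptions.

# Illustrative sketch only - keys mirror optotagging()'s parameters;
# the values below are hypothetical.
import json

example_opto_params = {
    "mouseID": "000000",                 # placeholder mouse ID
    "operation_mode": "pretest",         # or "experiment" / "test_levels"
    "level_list": [1.15, 1.28, 1.345],   # defaults from the function signature
    "genotype": None,                    # accepted but unused in the function body
}

with open("opto_params.json", "w") as f:
    json.dump(example_opto_params, f, indent=2)

# the script would then be invoked as: python optotagging.py opto_params.json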

0 commit comments
