|
5 | 5 |
|
6 | 6 | @author: evenhuis |
7 | 7 | """ |
8 | | -#from Parse_OMERO_Properties import datasetId, imageId, plateId |
| 8 | +# from Parse_OMERO_Properties import datasetId, imageId, plateId |
9 | 9 |
|
10 | 10 | import sys |
11 | 11 | import argparse |
12 | 12 | import os |
13 | 13 |
|
| 14 | + |
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def download_dataset(conn, Id, path, orig=False, tif=False):
    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
    """
    Download a dataset from OMERO to the local filesystem.

    INPUT : conn : open BlitzGateway connection
            Id   : ID of the dataset
            path : location on the local filesystem
            orig : if True, download the original imported image files
            tif  : if True, export each image as an OME-TIFF
    Exits the process with status 1 if the dataset is not visible
    in the current group.
    """
    # get the dataset; getObject returns None when the ID is not
    # found / not visible in the current group
    dataset = conn.getObject('Dataset', Id)
    if dataset is None:
        print("Dataset ID {} not found in group".format(Id))
        sys.exit(1)

    # the images contained in the dataset
    imgs = list(dataset.listChildren())

    # place the data in a directory named after the dataset
    ds_name = dataset.getName()
    print("{}/".format(ds_name))
    reldir = os.path.join(path, ds_name)
    if not os.path.isdir(reldir):
        os.makedirs(reldir)

    for img in imgs:
        print(" " * len(ds_name) + "/{}".format(img.getName()))

        if orig:
            # BUG FIX: the loop variable used to be named `orig`, clobbering
            # the boolean flag parameter after the first image was processed.
            for orig_file in img.getImportedImageFiles():
                name = orig_file.getName()
                file_path = os.path.join(reldir, name)

                # skip files that were already downloaded
                if not os.path.exists(file_path):
                    # BUG FIX: binary mode — image data must not be passed
                    # through text-mode newline translation
                    with open(str(file_path), 'wb') as f:
                        for chunk in orig_file.getFileInChunks():
                            f.write(chunk)

        if tif:
            name = os.path.basename(img.getName()) + ".ome.tif"
            file_path = os.path.join(reldir, name)
            file_size, block_gen = img.exportOmeTiff(bufsize=65536)
            with open(str(file_path), "wb") as f:
                for piece in block_gen:
                    f.write(piece)

    return
"""
start-code
"""

parser = argparse.ArgumentParser(description='Download datasets and projects from OMERO')
parser.add_argument('-p', '--project', nargs="+", default=[], help="IDs of projects to download")
parser.add_argument('-d', '--dataset', nargs="+", default=[], help="IDs of datasets to download")
parser.add_argument('-g', '--group', nargs="?", help="name of group")
parser.add_argument('-o', '--orig', action="store_true", default=False, help="download originals")
parser.add_argument('-t', '--tif', action="store_true", default=False, help="download OME-TIFs")

args = parser.parse_args()

# Create a connection
# ===================


from omero.gateway import BlitzGateway
from Parse_OMERO_Properties import USERNAME, PASSWORD, HOST, PORT

print(HOST)

conn = BlitzGateway(USERNAME, PASSWORD, host=HOST, port=PORT)

try:
    conn.connect()

    # BUG FIX: these were Python 2 `print` statements, inconsistent with the
    # rest of the file and a SyntaxError under Python 3
    user = conn.getUser()
    print("Current user:")
    print("   ID: {}".format(user.getId()))
    print("   Username: {}".format(user.getName()))
    print("   Full Name: {}".format(user.getFullName()))

    # optionally switch the session to a different OMERO group
    if args.group is not None:
        print("change group")
        new_group = args.group
        groups = [g.getName() for g in conn.listGroups()]
        print(groups)
        if new_group not in groups:
            print("{} not found in groups:".format(new_group))
            for gn in groups:
                print("   {}".format(gn))
            sys.exit(1)
        else:
            conn.setGroupNameForSession(new_group)

    # everything is downloaded under ./downloads
    path = os.path.join(os.getcwd(), 'downloads')
    print(args.dataset)
    for d_id in args.dataset:
        download_dataset(conn, d_id, path, orig=args.orig, tif=args.tif)

    print(args.project)
    for p_id in args.project:
        project = conn.getObject('Project', p_id)
        # BUG FIX: the None check must come before project.getName() is used,
        # and the original .format() call had an unmatched "{}" placeholder
        # plus bogus orig=/tif= keyword arguments (raised IndexError)
        if project is None:
            print("project ID {} not found in group".format(p_id))
            sys.exit(1)
        path_p = os.path.join(path, project.getName())

        for ds in list(project.listChildren()):
            download_dataset(conn, ds.getId(), path_p, orig=args.orig, tif=args.tif)

except Exception as e:
    # report the underlying error instead of silently swallowing it
    print("There was a problem trying to connect: {}".format(e))
    sys.exit(1)

finally:
    # When you are done, close the session to free up server resources.
    conn.close()
|
0 commit comments