 CFN_SUCCESS = "SUCCESS"
 CFN_FAILED = "FAILED"
 ENV_KEY_MOUNT_PATH = "MOUNT_PATH"
+ENV_KEY_SKIP_CLEANUP = "SKIP_CLEANUP"

 CUSTOM_RESOURCE_OWNER_TAG = "aws-cdk:cr-owned"

@@ -45,6 +46,7 @@ def cfn_error(message=None):
     try:
         source_bucket_names = props['SourceBucketNames']
         source_object_keys  = props['SourceObjectKeys']
+        source_markers      = props.get('SourceMarkers', None)
         dest_bucket_name    = props['DestinationBucketName']
         dest_bucket_prefix  = props.get('DestinationBucketKeyPrefix', '')
         retain_on_delete    = props.get('RetainOnDelete', "true") == "true"
@@ -55,6 +57,11 @@ def cfn_error(message=None):
         exclude             = props.get('Exclude', [])
         include             = props.get('Include', [])

+        # backwards compatibility - if "SourceMarkers" is not specified,
+        # assume all sources have an empty marker map
+        if source_markers is None:
+            source_markers = [{} for i in range(len(source_bucket_names))]
+
         default_distribution_path = dest_bucket_prefix
         if not default_distribution_path.endswith("/"):
             default_distribution_path += "/"
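For readers unfamiliar with the new property: `SourceMarkers` carries one marker map per source zip, and the backwards-compatibility branch above pads it with empty dicts when older templates omit it. A minimal sketch of the shapes involved (the bucket names are invented):

```python
# hypothetical property values, for illustration only
source_bucket_names = ["bucket-a", "bucket-b"]
source_markers = None  # older templates do not send 'SourceMarkers'

# mirrors the backwards-compatibility branch above: one empty marker
# map per source, so downstream code can always index into the list
if source_markers is None:
    source_markers = [{} for i in range(len(source_bucket_names))]

assert source_markers == [{}, {}]
```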
@@ -71,7 +78,7 @@ def cfn_error(message=None):
         if dest_bucket_prefix == "/":
             dest_bucket_prefix = ""

-        s3_source_zips = map(lambda name, key: "s3://%s/%s" % (name, key), source_bucket_names, source_object_keys)
+        s3_source_zips = list(map(lambda name, key: "s3://%s/%s" % (name, key), source_bucket_names, source_object_keys))
         s3_dest = "s3://%s/%s" % (dest_bucket_name, dest_bucket_prefix)
         old_s3_dest = "s3://%s/%s" % (old_props.get("DestinationBucketName", ""), old_props.get("DestinationBucketKeyPrefix", ""))

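The `list(...)` wrapper is not cosmetic: in Python 3, `map` returns a one-shot iterator that has no `len()` and is exhausted after a single pass, while the new length check in `s3_deploy` needs both. A quick illustration:

```python
to_url = lambda name, key: "s3://%s/%s" % (name, key)

lazy = map(to_url, ["bucket-a"], ["a.zip"])
try:
    len(lazy)
except TypeError:
    print("map objects have no len() in Python 3")

eager = list(map(to_url, ["bucket-a"], ["a.zip"]))
print(len(eager), eager[0])  # 1 s3://bucket-a/a.zip
```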
@@ -106,7 +113,7 @@ def cfn_error(message=None):
             aws_command("s3", "rm", old_s3_dest, "--recursive")

         if request_type == "Update" or request_type == "Create":
-            s3_deploy(s3_source_zips, s3_dest, user_metadata, system_metadata, prune, exclude, include)
+            s3_deploy(s3_source_zips, s3_dest, user_metadata, system_metadata, prune, exclude, include, source_markers)

         if distribution_id:
             cloudfront_invalidate(distribution_id, distribution_paths)
@@ -120,7 +127,11 @@ def cfn_error(message=None):

 #---------------------------------------------------------------------------------------------------
 # populate all files from s3_source_zips to a destination bucket
-def s3_deploy(s3_source_zips, s3_dest, user_metadata, system_metadata, prune, exclude, include):
+def s3_deploy(s3_source_zips, s3_dest, user_metadata, system_metadata, prune, exclude, include, source_markers):
+    # list lengths must be equal
+    if len(s3_source_zips) != len(source_markers):
+        raise Exception("'source_markers' and 's3_source_zips' must be the same length")
+
     # create a temporary working directory in /tmp or if enabled an attached efs volume
     if ENV_KEY_MOUNT_PATH in os.environ:
         workdir = os.getenv(ENV_KEY_MOUNT_PATH) + "/" + str(uuid4())
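With the source list and marker list now required to line up index-by-index, a mismatched call fails fast instead of silently dropping markers. A sketch of the guard in isolation (the argument values are invented, and the marker token format is hypothetical):

```python
s3_source_zips = ["s3://bucket-a/a.zip", "s3://bucket-b/b.zip"]
source_markers = [{"<<marker:0>>": "value-a"}]  # one entry short

# same check and message as the guard above
if len(s3_source_zips) != len(source_markers):
    raise Exception("'source_markers' and 's3_source_zips' must be the same length")
```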
@@ -136,13 +147,16 @@ def s3_deploy(s3_source_zips, s3_dest, user_metadata, system_metadata, prune, ex

     try:
         # download the archive from the source and extract to "contents"
-        for s3_source_zip in s3_source_zips:
+        for i in range(len(s3_source_zips)):
+            s3_source_zip = s3_source_zips[i]
+            markers = source_markers[i]
+
             archive = os.path.join(workdir, str(uuid4()))
             logger.info("archive: %s" % archive)
             aws_command("s3", "cp", s3_source_zip, archive)
             logger.info("| extracting archive to: %s\n" % contents_dir)
-            with ZipFile(archive, "r") as zip:
-                zip.extractall(contents_dir)
+            logger.info("| markers: %s" % markers)
+            extract_and_replace_markers(archive, contents_dir, markers)

         # sync from "contents" to destination

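A design note on the loop rewrite: the index loop keeps each zip paired with its marker map; `zip()` would express the same pairing a bit more idiomatically and is safe here because the length guard already ran. A sketch with invented values:

```python
s3_source_zips = ["s3://bucket-a/a.zip", "s3://bucket-b/b.zip"]
source_markers = [{}, {"<<marker:0>>": "replacement"}]  # hypothetical token

# equivalent pairing; behaviour matches the index loop above
for s3_source_zip, markers in zip(s3_source_zips, source_markers):
    print(s3_source_zip, markers)
```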
@@ -163,7 +177,8 @@ def s3_deploy(s3_source_zips, s3_dest, user_metadata, system_metadata, prune, ex
         s3_command.extend(create_metadata_args(user_metadata, system_metadata))
         aws_command(*s3_command)
     finally:
-        shutil.rmtree(workdir)
+        if not os.getenv(ENV_KEY_SKIP_CLEANUP):
+            shutil.rmtree(workdir)

 #---------------------------------------------------------------------------------------------------
 # invalidate files in the CloudFront distribution edge caches
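The new guard makes cleanup opt-out, which is mainly useful for inspecting the extracted working directory in tests or while debugging. A self-contained sketch of how the flag behaves, relying on the same `os.getenv` truthiness rule (unset returns `None`, which is falsy, so cleanup runs by default):

```python
import os
import shutil
import tempfile

ENV_KEY_SKIP_CLEANUP = "SKIP_CLEANUP"

workdir = tempfile.mkdtemp()

# any non-empty value (e.g. SKIP_CLEANUP=1) preserves the directory
if not os.getenv(ENV_KEY_SKIP_CLEANUP):
    shutil.rmtree(workdir)

print("kept" if os.path.isdir(workdir) else "removed")
```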
@@ -257,3 +272,29 @@ def bucket_owned(bucketName, keyPrefix):
         logger.info("| error getting tags from bucket")
         logger.exception(e)
     return False
+
+# extract archive and replace markers in output files
+def extract_and_replace_markers(archive, contents_dir, markers):
+    with ZipFile(archive, "r") as zip:
+        zip.extractall(contents_dir)
+
+        # replace markers for this source
+        for file in zip.namelist():
+            file_path = os.path.join(contents_dir, file)
+            if os.path.isdir(file_path): continue
+            replace_markers(file_path, markers)
+
+def replace_markers(filename, markers):
+    # convert the dict of string markers to binary markers
+    replace_tokens = dict([(k.encode('utf-8'), v.encode('utf-8')) for k, v in markers.items()])
+
+    outfile = filename + '.new'
+    with open(filename, 'rb') as fi, open(outfile, 'wb') as fo:
+        for line in fi:
+            for token in replace_tokens:
+                line = line.replace(token, replace_tokens[token])
+            fo.write(line)
+
+    # delete the original file and rename the new one to the original
+    os.remove(filename)
+    os.rename(outfile, filename)
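Taken together, the two helpers reduce to a byte-level, line-by-line substitution inside every extracted file. Below is a self-contained demo of `replace_markers` on a temporary file; the `<<marker:0>>` token is invented for illustration and is not necessarily the construct's actual marker format:

```python
import os
import tempfile

def replace_markers(filename, markers):
    # same logic as the helper above: binary, line-by-line substitution
    replace_tokens = dict([(k.encode('utf-8'), v.encode('utf-8')) for k, v in markers.items()])
    outfile = filename + '.new'
    with open(filename, 'rb') as fi, open(outfile, 'wb') as fo:
        for line in fi:
            for token in replace_tokens:
                line = line.replace(token, replace_tokens[token])
            fo.write(line)
    os.remove(filename)
    os.rename(outfile, filename)

fd, path = tempfile.mkstemp()
with os.fdopen(fd, 'w') as f:
    f.write('api_url = "<<marker:0>>"\n')

replace_markers(path, {"<<marker:0>>": "https://example.com"})

with open(path) as f:
    print(f.read())  # api_url = "https://example.com"

os.remove(path)
```

One consequence of the line-by-line approach: a marker split across a newline would not be replaced, so this only works if the tokens are guaranteed to stay on a single line.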