34
34
import org .reactivestreams .Subscriber ;
35
35
import reactor .core .publisher .Flux ;
36
36
import reactor .core .publisher .Mono ;
37
+ import reactor .util .function .Tuple2 ;
37
38
38
39
import java .nio .ByteBuffer ;
39
40
import java .util .Date ;
40
41
import java .util .Map ;
41
42
import java .util .concurrent .atomic .AtomicBoolean ;
42
- import java .util .concurrent .atomic .AtomicInteger ;
43
- import java .util .concurrent .atomic .AtomicLong ;
44
- import java .util .function .Consumer ;
45
43
import java .util .function .Function ;
46
44
47
45
import static com .mongodb .ReadPreference .primary ;
48
46
import static com .mongodb .assertions .Assertions .notNull ;
49
47
50
-
51
48
/**
 * <p>This class is not part of the public API and may be removed or changed at any time</p>
 */
@@ -98,31 +95,22 @@ public BsonValue getId() {
98
95
99
96
@ Override
100
97
public void subscribe (final Subscriber <? super Void > s ) {
101
- Mono .< Void > create ( sink -> {
98
+ Mono .deferContextual ( ctx -> {
102
99
AtomicBoolean terminated = new AtomicBoolean (false );
103
- sink .onCancel (() -> createCancellationMono (terminated ).subscribe ());
104
-
105
- Consumer <Throwable > errorHandler = e -> createCancellationMono (terminated )
106
- .doOnError (i -> sink .error (e ))
107
- .doOnSuccess (i -> sink .error (e ))
108
- .subscribe ();
109
-
110
- Consumer <Long > saveFileDataMono = l -> createSaveFileDataMono (terminated , l )
111
- .doOnError (errorHandler )
112
- .doOnSuccess (i -> sink .success ())
113
- .subscribe ();
114
-
115
- Consumer <Void > saveChunksMono = i -> createSaveChunksMono (terminated )
116
- .doOnError (errorHandler )
117
- .doOnSuccess (saveFileDataMono )
118
- .subscribe ();
119
-
120
- createCheckAndCreateIndexesMono ()
121
- .doOnError (errorHandler )
122
- .doOnSuccess (saveChunksMono )
123
- .subscribe ();
124
- })
125
- .subscribe (s );
100
+ return createCheckAndCreateIndexesMono ()
101
+ .then (createSaveChunksMono (terminated ))
102
+ .flatMap (lengthInBytes -> createSaveFileDataMono (terminated , lengthInBytes ))
103
+ .onErrorResume (originalError ->
104
+ createCancellationMono (terminated )
105
+ .onErrorMap (cancellationError -> {
106
+ // Timeout exception might occur during cancellation. It gets suppressed.
107
+ originalError .addSuppressed (cancellationError );
108
+ return originalError ;
109
+ })
110
+ .then (Mono .error (originalError )))
111
+ .doOnCancel (() -> createCancellationMono (terminated ).contextWrite (ctx ).subscribe ())
112
+ .then ();
113
+ }).subscribe (s );
126
114
}
127
115
128
116
public GridFSUploadPublisher <ObjectId > withObjectId () {
@@ -156,28 +144,14 @@ private Mono<Void> createCheckAndCreateIndexesMono() {
156
144
} else {
157
145
findPublisher = collection .find ();
158
146
}
159
- AtomicBoolean collectionExists = new AtomicBoolean (false );
147
+ return Mono .from (findPublisher .projection (PROJECTION ).first ())
148
+ .switchIfEmpty (Mono .defer (() ->
149
+ checkAndCreateIndex (filesCollection .withReadPreference (primary ()), FILES_INDEX )
150
+ .then (checkAndCreateIndex (chunksCollection .withReadPreference (primary ()), CHUNKS_INDEX ))
151
+ .then (Mono .fromCallable (Document ::new ))
152
+ ))
153
+ .then ();
160
154
161
- return Mono .create (sink -> Mono .from (findPublisher .projection (PROJECTION ).first ())
162
- .subscribe (
163
- d -> collectionExists .set (true ),
164
- sink ::error ,
165
- () -> {
166
- if (collectionExists .get ()) {
167
- sink .success ();
168
- } else {
169
- checkAndCreateIndex (filesCollection .withReadPreference (primary ()), FILES_INDEX )
170
- .doOnError (sink ::error )
171
- .doOnSuccess (i -> {
172
- checkAndCreateIndex (chunksCollection .withReadPreference (primary ()), CHUNKS_INDEX )
173
- .doOnError (sink ::error )
174
- .doOnSuccess (sink ::success )
175
- .subscribe ();
176
- })
177
- .subscribe ();
178
- }
179
- })
180
- );
181
155
}
182
156
183
157
private <T > Mono <Boolean > hasIndex (final MongoCollection <T > collection , final Document index ) {
@@ -189,29 +163,23 @@ private <T> Mono<Boolean> hasIndex(final MongoCollection<T> collection, final Do
189
163
}
190
164
191
165
return Flux .from (listIndexesPublisher )
192
- .collectList ()
193
- .map (indexes -> {
194
- boolean hasIndex = false ;
195
- for (Document result : indexes ) {
196
- Document indexDoc = result .get ("key" , new Document ());
197
- for (final Map .Entry <String , Object > entry : indexDoc .entrySet ()) {
198
- if (entry .getValue () instanceof Number ) {
199
- entry .setValue (((Number ) entry .getValue ()).intValue ());
200
- }
201
- }
202
- if (indexDoc .equals (index )) {
203
- hasIndex = true ;
204
- break ;
166
+ .filter ((result ) -> {
167
+ Document indexDoc = result .get ("key" , new Document ());
168
+ for (final Map .Entry <String , Object > entry : indexDoc .entrySet ()) {
169
+ if (entry .getValue () instanceof Number ) {
170
+ entry .setValue (((Number ) entry .getValue ()).intValue ());
205
171
}
206
172
}
207
- return hasIndex ;
208
- });
173
+ return indexDoc .equals (index );
174
+ })
175
+ .take (1 )
176
+ .hasElements ();
209
177
}
210
178
211
179
private <T > Mono <Void > checkAndCreateIndex (final MongoCollection <T > collection , final Document index ) {
212
180
return hasIndex (collection , index ).flatMap (hasIndex -> {
213
181
if (!hasIndex ) {
214
- return createIndexMono (collection , index ).flatMap ( s -> Mono . empty () );
182
+ return createIndexMono (collection , index ).then ( );
215
183
} else {
216
184
return Mono .empty ();
217
185
}
@@ -223,14 +191,14 @@ private <T> Mono<String> createIndexMono(final MongoCollection<T> collection, fi
223
191
}
224
192
225
193
private Mono <Long > createSaveChunksMono (final AtomicBoolean terminated ) {
226
- return Mono .create (sink -> {
227
- AtomicLong lengthInBytes = new AtomicLong (0 );
228
- AtomicInteger chunkIndex = new AtomicInteger (0 );
229
- new ResizingByteBufferFlux (source , chunkSizeBytes )
230
- .flatMap ((Function <ByteBuffer , Publisher <InsertOneResult >>) byteBuffer -> {
194
+ return new ResizingByteBufferFlux (source , chunkSizeBytes )
195
+ .index ()
196
+ .flatMap ((Function <Tuple2 <Long , ByteBuffer >, Publisher <Integer >>) indexAndBuffer -> {
231
197
if (terminated .get ()) {
232
198
return Mono .empty ();
233
199
}
200
+ Long index = indexAndBuffer .getT1 ();
201
+ ByteBuffer byteBuffer = indexAndBuffer .getT2 ();
234
202
byte [] byteArray = new byte [byteBuffer .remaining ()];
235
203
if (byteBuffer .hasArray ()) {
236
204
System .arraycopy (byteBuffer .array (), byteBuffer .position (), byteArray , 0 , byteBuffer .remaining ());
@@ -240,18 +208,19 @@ private Mono<Long> createSaveChunksMono(final AtomicBoolean terminated) {
240
208
byteBuffer .reset ();
241
209
}
242
210
Binary data = new Binary (byteArray );
243
- lengthInBytes .addAndGet (data .length ());
244
211
245
212
Document chunkDocument = new Document ("files_id" , fileId )
246
- .append ("n" , chunkIndex . getAndIncrement ())
213
+ .append ("n" , index . intValue ())
247
214
.append ("data" , data );
248
215
249
- return clientSession == null ? chunksCollection .insertOne (chunkDocument )
216
+ Publisher <InsertOneResult > insertOnePublisher = clientSession == null
217
+ ? chunksCollection .insertOne (chunkDocument )
250
218
: chunksCollection .insertOne (clientSession , chunkDocument );
219
+
220
+ return Mono .from (insertOnePublisher ).thenReturn (data .length ());
251
221
})
252
- .subscribe (null , sink ::error , () -> sink .success (lengthInBytes .get ()));
253
- });
254
- }
222
+ .reduce (0L , Long ::sum );
223
+ }
255
224
256
225
private Mono <InsertOneResult > createSaveFileDataMono (final AtomicBoolean terminated , final long lengthInBytes ) {
257
226
if (terminated .compareAndSet (false , true )) {
0 commit comments