4040import java .util .Date ;
4141import java .util .Map ;
4242import java .util .concurrent .atomic .AtomicBoolean ;
43- import java .util .concurrent .atomic .AtomicInteger ;
44- import java .util .concurrent .atomic .AtomicLong ;
45- import java .util .function .Function ;
4643
4744import static com .mongodb .ReadPreference .primary ;
4845import static com .mongodb .assertions .Assertions .notNull ;
@@ -106,7 +103,7 @@ public BsonValue getId() {
106103
107104 @ Override
108105 public void subscribe (final Subscriber <? super Void > s ) {
109- Mono .defer (() -> {
106+ Mono .deferContextual ( ctx -> {
110107 AtomicBoolean terminated = new AtomicBoolean (false );
111108 Timeout timeout = TimeoutContext .startTimeout (timeoutMs );
112109 return createCheckAndCreateIndexesMono (timeout )
@@ -120,7 +117,7 @@ public void subscribe(final Subscriber<? super Void> s) {
120117 return originalError ;
121118 })
122119 .then (Mono .error (originalError )))
123- .doOnCancel (() -> createCancellationMono (terminated , timeout ).subscribe ())
120+ .doOnCancel (() -> createCancellationMono (terminated , timeout ).contextWrite ( ctx ). subscribe ())
124121 .then ();
125122 }).subscribe (s );
126123 }
@@ -149,38 +146,15 @@ public void subscribe(final Subscriber<? super ObjectId> subscriber) {
149146 }
150147
151148 private Mono <Void > createCheckAndCreateIndexesMono (@ Nullable final Timeout timeout ) {
152- AtomicBoolean collectionExists = new AtomicBoolean (false );
153- return Mono .create (sink -> findAllInCollection (filesCollection , timeout ).subscribe (
154- d -> collectionExists .set (true ),
155- sink ::error ,
156- () -> {
157- if (collectionExists .get ()) {
158- sink .success ();
159- } else {
160- checkAndCreateIndex (filesCollection .withReadPreference (primary ()), FILES_INDEX , timeout )
161- .doOnSuccess (i -> checkAndCreateIndex (chunksCollection .withReadPreference (primary ()), CHUNKS_INDEX , timeout )
162- .subscribe (unused -> {}, sink ::error , sink ::success ))
163- .subscribe (unused -> {}, sink ::error );
164- }
165- })
166- );
167- }
168-
169- private Mono <Document > findAllInCollection (final MongoCollection <GridFSFile > collection , @ Nullable final Timeout timeout ) {
170- return collectionWithTimeoutDeferred (collection
171- .withDocumentClass (Document .class )
172- .withReadPreference (primary ()), timeout )
173- .flatMap (wrappedCollection -> {
174- if (clientSession != null ) {
175- return Mono .from (wrappedCollection .find (clientSession )
176- .projection (PROJECTION )
177- .first ());
178- } else {
179- return Mono .from (wrappedCollection .find ()
180- .projection (PROJECTION )
181- .first ());
182- }
183- });
149+ return collectionWithTimeoutDeferred (filesCollection .withDocumentClass (Document .class ).withReadPreference (primary ()), timeout )
150+ .map (collection -> clientSession != null ? collection .find (clientSession ) : collection .find ())
151+ .flatMap (findPublisher -> Mono .from (findPublisher .projection (PROJECTION ).first ()))
152+ .switchIfEmpty (Mono .defer (() ->
153+ checkAndCreateIndex (filesCollection .withReadPreference (primary ()), FILES_INDEX , timeout )
154+ .then (checkAndCreateIndex (chunksCollection .withReadPreference (primary ()), CHUNKS_INDEX , timeout ))
155+ .then (Mono .empty ())
156+ ))
157+ .then ();
184158 }
185159
186160 private <T > Mono <Boolean > hasIndex (final MongoCollection <T > collection , final Document index , @ Nullable final Timeout timeout ) {
@@ -228,40 +202,37 @@ private <T> Mono<String> createIndexMono(final MongoCollection<T> collection, fi
228202 }
229203
230204 private Mono <Long > createSaveChunksMono (final AtomicBoolean terminated , @ Nullable final Timeout timeout ) {
231- return Mono .create (sink -> {
232- AtomicLong lengthInBytes = new AtomicLong (0 );
233- AtomicInteger chunkIndex = new AtomicInteger (0 );
234- new ResizingByteBufferFlux (source , chunkSizeBytes )
235- .takeUntilOther (createMonoTimer (timeout ))
236- .flatMap ((Function <ByteBuffer , Publisher <InsertOneResult >>) byteBuffer -> {
237- if (terminated .get ()) {
238- return Mono .empty ();
239- }
240- byte [] byteArray = new byte [byteBuffer .remaining ()];
241- if (byteBuffer .hasArray ()) {
242- System .arraycopy (byteBuffer .array (), byteBuffer .position (), byteArray , 0 , byteBuffer .remaining ());
243- } else {
244- byteBuffer .mark ();
245- byteBuffer .get (byteArray );
246- byteBuffer .reset ();
247- }
248- Binary data = new Binary (byteArray );
249- lengthInBytes .addAndGet (data .length ());
205+ return new ResizingByteBufferFlux (source , chunkSizeBytes )
206+ .takeUntilOther (createMonoTimer (timeout ))
207+ .index ()
208+ .flatMap (indexAndBuffer -> {
209+ if (terminated .get ()) {
210+ return Mono .empty ();
211+ }
212+ Long index = indexAndBuffer .getT1 ();
213+ ByteBuffer byteBuffer = indexAndBuffer .getT2 ();
214+ byte [] byteArray = new byte [byteBuffer .remaining ()];
215+ if (byteBuffer .hasArray ()) {
216+ System .arraycopy (byteBuffer .array (), byteBuffer .position (), byteArray , 0 , byteBuffer .remaining ());
217+ } else {
218+ byteBuffer .mark ();
219+ byteBuffer .get (byteArray );
220+ byteBuffer .reset ();
221+ }
222+ Binary data = new Binary (byteArray );
250223
251- Document chunkDocument = new Document ("files_id" , fileId )
252- .append ("n" , chunkIndex . getAndIncrement ())
253- .append ("data" , data );
224+ Document chunkDocument = new Document ("files_id" , fileId )
225+ .append ("n" , index . intValue ())
226+ .append ("data" , data );
254227
255- if (clientSession == null ) {
256- return collectionWithTimeout (chunksCollection , timeout , TIMEOUT_ERROR_MESSAGE ).insertOne (chunkDocument );
257- } else {
258- return collectionWithTimeout (chunksCollection , timeout , TIMEOUT_ERROR_MESSAGE ).insertOne (clientSession ,
259- chunkDocument );
260- }
228+ Publisher <InsertOneResult > insertOnePublisher = clientSession == null
229+ ? collectionWithTimeout (chunksCollection , timeout , TIMEOUT_ERROR_MESSAGE ).insertOne (chunkDocument )
230+ : collectionWithTimeout (chunksCollection , timeout , TIMEOUT_ERROR_MESSAGE )
231+ .insertOne (clientSession , chunkDocument );
261232
262- })
263- . subscribe ( null , sink :: error , () -> sink . success ( lengthInBytes . get ()));
264- } );
233+ return Mono . from ( insertOnePublisher ). thenReturn ( data . length ());
234+ })
235+ . reduce ( 0L , Long :: sum );
265236 }
266237
267238 /**