 package com.google.firebase.firestore.local;
 
 import static com.google.firebase.firestore.util.Assert.fail;
+import static com.google.firebase.firestore.util.Assert.hardAssert;
 
+import android.database.Cursor;
 import androidx.annotation.Nullable;
 import com.google.firebase.firestore.auth.User;
 import com.google.firebase.firestore.model.DocumentKey;
 import com.google.firebase.firestore.model.ResourcePath;
 import com.google.firebase.firestore.model.mutation.Mutation;
 import com.google.firebase.firestore.model.mutation.Overlay;
+import com.google.firebase.firestore.util.BackgroundQueue;
+import com.google.firebase.firestore.util.Executors;
 import com.google.firestore.v1.Write;
 import com.google.protobuf.InvalidProtocolBufferException;
+import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.HashMap;
+import java.util.List;
 import java.util.Map;
+import java.util.SortedSet;
+import java.util.concurrent.Executor;
 
 public class SQLiteDocumentOverlayCache implements DocumentOverlayCache {
   private final SQLitePersistence db;
@@ -47,7 +56,54 @@ public Overlay getOverlay(DocumentKey key) {
             "SELECT overlay_mutation, largest_batch_id FROM document_overlays "
                 + "WHERE uid = ? AND collection_path = ? AND document_id = ?")
         .binding(uid, collectionPath, documentId)
-        .firstValue(this::decodeOverlay);
+        .firstValue(row -> this.decodeOverlay(row.getBlob(0), row.getInt(1)));
+  }
+
+  @Override
+  public Map<DocumentKey, Overlay> getOverlays(SortedSet<DocumentKey> keys) {
+    hardAssert(keys.comparator() == null, "getOverlays() requires natural order");
+    Map<DocumentKey, Overlay> result = new HashMap<>();
+
+    BackgroundQueue backgroundQueue = new BackgroundQueue();
+    ResourcePath currentCollection = ResourcePath.EMPTY;
+    List<Object> accumulatedDocumentIds = new ArrayList<>();
+    for (DocumentKey key : keys) {
+      if (!currentCollection.equals(key.getCollectionPath())) {
+        processSingleCollection(result, backgroundQueue, currentCollection, accumulatedDocumentIds);
+        currentCollection = key.getCollectionPath();
+        accumulatedDocumentIds.clear();
+      }
+      accumulatedDocumentIds.add(key.getDocumentId());
+    }
+
+    processSingleCollection(result, backgroundQueue, currentCollection, accumulatedDocumentIds);
+    backgroundQueue.drain();
+    return result;
+  }
+
+  /** Reads the overlays for the documents in a single collection. */
+  private void processSingleCollection(
+      Map<DocumentKey, Overlay> result,
+      BackgroundQueue backgroundQueue,
+      ResourcePath collectionPath,
+      List<Object> documentIds) {
+    if (documentIds.isEmpty()) {
+      return;
+    }
+
+    SQLitePersistence.LongQuery longQuery =
+        new SQLitePersistence.LongQuery(
+            db,
+            "SELECT overlay_mutation, largest_batch_id FROM document_overlays "
+                + "WHERE uid = ? AND collection_path = ? AND document_id IN (",
+            Arrays.asList(uid, EncodedPath.encode(collectionPath)),
+            documentIds,
+            ")");
+    while (longQuery.hasMoreSubqueries()) {
+      longQuery
+          .performNextSubquery()
+          .forEach(row -> processOverlaysInBackground(backgroundQueue, result, row));
+    }
   }
 
   private void saveOverlay(int largestBatchId, DocumentKey key, @Nullable Mutation mutation) {
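The new bulk `getOverlays(SortedSet<DocumentKey>)` leans on the natural ordering of `DocumentKey` (which sorts by resource path, so keys from the same collection are contiguous): one linear pass can accumulate document ids and flush a single `IN (...)` query whenever the collection changes, which is what the `hardAssert` on the comparator guards. The sketch below is illustrative only — `OverlayGroupingSketch`, `Key`, and `groupByCollection` are hypothetical stand-ins, not SDK types — and shows the grouping pattern in isolation:

```java
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.SortedSet;
import java.util.TreeSet;

/**
 * Illustrative sketch only (not part of the patch): shows why naturally ordered keys let
 * the cache flush one IN(...) query per collection in a single pass.
 */
final class OverlayGroupingSketch {

  /** Hypothetical stand-in for DocumentKey: ordered by collection path, then document id. */
  static final class Key implements Comparable<Key> {
    final String collection;
    final String documentId;

    Key(String collection, String documentId) {
      this.collection = collection;
      this.documentId = documentId;
    }

    @Override
    public int compareTo(Key other) {
      int c = collection.compareTo(other.collection);
      return c != 0 ? c : documentId.compareTo(other.documentId);
    }
  }

  /** Groups keys into per-collection document-id batches, mirroring the loop in the patch. */
  static Map<String, List<String>> groupByCollection(SortedSet<Key> keys) {
    Map<String, List<String>> batches = new LinkedHashMap<>();
    String currentCollection = null;
    List<String> documentIds = new ArrayList<>();
    for (Key key : keys) {
      if (!key.collection.equals(currentCollection)) {
        // Collection changed: flush the accumulated ids (one SQL query in the real code).
        if (currentCollection != null) {
          batches.put(currentCollection, new ArrayList<>(documentIds));
        }
        currentCollection = key.collection;
        documentIds.clear();
      }
      documentIds.add(key.documentId);
    }
    if (currentCollection != null) {
      batches.put(currentCollection, documentIds);
    }
    return batches;
  }

  public static void main(String[] args) {
    SortedSet<Key> keys = new TreeSet<>();
    keys.add(new Key("rooms", "b"));
    keys.add(new Key("users", "alice"));
    keys.add(new Key("rooms", "a"));
    keys.add(new Key("users", "bob"));
    // Prints {rooms=[a, b], users=[alice, bob]}: each collection's ids arrive contiguously.
    System.out.println(groupByCollection(keys));
  }
}
```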
@@ -83,49 +139,48 @@ public void removeOverlaysForBatchId(int batchId) {
 
   @Override
   public Map<DocumentKey, Overlay> getOverlays(ResourcePath collection, int sinceBatchId) {
-    String collectionPath = EncodedPath.encode(collection);
-
     Map<DocumentKey, Overlay> result = new HashMap<>();
+    BackgroundQueue backgroundQueue = new BackgroundQueue();
     db.query(
             "SELECT overlay_mutation, largest_batch_id FROM document_overlays "
                 + "WHERE uid = ? AND collection_path = ? AND largest_batch_id > ?")
-        .binding(uid, collectionPath, sinceBatchId)
-        .forEach(
-            row -> {
-              Overlay overlay = decodeOverlay(row);
-              result.put(overlay.getKey(), overlay);
-            });
-
+        .binding(uid, EncodedPath.encode(collection), sinceBatchId)
+        .forEach(row -> processOverlaysInBackground(backgroundQueue, result, row));
+    backgroundQueue.drain();
     return result;
   }
 
   @Override
   public Map<DocumentKey, Overlay> getOverlays(
       String collectionGroup, int sinceBatchId, int count) {
     Map<DocumentKey, Overlay> result = new HashMap<>();
-    Overlay[] lastOverlay = new Overlay[] {null};
+    String[] lastCollectionPath = new String[1];
+    String[] lastDocumentPath = new String[1];
+    int[] lastLargestBatchId = new int[1];
 
+    BackgroundQueue backgroundQueue = new BackgroundQueue();
     db.query(
-            "SELECT overlay_mutation, largest_batch_id FROM document_overlays "
+            "SELECT overlay_mutation, largest_batch_id, collection_path, document_id "
+                + " FROM document_overlays "
                 + "WHERE uid = ? AND collection_group = ? AND largest_batch_id > ? "
                 + "ORDER BY largest_batch_id, collection_path, document_id LIMIT ?")
         .binding(uid, collectionGroup, sinceBatchId, count)
         .forEach(
             row -> {
-              lastOverlay[0] = decodeOverlay(row);
-              result.put(lastOverlay[0].getKey(), lastOverlay[0]);
+              lastLargestBatchId[0] = row.getInt(1);
+              lastCollectionPath[0] = row.getString(2);
+              lastDocumentPath[0] = row.getString(3);
+              processOverlaysInBackground(backgroundQueue, result, row);
             });
 
-    if (lastOverlay[0] == null) {
+    if (lastCollectionPath[0] == null) {
       return result;
     }
 
     // This function should not return partial batch overlays, even if the number of overlays in the
     // result set exceeds the given `count` argument. Since the `LIMIT` in the above query might
     // result in a partial batch, the following query appends any remaining overlays for the last
     // batch.
-    DocumentKey key = lastOverlay[0].getKey();
-    String encodedCollectionPath = EncodedPath.encode(key.getCollectionPath());
     db.query(
             "SELECT overlay_mutation, largest_batch_id FROM document_overlays "
                 + "WHERE uid = ? AND collection_group = ? "
@@ -134,23 +189,35 @@ public Map<DocumentKey, Overlay> getOverlays(
         .binding(
             uid,
             collectionGroup,
-            encodedCollectionPath,
-            encodedCollectionPath,
-            key.getDocumentId(),
-            lastOverlay[0].getLargestBatchId())
-        .forEach(
-            row -> {
-              Overlay overlay = decodeOverlay(row);
-              result.put(overlay.getKey(), overlay);
-            });
-
+            lastCollectionPath[0],
+            lastCollectionPath[0],
+            lastDocumentPath[0],
+            lastLargestBatchId[0])
+        .forEach(row -> processOverlaysInBackground(backgroundQueue, result, row));
+    backgroundQueue.drain();
     return result;
   }
 
-  private Overlay decodeOverlay(android.database.Cursor row) {
+  private void processOverlaysInBackground(
+      BackgroundQueue backgroundQueue, Map<DocumentKey, Overlay> results, Cursor row) {
+    byte[] rawMutation = row.getBlob(0);
+    int largestBatchId = row.getInt(1);
+
+    // Since scheduling background tasks incurs overhead, we only dispatch to a
+    // background thread if there are still some documents remaining.
+    Executor executor = row.isLast() ? Executors.DIRECT_EXECUTOR : backgroundQueue;
+    executor.execute(
+        () -> {
+          Overlay overlay = decodeOverlay(rawMutation, largestBatchId);
+          synchronized (results) {
+            results.put(overlay.getKey(), overlay);
+          }
+        });
+  }
+
+  private Overlay decodeOverlay(byte[] rawMutation, int largestBatchId) {
     try {
-      Write write = Write.parseFrom(row.getBlob(0));
-      int largestBatchId = row.getInt(1);
+      Write write = Write.parseFrom(rawMutation);
       Mutation mutation = serializer.decodeMutation(write);
       return Overlay.create(largestBatchId, mutation);
     } catch (InvalidProtocolBufferException e) {
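`processOverlaysInBackground` moves the proto decode off the calling thread: each row's blob goes to a `BackgroundQueue`, the shared result map is guarded with `synchronized`, the last row is decoded inline on `Executors.DIRECT_EXECUTOR` to skip one more scheduling hop, and `drain()` blocks until every queued decode has finished. The sketch below is a rough approximation of that pattern using plain `java.util.concurrent` — `ParallelDecodeSketch`, `parse()`, and the fixed thread pool are hypothetical stand-ins, not the SDK's `BackgroundQueue`:

```java
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

/**
 * Illustrative sketch only: approximates the BackgroundQueue pattern with a plain
 * ExecutorService; parse() stands in for the CPU-bound decodeOverlay() call.
 */
final class ParallelDecodeSketch {

  static Map<String, Integer> decodeAll(List<String> rows) throws InterruptedException {
    Map<String, Integer> results = new HashMap<>();
    ExecutorService backgroundQueue = Executors.newFixedThreadPool(4);
    Executor directExecutor = Runnable::run; // decode inline, like DIRECT_EXECUTOR in the SDK

    for (int i = 0; i < rows.size(); i++) {
      String row = rows.get(i);
      // Only pay the scheduling cost while more rows remain; run the last row inline.
      Executor executor = (i == rows.size() - 1) ? directExecutor : backgroundQueue;
      executor.execute(
          () -> {
            int decoded = parse(row); // CPU-bound work, done off the calling thread
            synchronized (results) { // the map is shared across worker threads
              results.put(row, decoded);
            }
          });
    }

    // Rough equivalent of backgroundQueue.drain(): wait for the queued decodes to finish.
    backgroundQueue.shutdown();
    backgroundQueue.awaitTermination(1, TimeUnit.MINUTES);
    return results;
  }

  private static int parse(String row) {
    return row.length(); // placeholder for proto parsing
  }
}
```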