
Commit b022a71

test: reproduce data duplication error on node 14 (#1458)

* Create the same test as before with setImmediate
* Rename parameter in sleep function to ticks
* Remove only
* Remove TODO
* Linting fix
* Add a comment to describe the sleep function
* Move the comment about 150 rows to the right place
* Add comment about backpressure
* increase the timeout of the test in question
* Remove only
* 🦉 Updates from OwlBot post-processor

  See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md
* Do not rely on the removed service anymore
* linter fix
* linting fix

---------

Co-authored-by: Owl Bot <gcf-owl-bot[bot]@users.noreply.github.com>
1 parent 6569c41 commit b022a71
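
The commit hinges on replacing a wall-clock sleep with one that simply yields to the Node event loop a fixed number of times, so the backpressure the test creates is deterministic across machines. As a rough standalone sketch of that technique (the name tickSleep and the tick count are illustrative, not part of the commit; the real helper is the sleep function added in test/readrows.ts below):

// Minimal sketch: resolve only after the event loop has turned `ticks` times.
// setImmediate queues the callback for the next event-loop iteration, so while
// we count down, pending stream and network callbacks get a chance to run.
function tickSleep(ticks: number): Promise<void> {
  return new Promise(resolve => {
    const nextEventLoop = () => {
      if (ticks > 0) {
        ticks -= 1;
        setImmediate(nextEventLoop);
      } else {
        resolve();
      }
    };
    nextEventLoop();
  });
}

// Usage: defer work by 1000 turns of the event loop instead of a fixed duration.
tickSleep(1000).then(() => console.log('ran after 1000 event-loop turns'));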

File tree

2 files changed: +60 -1 lines changed


src/mutation.ts

Lines changed: 2 additions & 1 deletion

@@ -129,7 +129,8 @@ export class Mutation {
     bytes: Buffer | string,
     options?: ConvertFromBytesOptions
   ): Buffer | Value | string {
-    const buf = bytes instanceof Buffer ? bytes : Buffer.from(bytes, 'base64');
+    const buf =
+      bytes instanceof Buffer ? bytes : Buffer.from(bytes as string, 'base64');
     if (options && options.isPossibleNumber && buf.length === 8) {
       // eslint-disable-next-line @typescript-eslint/no-explicit-any
       const num = Long.fromBytes(buf as any).toNumber();
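
For context, the change above is a formatting-driven reflow of the byte conversion plus an `as string` cast, apparently to keep the `Buffer.from` overloads happy; the behavior is unchanged: Buffer inputs pass through as-is and string inputs are decoded as base64. A hypothetical standalone version of just that branch (the name toBuffer is illustrative, not the library's API):

import {Buffer} from 'buffer';

// Buffers pass through untouched; strings are treated as base64-encoded bytes.
function toBuffer(bytes: Buffer | string): Buffer {
  return bytes instanceof Buffer ? bytes : Buffer.from(bytes as string, 'base64');
}

console.log(toBuffer('aGVsbG8=').toString()); // prints 'hello'
console.log(toBuffer(Buffer.from('hi')).toString()); // prints 'hi'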

test/readrows.ts

Lines changed: 58 additions & 0 deletions

@@ -402,6 +402,64 @@ describe('Bigtable/ReadRows', () => {
       }
     })();
   });
+  it('should return row data in the right order with a predictable sleep function', function (done) {
+    this.timeout(600000);
+    const keyFrom = undefined;
+    const keyTo = undefined;
+    // the server will error after sending this chunk (not row)
+    const errorAfterChunkNo = 100;
+    const dataResults = [];
+
+    // keyTo and keyFrom are not provided so they will be determined from
+    // the request that is passed in.
+    service.setService({
+      ReadRows: ReadRowsImpl.createService({
+        errorAfterChunkNo: 100, // the server will error after sending this chunk (not row)
+        valueSize: 1,
+        chunkSize: 1,
+        chunksPerResponse: 1,
+        debugLog,
+      }) as ServerImplementationInterface,
+    });
+    const sleep = (ticks: number) => {
+      // Adds an event to the end of the event loop `ticks` times
+      // This creates a predictable delay using the event loop and
+      // allows the streams to create a predictable amount of back pressure.
+      return new Promise(resolve => {
+        const nextEventLoop = () => {
+          if (ticks > 0) {
+            ticks = ticks - 1;
+            setImmediate(nextEventLoop);
+          } else {
+            resolve(ticks);
+          }
+        };
+        nextEventLoop();
+      });
+    };
+    (async () => {
+      try {
+        // 150 rows must be enough to reproduce issues with losing the data and to create backpressure
+        const stream = table.createReadStream({
+          start: '00000000',
+          end: '00000150',
+        });
+
+        for await (const row of stream) {
+          dataResults.push(row.id);
+          // sleep parameter needs to be high enough to produce backpressure.
+          await sleep(4000);
+        }
+        const expectedResults = Array.from(Array(150).keys())
+          .map(i => '00000000' + i.toString())
+          .map(i => i.slice(-8));
+        assert.deepStrictEqual(dataResults, expectedResults);
+        done();
+      } catch (error) {
+        done(error);
+      }
+    })();
+  });
 
   after(async () => {
     server.shutdown(() => {});
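
The new test also leans on the semantics of for await over a readable stream: while the loop body is awaiting, the iterator does not pull more data, so the stream's buffer fills and backpressure propagates back to the mock server. A small generic illustration with a plain Node stream (nothing Bigtable-specific; the values are made up):

import {Readable} from 'stream';

// A toy readable stream; Readable.from turns any iterable into a stream.
const source = Readable.from(['row-1', 'row-2', 'row-3']);

(async () => {
  for await (const chunk of source) {
    // While this promise is pending, the loop does not request the next chunk,
    // so a fast producer would see its internal buffer fill up (backpressure).
    await new Promise(resolve => setImmediate(resolve));
    console.log(chunk);
  }
})();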
