refactor(core): Make pruning via lifecycle configuration in S3 mode mandatory (#7482)

Since we do not store which executions produced binary data, pruning binary
data on S3 requires querying for the binary data items of every execution in
order to delete them. To minimize requests to S3, these per-execution pruning
requests are skipped in S3 mode and pruning is instead delegated to a TTL set
at bucket level via lifecycle configuration.
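
For illustration only (not part of this commit): "TTL at bucket level" means an S3 lifecycle rule that expires objects under n8n's binary-data prefix (workflows/<workflowId>/executions/<executionId>/binary_data/, visible in the ObjectStoreManager.deleteMany removed below). A minimal sketch using the AWS SDK for JavaScript v3 follows; the bucket name, prefix scope, and retention period are placeholders, and the rule is assumed to be applied by the operator rather than by n8n itself.

import { S3Client, PutBucketLifecycleConfigurationCommand } from '@aws-sdk/client-s3';

// Sketch: let S3 prune n8n binary data via a lifecycle TTL instead of
// n8n issuing per-execution delete requests.
async function configureBinaryDataTTL() {
	const s3 = new S3Client({}); // region and credentials from the environment
	await s3.send(
		new PutBucketLifecycleConfigurationCommand({
			Bucket: 'my-n8n-bucket', // placeholder bucket name
			LifecycleConfiguration: {
				Rules: [
					{
						ID: 'expire-n8n-binary-data',
						Status: 'Enabled',
						// Placeholder prefix: scope it to wherever your binary data lives,
						// e.g. the workflows/.../binary_data/ layout used by the object store.
						Filter: { Prefix: 'workflows/' },
						Expiration: { Days: 14 }, // placeholder retention period
					},
				],
			},
		}),
	);
}

With such a rule in place, S3 removes expired binary data on its own schedule and n8n no longer needs to issue delete requests during pruning.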
Iván Ovejero 2023-10-24 10:37:02 +02:00 committed by GitHub
parent a9fdd018f4
commit 78243edd18
5 changed files with 7 additions and 35 deletions


@@ -301,10 +301,7 @@ export class ExecutionRepository extends Repository<ExecutionEntity> {
* Permanently remove a single execution and its binary data.
*/
async hardDelete(ids: { workflowId: string; executionId: string }) {
- return Promise.all([
- this.binaryDataService.deleteMany([ids]),
- this.delete({ id: ids.executionId }),
- ]);
+ return Promise.all([this.delete(ids.executionId), this.binaryDataService.deleteMany([ids])]);
}
async updateExistingExecution(executionId: string, execution: Partial<IExecutionResponse>) {
@@ -565,7 +562,7 @@ export class ExecutionRepository extends Repository<ExecutionEntity> {
return;
}
- await this.binaryDataService.deleteMany(workflowIdsAndExecutionIds);
+ await this.binaryDataService.deleteMany(workflowIdsAndExecutionIds); // only in FS mode
this.logger.debug(
`Hard-deleting ${executionIds.length} executions from database (pruning cycle)`,


@@ -151,7 +151,7 @@ export class BinaryDataService {
if (!manager) return;
- await manager.deleteMany(ids);
+ if (manager.deleteMany) await manager.deleteMany(ids);
}
@LogCatch((error) =>


@@ -83,19 +83,6 @@ export class ObjectStoreManager implements BinaryData.Manager {
return { fileId: targetFileId, fileSize: sourceFile.length };
}
- async deleteMany(ids: BinaryData.IdsForDeletion) {
- const prefixes = ids.map(
- ({ workflowId, executionId }) =>
- `workflows/${workflowId}/executions/${executionId}/binary_data/`,
- );
- await Promise.all(
- prefixes.map(async (prefix) => {
- await this.objectStoreService.deleteMany(prefix);
- }),
- );
- }
async rename(oldFileId: string, newFileId: string) {
const oldFile = await this.objectStoreService.get(oldFileId, { mode: 'buffer' });
const oldFileMetadata = await this.objectStoreService.getMetadata(oldFileId);


@@ -55,7 +55,10 @@ export namespace BinaryData {
getAsStream(fileId: string, chunkSize?: number): Promise<Readable>;
getMetadata(fileId: string): Promise<Metadata>;
- deleteMany(ids: IdsForDeletion): Promise<void>;
+ /**
+  * Present for `FileSystem`, absent for `ObjectStore` (delegated to S3 lifecycle config)
+  */
+ deleteMany?(ids: IdsForDeletion): Promise<void>;
copyByFileId(workflowId: string, executionId: string, sourceFileId: string): Promise<string>;
copyByFilePath(
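
Not part of the diff: a condensed, self-contained sketch (names simplified, with a hypothetical pruneBinaryData helper) of how the pieces above fit together once deleteMany is optional. Only the filesystem manager implements it; the object-store manager omits it and relies on the bucket's lifecycle rule, so the service-level call becomes a guarded no-op in S3 mode.

// Condensed sketch, not the actual n8n code: names and shapes are simplified.
type IdsForDeletion = Array<{ workflowId: string; executionId: string }>;

interface Manager {
	// Present for the filesystem manager, absent for the object-store manager,
	// which delegates pruning to the S3 lifecycle configuration.
	deleteMany?(ids: IdsForDeletion): Promise<void>;
}

const fileSystemManager: Manager = {
	async deleteMany(ids) {
		// delete the on-disk binary-data files for each { workflowId, executionId } pair
	},
};

const objectStoreManager: Manager = {
	// no deleteMany: expired objects are removed by the bucket's TTL rule
};

async function pruneBinaryData(manager: Manager, ids: IdsForDeletion) {
	if (manager.deleteMany) await manager.deleteMany(ids); // no-op in S3 mode
}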


@@ -116,21 +116,6 @@ describe('copyByFilePath()', () => {
});
});
- describe('deleteMany()', () => {
- it('should delete many files by prefix', async () => {
- const ids = [
- { workflowId, executionId },
- { workflowId: otherWorkflowId, executionId: otherExecutionId },
- ];
- const promise = objectStoreManager.deleteMany(ids);
- await expect(promise).resolves.not.toThrow();
- expect(objectStoreService.deleteMany).toHaveBeenCalledTimes(2);
- });
- });
describe('rename()', () => {
it('should rename a file', async () => {
const promise = objectStoreManager.rename(fileId, otherFileId);