Mirror of https://github.com/zadam/trilium.git, synced 2025-11-08 22:35:50 +01:00
small sync fixes and refactorings
@@ -132,8 +132,7 @@ class AbstractBeccaEntity {
         if (this.hasStringContent()) {
             content = content.toString();
-        }
-        else {
+        } else {
             content = Buffer.isBuffer(content) ? content : Buffer.from(content);
         }

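Aside from the brace-style fix, this block is the central string-vs-Buffer normalization for note content. A standalone sketch of the same logic (the hasStringContent flag here stands in for this.hasStringContent() and is an assumption, not Trilium's API):

    // Minimal sketch: string-typed notes keep a JS string, everything else becomes a Buffer.
    function normalizeContent(content, hasStringContent) {
        if (hasStringContent) {
            return content.toString();
        } else {
            return Buffer.isBuffer(content) ? content : Buffer.from(content);
        }
    }

    console.log(typeof normalizeContent("<p>text</p>", true));      // "string"
    console.log(Buffer.isBuffer(normalizeContent("bytes", false))); // true
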
@@ -162,26 +161,26 @@ class AbstractBeccaEntity {
         });
     }

-    #deleteBlobIfNotUsed(blobId) {
-        if (sql.getValue("SELECT 1 FROM notes WHERE blobId = ? LIMIT 1", [blobId])) {
+    #deleteBlobIfNotUsed(oldBlobId) {
+        if (sql.getValue("SELECT 1 FROM notes WHERE blobId = ? LIMIT 1", [oldBlobId])) {
             return;
         }

-        if (sql.getValue("SELECT 1 FROM attachments WHERE blobId = ? LIMIT 1", [blobId])) {
+        if (sql.getValue("SELECT 1 FROM attachments WHERE blobId = ? LIMIT 1", [oldBlobId])) {
             return;
         }

-        if (sql.getValue("SELECT 1 FROM revisions WHERE blobId = ? LIMIT 1", [blobId])) {
+        if (sql.getValue("SELECT 1 FROM revisions WHERE blobId = ? LIMIT 1", [oldBlobId])) {
             return;
         }

-        sql.execute("DELETE FROM blobs WHERE blobId = ?", [blobId]);
-        sql.execute("DELETE FROM entity_changes WHERE entityName = 'blobs' AND entityId = ?", [blobId]);
+        sql.execute("DELETE FROM blobs WHERE blobId = ?", [oldBlobId]);
+        sql.execute("DELETE FROM entity_changes WHERE entityName = 'blobs' AND entityId = ?", [oldBlobId]);
     }

     #getUnencryptedContentForHashCalculation(unencryptedContent) {
         if (this.isProtected) {
-            // a "random" prefix make sure that the calculated hash/blobId is different for an encrypted note and decrypted
+            // a "random" prefix makes sure that the calculated hash/blobId is different for a decrypted/encrypted content
             const encryptedPrefixSuffix = "t$[nvQg7q)&_ENCRYPTED_?M:Bf&j3jr_";
             return Buffer.isBuffer(unencryptedContent)
                 ? Buffer.concat([Buffer.from(encryptedPrefixSuffix), unencryptedContent])

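The renamed parameter makes it clearer that the method checks whether the previous blob is still referenced anywhere before deleting it. The encryptedPrefixSuffix constant is worth a note: mixing it into the hashed plaintext guarantees that a protected note and an unprotected note with identical text never map to the same blobId. A runnable sketch of that property, assuming the blobId is derived from a plain content digest (the real utils.hashedBlobId may differ):

    const crypto = require('crypto');

    // Assumption: blobId is a truncated digest of the content (illustrative only).
    function hashedBlobId(content) {
        return crypto.createHash('sha512').update(content).digest('base64').substring(0, 20);
    }

    const text = Buffer.from("identical note text");
    const prefix = Buffer.from("t$[nvQg7q)&_ENCRYPTED_?M:Bf&j3jr_");

    // Without the prefix, both notes would hash to the same blobId.
    console.log(hashedBlobId(text));                          // blobId of the unprotected note
    console.log(hashedBlobId(Buffer.concat([prefix, text]))); // different blobId for the protected one
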
@@ -196,7 +195,7 @@ class AbstractBeccaEntity {
          * We're using the unencrypted blob for the hash calculation, because otherwise the random IV would
          * cause every content blob to be unique which would balloon the database size (esp. with revisioning).
          * This has minor security implications (it's easy to infer that given content is shared between different
-         * notes/attachments, but the trade-off comes out clearly positive).
+         * notes/attachments), but the trade-off comes out clearly positive.
          */
         const newBlobId = utils.hashedBlobId(unencryptedContentForHashCalculation);
         const blobNeedsInsert = !sql.getValue('SELECT 1 FROM blobs WHERE blobId = ?', [newBlobId]);

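The comment documents the deduplication trade-off; the fix only moves the closing parenthesis. The mechanism itself: because the blobId is a content hash, identical content produces the same blobId and the row is inserted only once. A minimal in-memory sketch of that content addressing (a Map standing in for the real sql-backed blobs table):

    const crypto = require('crypto');

    const blobs = new Map(); // stands in for the `blobs` table

    function saveBlob(content) {
        const blobId = crypto.createHash('sha512').update(content).digest('hex').slice(0, 20);
        if (!blobs.has(blobId)) { // the blobNeedsInsert check from the hunk above
            blobs.set(blobId, content);
        }
        return blobId;
    }

    saveBlob("duplicated revision text");
    saveBlob("duplicated revision text");
    console.log(blobs.size); // 1 -- the second save deduplicates against the first
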
@@ -214,7 +213,9 @@ class AbstractBeccaEntity {

         sql.upsert("blobs", "blobId", pojo);

-        const hash = utils.hash(`${newBlobId}|${pojo.content.toString()}`);
+        // we can't reuse blobId as an entity_changes hash, because this one has to be calculatable without having
+        // access to the decrypted content
+        const hash = blobService.calculateContentHash(pojo);

         entityChangesService.addEntityChange({
             entityName: 'blobs',

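The new comment states the key constraint: the blobId is computed from the unencrypted content, so a sync peer that cannot decrypt a protected blob could never recompute it. The entity_changes hash therefore has to cover what is actually stored. A sketch of the distinction, with the hash function assumed to be a simple digest (utils.hash's exact algorithm is not shown in this diff):

    const crypto = require('crypto');

    // Assumption: utils.hash is some stable text digest; sha1/base64 here is illustrative only.
    function hash(text) {
        return crypto.createHash('sha1').update(text).digest('base64');
    }

    // blobId:             derived from the plaintext (requires decryption to recompute)
    // entity_changes hash: derived from blobId + stored bytes (recomputable by any peer)
    function entityChangesHash(blobId, storedContent) {
        return hash(`${blobId}|${storedContent.toString()}`);
    }

    console.log(entityChangesHash("someBlobId", Buffer.from("stored, possibly encrypted bytes")));
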
@@ -1,6 +1,7 @@
 const becca = require('../becca/becca');
 const NotFoundError = require("../errors/not_found_error");
 const protectedSessionService = require("./protected_session");
+const utils = require("./utils");

 function getBlobPojo(entityName, entityId) {
     const entity = becca.getEntity(entityName, entityId);

@@ -45,7 +46,12 @@ function processContent(content, isProtected, isStringContent) {
     }
 }

+function calculateContentHash({blobId, content}) {
+    return utils.hash(`${blobId}|${content.toString()}`);
+}
+
 module.exports = {
     getBlobPojo,
-    processContent
+    processContent,
+    calculateContentHash
 };

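Because the new helper destructures {blobId, content}, any object carrying those two fields works: the pojo upserted in the AbstractBeccaEntity hunk above as well as a raw row from a SELECT. A hypothetical usage (the row values are made up for illustration):

    const blobService = require('./services/blob');

    // Hypothetical row; in the diff it comes from sql.getRow(...) or the upserted pojo.
    const row = {blobId: "someBlobId", content: "note text", utcDateModified: "2023-05-01 10:00:00"};

    // Extra fields such as utcDateModified are simply ignored by the destructuring.
    console.log(blobService.calculateContentHash(row));
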
@@ -5,6 +5,7 @@ const cls = require('./cls');
 const utils = require('./utils');
 const instanceId = require('./member_id');
 const becca = require("../becca/becca");
+const blobService = require("../services/blob");

 let maxEntityChangeId = 0;

@@ -88,52 +89,53 @@ function fillEntityChanges(entityName, entityPrimaryKey, condition = '') {
     cleanupEntityChangesForMissingEntities(entityName, entityPrimaryKey);

     sql.transactional(() => {
-        const entityIds = sql.getColumn(`SELECT ${entityPrimaryKey} FROM ${entityName}`
-            + (condition ? ` WHERE ${condition}` : ''));
+        const entityIds = sql.getColumn(`SELECT ${entityPrimaryKey} FROM ${entityName} ${condition}`);

         let createdCount = 0;

         for (const entityId of entityIds) {
             const existingRows = sql.getValue("SELECT COUNT(1) FROM entity_changes WHERE entityName = ? AND entityId = ?", [entityName, entityId]);

-            // we don't want to replace existing entities (which would effectively cause full resync)
-            if (existingRows === 0) {
-                createdCount++;
-
-                let hash;
-                let utcDateChanged;
-                let isSynced;
-
-                if (entityName === 'blobs') {
-                    // FIXME: hacky, not sure if it might cause some problems
-                    hash = "fake value";
-                    utcDateChanged = dateUtils.utcNowDateTime();
-                    isSynced = true; // contents are always synced
-                } else {
-                    const entity = becca.getEntity(entityName, entityId);
-
-                    if (entity) {
-                        hash = entity?.generateHash() || "|deleted";
-                        utcDateChanged = entity?.getUtcDateChanged() || dateUtils.utcNowDateTime();
-                        isSynced = entityName !== 'options' || !!entity?.isSynced;
-                    } else {
-                        // entity might be null (not present in becca) when it's deleted
-                        // FIXME: hacky, not sure if it might cause some problems
-                        hash = "deleted";
-                        utcDateChanged = dateUtils.utcNowDateTime();
-                        isSynced = true; // deletable (the ones with isDeleted) entities are synced
-                    }
-                }
-
-                addEntityChange({
-                    entityName,
-                    entityId,
-                    hash: hash,
-                    isErased: false,
-                    utcDateChanged: utcDateChanged,
-                    isSynced: isSynced
-                });
+            if (existingRows !== 0) {
+                // we don't want to replace existing entities (which would effectively cause full resync)
+                continue;
             }
+
+            createdCount++;
+
+            let hash;
+            let utcDateChanged;
+            let isSynced;
+
+            if (entityName === 'blobs') {
+                const blob = sql.getRow("SELECT blobId, content, utcDateModified FROM blobs WHERE blobId = ?", [entityId]);
+                hash = blobService.calculateContentHash(blob);
+                utcDateChanged = blob.utcDateModified;
+                isSynced = true; // blobs are always synced
+            } else {
+                const entity = becca.getEntity(entityName, entityId);
+
+                if (entity) {
+                    hash = entity?.generateHash() || "|deleted";
+                    utcDateChanged = entity?.getUtcDateChanged() || dateUtils.utcNowDateTime();
+                    isSynced = entityName !== 'options' || !!entity?.isSynced;
+                } else {
+                    // entity might be null (not present in becca) when it's deleted
+                    // FIXME: hacky, not sure if it might cause some problems
+                    hash = "deleted";
+                    utcDateChanged = dateUtils.utcNowDateTime();
+                    isSynced = true; // deletable (the ones with isDeleted) entities are synced
+                }
+            }
+
+            addEntityChange({
+                entityName,
+                entityId,
+                hash: hash,
+                isErased: false,
+                utcDateChanged: utcDateChanged,
+                isSynced: isSynced
+            });
         }

         if (createdCount > 0) {

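Two things change in this hunk: the early continue flattens the loop body, and, more importantly, backfilled entity_changes rows for blobs now get a real content hash instead of the previous "fake value" placeholder. Since sync compares these hashes to decide whether an update is needed, a constant placeholder made genuine content differences invisible. A contrived but runnable illustration (digest choice assumed, as before):

    const crypto = require('crypto');

    // Same shape as the new blobService helper; sha1/base64 is an assumption.
    function calculateContentHash({blobId, content}) {
        return crypto.createHash('sha1').update(`${blobId}|${content.toString()}`).digest('base64');
    }

    const local  = {blobId: "sharedId", content: "version A"};
    const remote = {blobId: "sharedId", content: "version B"};

    // Before this commit both sides reported "fake value", so this check could never fire.
    console.log(calculateContentHash(local) !== calculateContentHash(remote)); // true -> divergence detected
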
@@ -153,7 +155,7 @@ function fillAllEntityChanges() {
         fillEntityChanges("blobs", "blobId");
         fillEntityChanges("attributes", "attributeId");
         fillEntityChanges("etapi_tokens", "etapiTokenId");
-        fillEntityChanges("options", "name", 'isSynced = 1');
+        fillEntityChanges("options", "name", 'WHERE isSynced = 1');
     });
 }

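This call-site change follows from the query refactor in fillEntityChanges above: the condition argument must now carry its own WHERE keyword, because it is interpolated verbatim instead of being wrapped. A sketch of the resulting query strings:

    // Mirrors the new template literal from fillEntityChanges.
    function buildQuery(entityPrimaryKey, entityName, condition = '') {
        return `SELECT ${entityPrimaryKey} FROM ${entityName} ${condition}`;
    }

    console.log(buildQuery("blobId", "blobs"));                       // SELECT blobId FROM blobs
    console.log(buildQuery("name", "options", "WHERE isSynced = 1")); // SELECT name FROM options WHERE isSynced = 1
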
@@ -65,7 +65,15 @@ function updateNormalEntity(remoteEntityChange, remoteEntityRow, instanceId) {
         || localEntityChange.hash !== remoteEntityChange.hash // sync error, we should still update
     ) {
         if (remoteEntityChange.entityName === 'blobs') {
-            remoteEntityRow.content = handleContent(remoteEntityRow.content);
+            // we always use a Buffer object which is different from normal saving - there we use a simple string type for
+            // "string notes". The problem is that in general, it's not possible to detect whether a blob content
+            // is string note or note (syncs can arrive out of order)
+            remoteEntityRow.content = remoteEntityRow.content === null ? null : Buffer.from(remoteEntityRow.content, 'base64');
+
+            if (remoteEntityRow.content?.byteLength === 0) {
+                // there seems to be a bug which causes empty buffer to be stored as NULL which is then picked up as inconsistency
+                remoteEntityRow.content = "";
+            }
         }

         sql.transactional(() => {

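The inlined logic (formerly the handleContent helper, removed in the following hunk) decodes synced blob content from base64 into a Buffer and normalizes an empty Buffer to the empty string, working around the bug where an empty Buffer round-trips through the database as NULL and is then flagged as an inconsistency. A standalone restatement of the same behavior:

    // Standalone restatement of the inlined content handling above.
    function handleSyncedContent(content) {
        content = content === null ? null : Buffer.from(content, 'base64');

        if (content?.byteLength === 0) {
            content = ""; // an empty Buffer would otherwise be persisted as NULL
        }

        return content;
    }

    console.log(handleSyncedContent(null));                                 // null passes through unchanged
    console.log(handleSyncedContent(""));                                   // "" (empty payload normalized)
    console.log(handleSyncedContent(Buffer.from("hi").toString('base64'))); // <Buffer 68 69>
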
@@ -92,20 +100,6 @@ function updateNoteReordering(entityChange, entity, instanceId) {
     return true;
 }

-function handleContent(content) {
-    // we always use a Buffer object which is different from normal saving - there we use a simple string type for
-    // "string notes". The problem is that in general, it's not possible to detect whether a blob content
-    // is string note or note (syncs can arrive out of order)
-    content = content === null ? null : Buffer.from(content, 'base64');
-
-    if (content && content.byteLength === 0) {
-        // there seems to be a bug which causes empty buffer to be stored as NULL which is then picked up as inconsistency
-        content = "";
-    }
-
-    return content;
-}
-
 function eraseEntity(entityChange, instanceId) {
     const {entityName, entityId} = entityChange;