From 0bb7fba3b1699844fcb9a84cead94985986ab908 Mon Sep 17 00:00:00 2001
From: danrahn
Date: Sun, 5 Mar 2023 21:11:42 -0800
Subject: [PATCH] Implement database import

Implement everything necessary to get database import working, at least for
the happy path. Only basic validation has been done, so anything here should
definitely be considered beta.

Major integration notes:
* Add new overlay with a file selector, an 'apply to all libraries' checkbox,
  and a conflict resolution method.
* Add an import_db POST endpoint whose body contains the uploaded database.
* Implement POST body parsing. All parameters up to this point have fit in
  the query string, but that doesn't work for potentially megabytes of marker
  data. There are almost definitely libraries that can do the proper parsing
  for me, but I like to reinvent the wheel for some reason (see the notes
  after the diff for what a library-based approach could look like).
* Remove gate preventing the 'More' button from showing up by default.
* Mostly share the bulkAdd implementation to do the actual restoration, since
  the underlying concept is the same.

Other changes:
* Adjust some queries that could get too large if we wanted information on
  too many items. SQLite limits how many conditions a single query can hold,
  and when importing markers we might need information on thousands of
  individual metadataIds. Add some checks to use a different system in those
  cases, where we instead grab all items for the entire section, then filter
  them based on the ids passed in (see the chunking note after the diff).
* Rename PurgeConflictResolution to MarkerConflictResolution, as the enum is
  now shared between purge restoration and DB import.
* Several bulkRestore fixes caught when testing bulk import:
  * Properly set 'lastAction' when checking for overlap among markers we want
    to restore.
  * Don't add markerActions to existingMarkers map when there's already an
    identical existing marker.
* Fix the broken ImageTest.
---
 Client/Script/Common.js                |  40 ++-
 Client/Script/PurgedMarkerManager.js   |  12 +-
 Client/Script/ResultRow.js             |  21 +-
 Client/Script/SectionOptionsOverlay.js |  83 +++++-
 Server/Commands/PurgeCommands.js       |   6 +-
 Server/Commands/QueryCommands.js       |   9 +-
 Server/ImportExport.js                 | 333 ++++++++++++++++++++++++-
 Server/IntroEditor.js                  |  48 +++-
 Server/MarkerBackupManager.js          |   4 +-
 Server/PlexQueryManager.js             | 194 ++++++++++----
 Shared/PlexTypes.js                    |  12 +-
 Test/TestClasses/ImageTest.js          |   4 +-
 12 files changed, 670 insertions(+), 96 deletions(-)

diff --git a/Client/Script/Common.js b/Client/Script/Common.js
index 614a083..8af57ad 100644
--- a/Client/Script/Common.js
+++ b/Client/Script/Common.js
@@ -233,6 +233,13 @@ const ServerCommand = {
      * Resume a suspended Marker Editor.
      * @returns {Promise} */
     resume : async () => jsonRequest('resume'),
+
+    /**
+     * Upload a database file and import the markers present into the given section.
+     * @param {Object} database
+     * @param {number} sectionId
+     * @param {number} resolveType */
+    importDatabase : async (database, sectionId, resolveType) => jsonBodyRequest('import_db', { database : database, sectionId : sectionId, resolveType : resolveType }),
 };
 
 /* eslint-enable */
@@ -246,8 +253,35 @@ async function jsonRequest(endpoint, parameters={}) {
         url.searchParams.append(key, value);
     }
 
+    return jsonPostCore(url);
+}
+
+/**
+ * Similar to jsonRequest, but expects blob data and attaches parameters to the body instead of URL parameters.
+ * @param {string} endpoint
+ * @param {Object} parameters */
+async function jsonBodyRequest(endpoint, parameters={}) {
+    const url = new URL(endpoint, window.location.href);
+    const data = new FormData();
+    for (const [key, value] of Object.entries(parameters)) {
+        data.append(key, value);
+    }
+
+    return jsonPostCore(url, data);
+}
+
+/**
+ * Core method that makes a request to the server, expecting JSON in return.
+ * @param {URL} url The fully built URL endpoint
+ * @param {FormData} body The message body, if any. */
+async function jsonPostCore(url, body=null) {
+    const init = { method : 'POST', headers : { accept : 'application/json' } };
+    if (body) {
+        init.body = body;
+    }
+
     try {
-        const response = await (await fetch(url, { method : 'POST', headers : { accept : 'application/json' } })).json();
+        const response = await (await fetch(url, init)).json();
         Log.verbose(response, `Response from ${url}`);
 
         if (!response || response.Error) {
@@ -409,6 +443,10 @@ function errorMessage(error) {
         return error.toString();
     }
 
+    if (typeof error === 'string') {
+        return error;
+    }
+
     return 'I don\'t know what went wrong, sorry :(';
 }
 
diff --git a/Client/Script/PurgedMarkerManager.js b/Client/Script/PurgedMarkerManager.js
index 07ce960..dfd989b 100644
--- a/Client/Script/PurgedMarkerManager.js
+++ b/Client/Script/PurgedMarkerManager.js
@@ -15,7 +15,7 @@ import {
     PurgedServer,
     PurgedShow,
     PurgedTVSection } from './PurgedMarkerCache.js';
-import { MarkerData, PurgeConflictResolution, SectionType } from '../../Shared/PlexTypes.js';
+import { MarkerConflictResolution, MarkerData, SectionType } from '../../Shared/PlexTypes.js';
 import ButtonCreator from './ButtonCreator.js';
 import { PlexClientState } from './PlexClientState.js';
 import { PlexUI } from './PlexUI.js';
@@ -822,13 +822,13 @@ class MoviePurgeTable extends PurgeTable {
 
 class PurgeConflictControl {
     static #resolutionDescriptions = {
-        [PurgeConflictResolution.Overwrite] :
+        [MarkerConflictResolution.Overwrite] :
             `If any existing markers overlap with the restored marker, delete the existing marker.<br>` +
             `This is useful if you previously tweaked Plex-generated markers and analyzing the item reset them.`,
-        [PurgeConflictResolution.Merge] :
+        [MarkerConflictResolution.Merge] :
             `If any existing markers overlap with the restored marker, merge them into one marker that spans ` +
             `the full length of both.`,
-        [PurgeConflictResolution.Ignore] :
+        [MarkerConflictResolution.Ignore] :
             `If any existing markers overlap with the restored marker, keep the existing marker and permanently ` +
             `ignore the purged marker.`,
     };
@@ -844,7 +844,7 @@ class PurgeConflictControl {
         };
 
         const select = buildNode('select', { id : 'purgeResolution' }, 0, { change : resolutionTypeChange });
-        for (const [key, value] of Object.entries(PurgeConflictResolution)) {
+        for (const [key, value] of Object.entries(MarkerConflictResolution)) {
             select.appendChild(buildNode('option', { value : value }, key));
         }
 
@@ -867,7 +867,7 @@ class PurgeConflictControl {
                 select,
                 buildNode('div',
                     { id : 'purgeResolutionDescription', class : 'hidden' },
-                    PurgeConflictControl.#resolutionDescriptions[PurgeConflictResolution.Overwrite]));
+                    PurgeConflictControl.#resolutionDescriptions[MarkerConflictResolution.Overwrite]));
 
         return selectContainer;
     }
diff --git a/Client/Script/ResultRow.js b/Client/Script/ResultRow.js
index d7fe340..bd44b1a 100644
--- a/Client/Script/ResultRow.js
+++ b/Client/Script/ResultRow.js
@@ -329,11 +329,6 @@ class BulkActionResultRow extends ResultRow {
     }
 }
 
-/** TODO: Remove once the initial 'More' options are actually implemented */
-let _sectionMoreEnabled = false;
-window.isSectionMoreEnabled = () => _sectionMoreEnabled;
-window.setSectionMoreEnabled = (enabled) => { _sectionMoreEnabled = enabled; PlexUI.onFilterApplied(); };
-
 /**
  * A section-wide header that is displayed no matter what the current view state is (beside the blank state).
  * Currently only contains the Filter entrypoint.
@@ -371,15 +366,13 @@ class SectionOptionsResultRow extends ResultRow {
         Tooltip.setTooltip(this.#filterButton, 'No Active Filter'); // Need to seed the setTooltip, then use setText for everything else.
         this.updateFilterTooltip();
 
-        if (_sectionMoreEnabled) {
-            this.#moreOptionsButton = ButtonCreator.fullButton(
-                'More...',
-                'settings',
-                'More options',
-                'standard',
-                function(_e, self) { new SectionOptionsOverlay().show(self); },
-                { class : 'moreSectionOptionsBtn' });
-        }
+        this.#moreOptionsButton = ButtonCreator.fullButton(
+            'More...',
+            'settings',
+            'More options',
+            'standard',
+            function(_e, self) { new SectionOptionsOverlay().show(self); },
+            { class : 'moreSectionOptionsBtn' });
 
         appendChildren(row,
             titleNode,
diff --git a/Client/Script/SectionOptionsOverlay.js b/Client/Script/SectionOptionsOverlay.js
index 50e78c7..4dbd306 100644
--- a/Client/Script/SectionOptionsOverlay.js
+++ b/Client/Script/SectionOptionsOverlay.js
@@ -5,8 +5,8 @@ import Animation from './inc/Animate.js';
 import Overlay from './inc/Overlay.js';
 import ThemeColors from './ThemeColors.js';
 
+import { MarkerConflictResolution, MarkerEnum } from '../../Shared/PlexTypes.js';
 import ButtonCreator from './ButtonCreator.js';
-import { MarkerEnum } from '../../Shared/PlexTypes.js';
 import { PlexClientState } from './PlexClientState.js';
 import Tooltip from './inc/Tooltip.js';
 
@@ -99,9 +99,84 @@ class SectionOptionsOverlay {
     /**
      * Overlay invoked from the 'Import Markers' action.
      */
     #onImport() {
-        Log.info('Import!');
-        Overlay.dismiss();
-        setTimeout(() => { Overlay.show('Not Yet Implemented'); Overlay.setFocusBackElement(this.#focusBack); }, 250);
+        const container = buildNode('div', { class : 'sectionOptionsOverlayContainer' });
+        appendChildren(container,
+            buildNode('h2', {}, 'Marker Import'),
+            buildNode('hr'),
+            buildNode('span', {}, 'Import markers from a backed up database file to items in this library (or the entire server).'),
+            buildNode('hr'),
+            appendChildren(buildNode('div'),
+                buildNode('label', { for : 'databaseFile' }, 'Select a file: '),
+                buildNode('input', { type : 'file', accept : '.db,application/x-sqlite3', id : 'databaseFile' })),
+            appendChildren(buildNode('div'),
+                buildNode('label', { for : 'applyGlobally' }, 'Apply to all libraries: '),
+                buildNode('input', { type : 'checkbox', id : 'applyGlobally' })),
+            appendChildren(buildNode('div'),
+                buildNode('label', { for : 'resolutionType' }, 'Conflict Resolution Type: '),
+                appendChildren(buildNode('select', { id : 'resolutionType' }),
+                    buildNode('option', { value : MarkerConflictResolution.Overwrite }, 'Overwrite'),
+                    buildNode('option', { value : MarkerConflictResolution.Merge }, 'Merge'),
+                    buildNode('option', { value : MarkerConflictResolution.Ignore }, 'Ignore'))),
+            buildNode('br'),
+            appendChildren(buildNode('div'),
+                ButtonCreator.textButton(
+                    'Import',
+                    this.#importConfirmed.bind(this),
+                    { id : 'importConfirmBtn', class : 'overlayButton confirmSetting' }),
+                ButtonCreator.textButton(
+                    'Back',
+                    function () { this.#showMain(true); }.bind(this),
+                    { class : 'overlayButton' }))
+        );
+
+        this.#transitionOverlay(container, { dismissible : true, focusBack : this.#focusBack });
+    }
+
+    /**
+     * Upload the attached file and attempt to import all markers it contains. */
+    async #importConfirmed() {
+        /** @type {HTMLInputElement} */
+        const fileNode = $('#databaseFile');
+        const files = fileNode.files;
+        if (files?.length !== 1) {
+            return this.#flashInput(fileNode);
+        }
+
+        const file = files[0];
+        if (!file.name.endsWith('.db')) {
+            return this.#flashInput(fileNode);
+        }
+
+        if (file.size > 1024 * 1024 * 32) { // 32MB limit
+            errorResponseOverlay('Failed to upload and apply markers.', `File size of ${file.size} bytes is larger than 32MB limit.`);
+            return;
+        }
+
+        Log.info(file.name, `Uploading File`);
+        try {
+            const result = await ServerCommand.importDatabase(
+                file,
+                $('#applyGlobally').checked ? -1 : PlexClientState.activeSection(),
+                $('#resolutionType').value);
+
+            Overlay.dismiss(true /*forReshow*/);
+            setTimeout(() => {
+                Overlay.show(
+                    `<h2>Marker Import Succeeded</h2><hr>` +
+                    `Markers Added: ${result.added}<br>` +
+                    `Ignored Markers (identical): ${result.identical}<br>` +
+                    `Ignored Markers (merge/ignore/self-overlap): ${result.ignored}<br>` +
+                    `Existing Markers Deleted (overwritten): ${result.deleted}<br>` +
+                    `Existing Markers Modified (merged): ${result.modified}`,
+                    'Reload',
+                    // Easier to just reload the page instead of reconciling all the newly deleted markers
+                    () => { window.location.reload(); },
+                    false /*dismissible*/);
+                Overlay.setFocusBackElement(this.#focusBack);
+            }, 250);
+        } catch (err) {
+            errorResponseOverlay('Failed to upload and apply markers', err);
+        }
     }
 
     /**
diff --git a/Server/Commands/PurgeCommands.js b/Server/Commands/PurgeCommands.js
index e1132a8..fd93b34 100644
--- a/Server/Commands/PurgeCommands.js
+++ b/Server/Commands/PurgeCommands.js
@@ -1,4 +1,4 @@
-import { MarkerData, PurgeConflictResolution } from '../../Shared/PlexTypes.js';
+import { MarkerConflictResolution, MarkerData } from '../../Shared/PlexTypes.js';
 import { Log } from '../../Shared/ConsoleLog.js';
 
 import { BackupManager } from '../MarkerBackupManager.js';
@@ -40,8 +40,8 @@ class PurgeCommands {
     static async restoreMarkers(oldMarkerIds, sectionId, resolveType) {
         PurgeCommands.#checkBackupManagerEnabled();
         // TODO: Why does bulk overwrite keep the old markers around?
-        if (Object.keys(PurgeConflictResolution).filter(k => PurgeConflictResolution[k] == resolveType).length == 0) {
-            throw new ServerError(`Unexpected PurgeConflictResolution type: ${resolveType}`, 400);
+        if (Object.keys(MarkerConflictResolution).filter(k => MarkerConflictResolution[k] == resolveType).length == 0) {
+            throw new ServerError(`Unexpected MarkerConflictResolution type: ${resolveType}`, 400);
         }
 
         const restoredMarkerData = await BackupManager.restoreMarkers(oldMarkerIds, sectionId, resolveType);
diff --git a/Server/Commands/QueryCommands.js b/Server/Commands/QueryCommands.js
index f7cf4ae..e874103 100644
--- a/Server/Commands/QueryCommands.js
+++ b/Server/Commands/QueryCommands.js
@@ -35,12 +35,19 @@ class QueryCommands {
             throw new ServerError(`Marker query must have at least one metadata id to search for,`, 400);
         }
 
+        let sectionId;
+        if (keys.length > 500) {
+            // It's inefficient to query for 500+ individual markers. At this scale,
+            // get all items in a section and filter accordingly.
+            sectionId = (await PlexQueries.getMarkersAuto(keys[0])).markers[0].section_id;
+        }
+
         const markers = {};
         for (const key of keys) {
             markers[key] = [];
         }
 
-        const rawMarkers = await PlexQueries.getMarkersForItems(keys);
+        const rawMarkers = await PlexQueries.getMarkersForItems(keys, sectionId);
         for (const rawMarker of rawMarkers) {
             // TODO: better handing of non intros/credits (i.e. commercials)
             if (supportedMarkerType(rawMarker.marker_type)) {
diff --git a/Server/ImportExport.js b/Server/ImportExport.js
index 58809b1..22d58b4 100644
--- a/Server/ImportExport.js
+++ b/Server/ImportExport.js
@@ -1,16 +1,23 @@
-import { contentType, lookup } from 'mime-types';
-import { createReadStream, existsSync, mkdirSync, rmSync, statSync } from 'fs';
+import { createReadStream, existsSync, mkdirSync, rmSync, statSync, writeFileSync } from 'fs';
 import { join } from 'path';
 
 import { Log } from '../Shared/ConsoleLog.js';
 
+import { MetadataType, PlexQueries } from './PlexQueryManager.js';
+import { sendJsonError, sendJsonSuccess } from './ServerHelpers.js';
 import { Config } from './IntroEditorConfig.js';
 import DatabaseWrapper from './DatabaseWrapper.js';
-import { PlexQueries } from './PlexQueryManager.js';
+import { MarkerConflictResolution } from '../Shared/PlexTypes.js';
+import ServerError from './ServerError.js';
+import { softRestart } from './IntroEditor.js';
 import TransactionBuilder from './TransactionBuilder.js';
 
+/** @typedef {!import('http').IncomingMessage} IncomingMessage */
 /** @typedef {!import('http').ServerResponse} ServerResponse */
+/** @typedef {!import('./MarkerCacheManager').MarkerQueryResult} MarkerQueryResult */
+/** @typedef {!import('../Shared/PlexTypes').MarkerAction} MarkerAction */
+
 /**
  * @typedef {Object} BackupRow
  * @property {string} marker_type
@@ -118,8 +125,8 @@ class DatabaseImportExport {
         }
 
         const db = await DatabaseWrapper.CreateDatabase(backupFullPath, true /*allowCreate*/);
-        await db.run(CheckVersionTable);
-        await db.run(ExportTable);
+        await db.exec(CheckVersionTable);
+        await db.exec(ExportTable);
 
         const params = { $tagId : PlexQueries.markerTagId() };
         let query =
@@ -174,7 +181,7 @@ WHERE t.tag_id=$tagId`;
             return response.writeHead(500).end('Unable to retrieve marker database.');
         }
 
-        const mimetype = contentType(lookup(backupName));
+        const mimetype = 'application/x-sqlite3';
         const readStream = createReadStream(backupFullPath);
         response.writeHead(200, {
             'Content-Type' : mimetype,
@@ -185,6 +192,320 @@ WHERE t.tag_id=$tagId`;
         Log.info(`Successfully created marker backup.`);
         readStream.pipe(response);
     }
+
+    /**
+     * Import the markers in the database uploaded in the request.
+     * @param {IncomingMessage} request
+     * @param {ServerResponse} response */
+    static async importDatabase(request, response) {
+        try {
+            const formData = rebuildFormData(await DatabaseImportExport.#awaitImport(request));
+            if (!formData.database
+                || !formData.database.filename
+                || !formData.database.data
+                || !formData.sectionId
+                || isNaN(parseInt(formData.sectionId.data))
+                || !formData.resolveType
+                || isNaN(parseInt(formData.resolveType.data))
+                || Object.keys(MarkerConflictResolution).filter(
+                    k => MarkerConflictResolution[k] == parseInt(formData.resolveType.data)).length == 0) {
+                throw new ServerError(`Invalid parameters for import_db`);
+            }
+
+            // Form data looks good. Write the database to a real file.
+            const backupDir = join(Config.projectRoot(), 'Backup', 'MarkerExports');
+            mkdirSync(backupDir, { recursive : true });
+            const dbData = Buffer.from(formData.database.data, 'binary');
+            const fullPath = join(backupDir, `Import-${formData.database.filename}`);
+            writeFileSync(fullPath, dbData);
+
+            const stats = await DatabaseImportExport.#doImport(
+                fullPath,
+                parseInt(formData.sectionId.data),
+                parseInt(formData.resolveType.data));
+
+            // Try to delete the temporarily uploaded file. Not a big deal if we can't though.
+            try {
+                rmSync(fullPath);
+            } catch (err) {
+                Log.warn(err.message, `Unable to clean up uploaded database file`);
+            }
+
+            return sendJsonSuccess(response, stats);
+        } catch (err) {
+            return sendJsonError(response, err);
+        }
+    }
+
+    /**
+     * Read the newly uploaded database file and attempt to import its markers into this section (or server)
+     * @param {string} importedFile Full path to the uploaded database file.
+     * @param {number} sectionId Section ID to apply markers to. -1 to apply server-wide
+     * @param {number} resolveType The MarkerConflictResolution type */
+    static async #doImport(importedFile, sectionId, resolveType) {
+        const db = await DatabaseWrapper.CreateDatabase(importedFile, false /*allowCreate*/);
+        try {
+            let row = await db.get('SELECT version FROM schema_version;');
+            if (row.version > CurrentSchemaVersion) {
+                throw new ServerError('Database was created with a newer version of this application, cannot continue.', 400);
+            }
+
+            row = await db.get('SELECT * from markers LIMIT 1;');
+            if (!row) {
+                throw new ServerError('Database does not have any markers to import!', 400);
+            }
+        } catch (err) {
+            if (err instanceof ServerError) {
+                throw err; // Don't swallow the more specific errors thrown above.
+            }
+
+            throw new ServerError('Unable to read imported database. Are you sure it was created by this application?', 400);
+        }
+
+        // We've verified our data seems correct. Now grab all of them and
+        // transform them into something that bulkRestore can reason with.
+        /** @type {BackupRow[]} */
+        const backupMarkers = await db.all('SELECT * FROM markers;');
+        db.close(); // We don't need this once we've read all rows.
+
+        /** @type {{[guid: string]: BackupRow[]}} */
+        const backupGuidMap = {};
+        for (const backupMarker of backupMarkers) {
+            (backupGuidMap[backupMarker.guid] ??= []).push(backupMarker);
+        }
+
+        const params = {};
+        let allMedia =
+`SELECT
+    base.id AS id,
+    (CASE WHEN season.id IS NULL THEN -1 ELSE season.id END) AS season_id,
+    (CASE WHEN season.id IS NULL THEN -1 ELSE season.parent_id END) AS show_id,
+    base.guid AS guid,
+    base.library_section_id AS library_section_id,
+    base.metadata_type AS metadata_type
+FROM metadata_items base
+LEFT JOIN metadata_items season ON base.parent_id=season.id
+WHERE (base.metadata_type=1 OR base.metadata_type=4)`;
+
+        if (sectionId !== -1) {
+            allMedia += ' AND base.library_section_id=$sectionId';
+            params.$sectionId = sectionId;
+        }
+
+        allMedia += ';';
+
+        /**
+         * @param {BackupRow[]} backupRows
+         * @param {MarkerQueryResult} baseItem Not actually a MarkerQueryResult, but very close */
+        const backupRowToMarkerAction = (backupRows, baseItem) => {
+            const markerActions = [];
+            for (const backupRow of backupRows) {
+                markerActions.push({
+                    marker_type : backupRow.marker_type,
+                    final : backupRow.extra.includes('%3Afinal=1'),
+                    start : backupRow.start,
+                    end : backupRow.end,
+                    modified_at : Math.abs(backupRow.modified_at) || '',
+                    created_at : backupRow.created_at,
+                    extra_data : backupRow.extra,
+                    user_created : backupRow.modified_at < 0,
+                    parent_guid : backupRow.guid,
+                    parent_id : baseItem.id,
+                    season_id : baseItem.season_id,
+                    show_id : baseItem.show_id,
+                    section_id : baseItem.library_section_id,
+                });
+            }
+
+            return markerActions;
+        };
+
+        // For each section, go through all items and check to see if its guid matches one from the imported DB.
+        // If it does, create a mapping from the Plex DB's metadata id to the markers associated with that guid.
+        // Note that this means multiple individual items can match to the same set of markers, e.g. the same
+        // movie/episode across different libraries, or split apart items (that don't use Editions).
+
+        /** @type {{[sectionId: number]: {sectionType: number, items : {[baseId: number]: MarkerAction[]}}}} */
+        const sectionsToUpdate = {};
+        const plexItems = await PlexQueries.database().all(allMedia, params);
+        for (const item of plexItems) {
+            if (!sectionsToUpdate[item.library_section_id]) {
+                sectionsToUpdate[item.library_section_id] = {
+                    sectionType : item.metadata_type == MetadataType.Movie ? MetadataType.Movie : MetadataType.Show,
+                    items : {},
+                };
+            }
+
+            if (backupGuidMap[item.guid]) {
+                sectionsToUpdate[item.library_section_id].items[item.id] = backupRowToMarkerAction(backupGuidMap[item.guid], item);
+            }
+        }
+
+        const stats = {
+            added : 0,
+            identical : 0,
+            deleted : 0,
+            modified : 0,
+            ignored : 0
+        };
+
+        for (const [sectionId, sectionInfo] of Object.entries(sectionsToUpdate)) {
+            const itemsToUpdate = Object.keys(sectionInfo.items).length;
+            if (itemsToUpdate === 0) {
+                Log.verbose(`Import: Ignoring section ${sectionId}, no relevant items.`);
+                continue;
+            }
+
+            Log.info(`Attempting to restore markers for ${itemsToUpdate} items in section ${sectionId}`);
+            const restoredMarkerData = await PlexQueries.bulkRestore(
+                sectionInfo.items,
+                parseInt(sectionId),
+                sectionInfo.sectionType,
+                resolveType);
+
+            stats.added += restoredMarkerData.newMarkers.length;
+            stats.identical += restoredMarkerData.identicalMarkers.length;
+            stats.deleted += restoredMarkerData.deletedMarkers.length;
+            stats.modified += restoredMarkerData.modifiedMarkers.length;
+            stats.ignored += restoredMarkerData.ignoredActions.length;
+        }
+
+        const ll = (k, v) => `\n\t\t${k}: ${v}`;
+        Log.info(`Successfully imported markers:` +
+            ll('Markers imported', stats.added) +
+            ll('Existing markers deleted (overwrite)', stats.deleted) +
+            ll('Existing markers modified (merged)', stats.modified) +
+            ll('Ignored imports', stats.ignored));
+
+        // Force a mini-reload, as it's easier than trying to perfectly account for the right
+        // marker deltas, and import isn't expected to be a common scenario, so I don't really care
+        // about the slightly worse user experience.
+        await softRestart();
+
+        return stats;
+    }
+
+    /**
+     * Waits for all the data from the request to load, returning a promise
+     * that resolves to the complete text.
+     *
+     * Note: There's a hard 32MB limit. If anything larger is needed in the future,
+     * this data should probably get streamed to a file first, and then read in chunks.
+     * @param {IncomingMessage} request
+     * @returns {Promise<string>} */
+    static async #awaitImport(request) {
+        return new Promise((resolve, reject) => {
+            let body = '';
+            request.on('data', chunk => {
+                if (Buffer.isBuffer(chunk)) {
+                    body += chunk.toString('binary');
+                } else {
+                    body += chunk;
+                }
+
+                if (body.length > 1024 * 1024 * 32) {
+                    Log.error(`Import upload failed - File too large.`);
+                    reject('File is too large.');
+                }
+            });
+            request.on('end', () => {
+                Log.verbose(`File uploaded (${body.length} bytes)`);
+                resolve(body);
+            });
+        });
+    }
+
+    /**
+     * On server close, clear out any exported/imported databases that are still lying around, if we can. */
+    static Close() {
+        if (!Config || !Config.projectRoot()) {
+            // Very early shutdown
+            return;
+        }
+
+        const tempRoot = join(Config.projectRoot(), 'Backup', 'MarkerExports');
+        if (!existsSync(tempRoot)) {
+            Log.verbose('ImportExport: No database files to clean up.');
+            return;
+        }
+
+        try {
+            rmSync(tempRoot, { recursive : true, force : true });
+            Log.verbose('ImportExport: Successfully removed cached databases.');
+        } catch (err) {
+            Log.warn(err.message, 'ImportExport: Failed to clear cached databases.');
+        }
+    }
+}
+
+/** Regex that looks for expected 'Content-Disposition: form-data' key/value pairs */
+const headerRegex = /\b(?<key>\w+)="(?<value>[^"]+)"/g;
+
+/**
+ * Takes raw form input and rebuilds a key-value dictionary.
+ * Note: I _really_ should use a library. There's probably a built-in one I
+ * should be using, but a very quick search didn't bring up anything.
+ * @param {string} raw */
+function rebuildFormData(raw) {
+    const data = {};
+
+    const sentinelBase = raw.substring(0, raw.indexOf('\r\n'));
+    if (!sentinelBase) {
+        throw new ServerError('Malformed response, did not find form data sentinel', 500);
+    }
+
+    const sentinel = sentinelBase + '\r\n';
+    const responseEnd = '\r\n' + sentinelBase + '--\r\n';
+
+    let index = sentinel.length;
+    for (;;) {
+        const headerStart = index;
+        const headerEnd = raw.indexOf('\r\n\r\n', index) + 4;
+        index = headerEnd;
+        if (!sentinel || headerEnd === 3) {
+            return data;
+        }
+
+        const rawHeaders = raw.substring(headerStart, headerEnd).split('\r\n').filter(h => !!h);
+        let name = '';
+        // We specifically are looking for form-data
+        // Also make our lives easier and assume no double quotes in names
+        for (const header of rawHeaders) {
+            const headerNorm = header.toLowerCase();
+            if (headerNorm.startsWith('content-disposition:') && headerNorm.includes('form-data;')) {
+                const fields = {};
+                for (const match of header.matchAll(headerRegex)) {
+                    fields[match.groups.key] = match.groups.value;
+                }
+
+                if (!fields['name']) {
+                    throw new ServerError('Invalid form data - no name for field', 500);
+                }
+
+                name = fields['name'];
+                data[name] = fields;
+
+                // Are any other fields relevant? If so, parse those as well instead of breaking
+                break;
+            }
+        }
+
+        const dataStart = index;
+        const dataEnd = raw.indexOf(sentinelBase, index);
+        if (dataEnd === -1) {
+            throw new ServerError('Invalid form input - could not find data sentinel', 500);
+        }
+
+        data[name].data = raw.substring(dataStart, dataEnd - 2); // Don't include CRLF before sentinel
+        index = raw.indexOf(sentinel, dataEnd);
+        if (index === -1) {
+            // If we don't find the sentinel, we better be at the end
+            if (raw.indexOf(responseEnd, dataEnd - 2) != dataEnd - 2) {
+                Log.warn('Unexpected response end, returning what we have.');
+            }
+
+            Log.verbose(`Parsed POST body. Found ${Object.keys(data).length} fields.`);
+            return data;
+        }
+
+        index += sentinel.length;
+    }
+}
 
 export default DatabaseImportExport;
diff --git a/Server/IntroEditor.js b/Server/IntroEditor.js
index dea34a4..f48c83b 100644
--- a/Server/IntroEditor.js
+++ b/Server/IntroEditor.js
@@ -16,6 +16,7 @@ import { BackupManager, MarkerBackupManager } from './MarkerBackupManager.js';
 import { Config, IntroEditorConfig } from './IntroEditorConfig.js';
 import { GetServerState, ServerState, SetServerState } from './ServerState.js';
 import { sendJsonError, sendJsonSuccess } from './ServerHelpers.js';
+import DatabaseImportExport from './ImportExport.js';
 import FirstRunConfig from './FirstRunConfig.js';
 import GETHandler from './GETHandler.js';
 import { MarkerCacheManager } from './MarkerCacheManager.js';
@@ -177,12 +178,16 @@ function handleClose(signal, restart=false) {
 /**
  * Properly close out open resources in preparation for shutting down the process.
  * @param {boolean} fullShutdown Whether we're _really_ shutting down the process, or just suspending/restarting it. */
-function cleanupForShutdown(fullShutdown) {
+async function cleanupForShutdown(fullShutdown) {
     ServerCommands.clear();
-    PlexQueryManager.Close();
-    MarkerBackupManager.Close();
     MarkerCacheManager.Close();
     ThumbnailManager.Close(fullShutdown);
+    DatabaseImportExport.Close();
+
+    await Promise.all([
+        PlexQueryManager.Close(),
+        MarkerBackupManager.Close(),
+    ]);
 
     // Ensure this is always last, as some classes
     // above may rely on values here.
@@ -250,6 +255,25 @@ function userResume(res) {
     run();
 }
 
+/**
+ * Do a soft internal restart to rebuild all internal caches
+ * and reconnect to databases, usually after a large operation where
+ * it's easier to just rebuild everything from scratch.
+ *
+ * TODO: How much of this can be moved to a different file instead of Main? */
+async function softRestart() {
+    Log.info('Soft reset started. Rebuilding everything.');
+    if (GetServerState() != ServerState.Running) {
+        Log.warn(`Attempting a soft reset when the server isn't running. Ignoring it.`);
+        return;
+    }
+
+    SetServerState(ServerState.Suspended);
+    await cleanupForShutdown(false /*fullShutdown*/);
+    Log.assert(GetServerState() == ServerState.Suspended, 'Server state changed during cleanup, that\'s not right!');
+    run();
+}
+
 /** Creates the server. Called after verifying the config file and database. */
 async function launchServer() {
     if (!shouldCreateServer()) {
@@ -352,6 +376,14 @@ const ServerActionMap = {
     resume : (res) => userResume(res),
 };
 
+/**
+ * Map of actions that require more direct access to the underlying request and response.
+ * Instead of adjusting ServerCommands to accommodate these, have a separate map.
+ * @type {{[endpoint: string]: (req: IncomingMessage, res: ServerResponse) => Promise}} */
+const RawActions = {
+    import_db : async (req, res) => await DatabaseImportExport.importDatabase(req, res),
+};
+
 /**
  * Handle POST requests, used to return JSON data queried by the client.
 * @param {IncomingMessage} req
 * @param {ServerResponse} res */
@@ -368,6 +400,14 @@ async function handlePost(req, res) {
         return ServerActionMap[endpoint](res);
     }
 
+    if (RawActions[endpoint]) {
+        try {
+            return await RawActions[endpoint](req, res);
+        } catch (err) {
+            return sendJsonError(res, err);
+        }
+    }
+
     try {
         const response = await ServerCommands.runCommand(endpoint, req);
         sendJsonSuccess(res, response);
@@ -408,3 +448,5 @@ function checkTestData() {
 
     return testData;
 }
+
+export { softRestart };
diff --git a/Server/MarkerBackupManager.js b/Server/MarkerBackupManager.js
index 5d16146..f609b3b 100644
--- a/Server/MarkerBackupManager.js
+++ b/Server/MarkerBackupManager.js
@@ -386,7 +386,7 @@ class MarkerBackupManager {
     }
 
     /** Clear out the singleton backup manager instance. */
-    static Close() { Instance?.close(); Instance = null; }
+    static async Close() { await Instance?.close(); Instance = null; }
 
     /**
      * @param {{[sectionId: number]: string}} uuids A map of section ids to UUIDs to uniquely identify a section across severs.
@@ -1172,7 +1172,7 @@ ORDER BY id DESC;`;
             (toRestore[markerAction.parent_id] ??= []).push(markerAction);
         }
 
-        const markerData = await PlexQueries.bulkRestore(toRestore, this.#sectionTypes[sectionId], resolveType);
+        const markerData = await PlexQueries.bulkRestore(toRestore, sectionId, this.#sectionTypes[sectionId], resolveType);
 
         // First thing to log is deletes, as we want order to indicate that they were replaced by subsequent entries.
         const deletedMarkers = markerData.deletedMarkers.map(x => new MarkerData(x));
diff --git a/Server/PlexQueryManager.js b/Server/PlexQueryManager.js
index feb3ece..32a4256 100644
--- a/Server/PlexQueryManager.js
+++ b/Server/PlexQueryManager.js
@@ -1,5 +1,5 @@
-import { BulkMarkerResolveType, EpisodeData, MarkerData, MarkerEnum, MarkerType, PurgeConflictResolution } from '../Shared/PlexTypes.js';
-import { Log } from '../Shared/ConsoleLog.js';
+import { BulkMarkerResolveType, EpisodeData, MarkerConflictResolution, MarkerData, MarkerEnum, MarkerType } from '../Shared/PlexTypes.js';
+import { ConsoleLog, Log } from '../Shared/ConsoleLog.js';
 
 import DatabaseWrapper from './DatabaseWrapper.js';
 import ServerError from './ServerError.js';
@@ -230,7 +230,7 @@ FROM taggings
     }
 
     /** Close the query connection. */
-    static Close() { Instance?.close(); Instance = null; }
+    static async Close() { await Instance?.close(); Instance = null; }
 
     /**
      * Initializes the query manager. Should only be called via the static CreateInstance.
@@ -541,23 +541,39 @@ ORDER BY e.\`index\` ASC;`;
      * Retrieve all markers for the given mediaIds, which should all be either episodes
      * or movies (and not mixed).
      * @param {number[]} metadataIds
+     * @param {number} sectionId Used in cases where we have too many metadataIds to query for
+     *                           individually, in which case we grab all markers for the section
+     *                           and then filter to the given metadataIds.
      * @returns {Promise} */
-    async getMarkersForItems(metadataIds) {
-        const metadataType = await this.#validateSameMetadataTypes(metadataIds);
-        if (metadataType == MetadataType.Invalid) {
-            throw new ServerError(`getMarkersForItems can only accept metadata ids that are the same metadata_type`, 400);
-        }
+    async getMarkersForItems(metadataIds, sectionId) {
+        if (metadataIds.length <= 500) {
+            const metadataType = await this.#validateSameMetadataTypes(metadataIds);
+            if (metadataType == MetadataType.Invalid) {
+                throw new ServerError(`getMarkersForItems can only accept metadata ids that are the same metadata_type`, 400);
+            }
 
-        switch (metadataType) {
-            case MetadataType.Movie:
-                return this.#getMarkersForEpisodesOrMovies(metadataIds, this.#extendedMovieMarkerFields);
-            case MetadataType.Episode:
-                return this.#getMarkersForEpisodesOrMovies(metadataIds, this.#extendedEpisodeMarkerFields);
-            default:
-            {
-                const typeString = Object.keys(MetadataType).find(k => MetadataType[k] == metadataType);
-                throw new ServerError(`getMarkersForItems only expects movie or episode ids, found ${typeString}.`, 400);
+            switch (metadataType) {
+                case MetadataType.Movie:
+                    return this.#getMarkersForEpisodesOrMovies(metadataIds, this.#extendedMovieMarkerFields);
+                case MetadataType.Episode:
+                    return this.#getMarkersForEpisodesOrMovies(metadataIds, this.#extendedEpisodeMarkerFields);
+                default:
+                {
+                    const typeString = Object.keys(MetadataType).find(k => MetadataType[k] == metadataType);
+                    throw new ServerError(`getMarkersForItems only expects movie or episode ids, found ${typeString}.`, 400);
+                }
+            }
+        } else {
+            // Too many individual ids to account for. Grab all items in the given section and filter accordingly.
+            const allMarkers = await this.#getMarkersForSection(sectionId, (await this.#mediaTypeFromId(metadataIds[0])).metadata_type);
+            const idSet = new Set(metadataIds);
+            const filtered = [];
+            for (const marker of allMarkers) {
+                if (idSet.has(marker.parent_id)) {
+                    filtered.push(marker);
+                }
             }
+
+            return this.#postProcessExtendedMarkerFields(filtered);
         }
     }
@@ -579,6 +595,23 @@ ORDER BY e.\`index\` ASC;`;
         return this.#postProcessExtendedMarkerFields(await this.#database.all(query, [this.#markerTagId]));
     }
 
+    /**
+     * Retrieve all markers for the given section.
+     * @param {number} sectionId
+     * @param {number} mediaType
+     * @returns {Promise} */
+    async #getMarkersForSection(sectionId, mediaType) {
+        const fields = this.#extendedFieldsFromMediaType(mediaType);
+        const markerQuery =
+            `SELECT ${fields} WHERE taggings.tag_id=$tagId AND section_id=$sectionId ORDER BY taggings.time_offset ASC;`;
+        const parameters = {
+            $tagId : this.#markerTagId,
+            $sectionId : sectionId,
+        };
+
+        return this.#database.all(markerQuery, parameters);
+    }
+
     /**
      * Retrieve all markers for a single episode.
      * @param {number} metadataId
@@ -852,16 +885,20 @@ ORDER BY e.\`index\` ASC;`;
 
     /**
      * Restore multiple markers at once.
+     * NOTE: This method is shared between purge restoration and marker import. If changes are made,
+     * make sure the data types line up.
      * @param {{ [episodeId: number] : MarkerAction[] }} actions Map of episode IDs to the list of markers to restore for that episode
+     * @param {number} sectionId The section ID we're restoring markers for
      * @param {number} sectionType The type of section we're restoring for (i.e. TV or movie)
      * @param {number} resolveType How to resolve conflicts with existing markers.
     * @returns {Promise} */
     /* eslint-disable-next-line complexity */ // TODO: eslint is right, this is massive and should be broken up.
-    async bulkRestore(actions, sectionType, resolveType) {
+    async bulkRestore(actions, sectionId, sectionType, resolveType) {
         /** @type {RawMarkerData[]} */
         let markerList;
         try {
-            markerList = await this.getMarkersForItems(Object.keys(actions));
+            const keys = Object.keys(actions).map(eid => parseInt(eid));
+            markerList = await this.getMarkersForItems(keys, sectionId);
         } catch (err) {
             throw new ServerError(`Unable to retrieve existing markers to correlate marker restoration:\n\n${err.message}`, 500);
         }
@@ -886,7 +923,7 @@ ORDER BY e.\`index\` ASC;`;
         const transaction = new TransactionBuilder(this.#database);
         for (const [baseItemId, markerActions] of Object.entries(actions)) {
             markerActions.sort((a, b) => a.start - b.start);
-            const lastAction = { start : -2, end : -1, marker_type : 'intro', final : false, modified_at : 0 };
+            let lastAction = { start : -2, end : -1, marker_type : 'intro', final : false, modified_at : 0 };
             // Need a first loop to trim our actions based on overlap with ourselves
             for (const action of markerActions) {
                 if (action.start <= lastAction.start ? action.end >= lastAction.start : action.start <= lastAction.end) {
                     // for the reasons outlined below:
                     ignoredActions.add(action);
                     switch (resolveType) {
-                        case PurgeConflictResolution.Ignore:
+                        case MarkerConflictResolution.Ignore:
                             // Making the first marker take precedence gives us a better chance to
                             // restore the most markers possible.
                             // "|A [B A| {C B] C}" will become "|A A| {C C}" and not "{C C}"
                             break;
-                        case PurgeConflictResolution.Merge:
+                        case MarkerConflictResolution.Merge:
                             // Just extend the last marker, making it the new "tracker" in the backup database.
                             // Credits/final takes precedence over intro/non-final
                             lastAction.start = Math.min(lastAction.start, action.start);
                             lastAction.end = Math.max(lastAction.end, action.end);
                             lastAction.final = lastAction.final || action.final;
                             lastAction.modified_at = Math.max(lastAction.modified_at, action.modified_at);
                             break;
-                        case PurgeConflictResolution.Overwrite:
+                        case MarkerConflictResolution.Overwrite:
                             // Similar to Ignore.
                             break;
                         default:
                             break;
                     }
                 }
+
+                lastAction = action;
             }
 
             // Second loop for overlap with existing markers.
                 // Now check for overlap with existing markers.
                 const overlappingMarkers = existingMarkers[baseItemId].filter(getOverlapping);
+                let identical = false;
                 for (const overlappingMarker of overlappingMarkers) {
                     // If they're identical, ignore no matter the resolution strategy
-                    if (action.start == overlappingMarker.start && action.end == overlappingMarker.end) {
-                        Log.verbose(action, `Ignoring purged marker that is identical to an existing marker.`);
+                    identical = action.start == overlappingMarker.start && action.end == overlappingMarker.end;
+                    if (identical) {
+                        if (identicalMarkers.length === 10) {
+                            Log.verbose('Too many identical markers, moving reporting to TMI');
+                        }
+
+                        if (identicalMarkers.length >= 10) {
+                            Log.tmi(action, `Ignoring marker that is identical to an existing marker`);
+                        } else {
+                            Log.verbose(action, `Ignoring marker that is identical to an existing marker`);
+                        }
+
                         // Add to identicalMarkers, but not to ignoredActions. The idea being that for
                         // identicalMarkers we pretend that we restored it alongside the existing marker,
                         // while for ignoredActions we pretend that we explicitly chose to ignore the action.
                         identicalMarkers.push(overlappingMarker.getRaw());
                         break;
                     }
 
                     switch (resolveType) {
-                        case PurgeConflictResolution.Ignore:
+                        case MarkerConflictResolution.Ignore:
                             ignoredActions.add(action);
                             continue;
-                        case PurgeConflictResolution.Merge:
+                        case MarkerConflictResolution.Merge:
                             toModify[overlappingMarker.id] = {
                                 marker : overlappingMarker.getRaw(),
                                 newData : {
                                     newStart : Math.min(action.start, overlappingMarker.start),
                                     newEnd : Math.max(action.end, overlappingMarker.end),
                                     newType : action.type == MarkerType.Credits ? MarkerType.Credits : overlappingMarker.marker_type,
                                     newFinal : overlappingMarker.final || action.final,
-                                    newModified : Math.max(action.modified_at || action.recorded_at, overlappingMarker.modified_date),
+                                    newModified : Math.max(action.modified_at || action.recorded_at || 0, overlappingMarker.modified_date),
                                 }
                             };
 
                             ignoredActions.add(action);
                             continue;
-                        case PurgeConflictResolution.Overwrite:
+                        case MarkerConflictResolution.Overwrite:
                         {
                             // Delete. However, potentially change the action type if the overlapping marker is a
                             // credits marker, since it's very likely that we're overwriting an automatically created
                         }
                     }
                 }
 
-                if (ignoredActions.has(action)) {
+                if (ignoredActions.has(action) || identical) {
                     continue;
                 }
 
@@ -1025,6 +1075,10 @@ ORDER BY e.\`index\` ASC;`;
             }
         }
 
+        if (identicalMarkers.length > 10 && Log.getLevel() >= ConsoleLog.Level.Verbose) {
+            Log.verbose(`Found ${identicalMarkers.length - 10} additional identical markers that are being ignored.`);
+        }
+
         for (const marker of toDelete) {
             transaction.addStatement('DELETE FROM taggings WHERE id=?;', [marker.id]);
         }
@@ -1074,6 +1128,64 @@ ORDER BY e.\`index\` ASC;`;
 
         // All markers were added successfully. Now query them all to return back to the backup manager
         // so it can update caches accordingly.
+        // If this throws, the server really should restart. We added the markers successfully,
+        // but we can't update our caches since we couldn't retrieve them.
+        const newMarkers = await this.#newMarkersAfterBulkInsert(existingMarkers, sectionId, sectionType);
+
+        if (newMarkers.length != expectedInserts) {
+            Log.warn(`Expected to find ${expectedInserts} new markers, found ${newMarkers.length} instead.`);
+        }
+
+        return {
+            newMarkers : this.#postProcessExtendedMarkerFields(newMarkers),
+            identicalMarkers : identicalMarkers,
+            deletedMarkers : toDelete,
+            modifiedMarkers : Object.values(toModify),
+            ignoredActions : Array.from(ignoredActions)
+        };
+    }
+
+    /**
+     * @param {{ [parent_id: string|number] : TrimmedMarker[] }} existingMarkers
+     * @param {number} sectionId
+     * @param {number} sectionType
+     * @returns {Promise} */
+    async #newMarkersAfterBulkInsert(existingMarkers, sectionId, sectionType) {
+        const toQuery = Object.values(existingMarkers);
+        if (toQuery.length > 150) {
+            // If we have more than 150 items, get all section markers and then filter,
+            // since we don't want to hit SQLite's condition limit, and it's faster than
+            // running hundreds of individual queries.
+            const allMarkers = await this.#getMarkersForSection(sectionId, sectionType);
+            const filtered = [];
+            /** @type {{[parentId: number]: {[start: number]: Set<number>}}} */
+            const filterSet = {};
+            let existingCount = 0;
+            let newCount = 0;
+            for (const markerArr of toQuery) {
+                for (const trimmedMarker of markerArr) {
+                    if (trimmedMarker.existing()) {
+                        ++existingCount;
+                        continue;
+                    }
+
+                    filterSet[trimmedMarker.parent_id] ??= {};
+                    filterSet[trimmedMarker.parent_id][trimmedMarker.start] ??= new Set();
+                    filterSet[trimmedMarker.parent_id][trimmedMarker.start].add(trimmedMarker.end);
+                    ++newCount;
+                }
+            }
+
+            Log.info(`Expecting ${newCount} new markers against ${existingCount} existing.`);
+            for (const marker of allMarkers) {
+                if (filterSet[marker.parent_id] && filterSet[marker.parent_id][marker.start]?.has(marker.end)) {
+                    filtered.push(marker);
+                }
+            }
+
+            return filtered;
+        }
+
         const params = [this.#markerTagId];
         let query = `SELECT ${this.#extendedFieldsFromMediaType(sectionType)} WHERE taggings.tag_id=? AND (`;
         for (const newMarkers of Object.values(existingMarkers)) {
         }
 
         query = query.substring(0, query.length - 4) + ')';
-
-        // If this throws, the server really should restart. We added the markers successfully,
-        // but we can't update our caches since we couldn't retrieve them.
-        const newMarkers = params.length == 1 ? [] : await this.#database.all(query, params);
-        if (newMarkers.length != expectedInserts) {
-            Log.warn(`Expected to find ${expectedInserts} new markers, found ${newMarkers.length} instead.`);
-        }
-
-        return {
-            newMarkers : this.#postProcessExtendedMarkerFields(newMarkers),
-            identicalMarkers : identicalMarkers,
-            deletedMarkers : toDelete,
-            modifiedMarkers : Object.values(toModify),
-            ignoredActions : Array.from(ignoredActions)
-        };
+        return params.length == 1 ? [] : await this.#database.all(query, params);
     }
 
     /**
@@ -1239,7 +1337,7 @@ ORDER BY e.\`index\` ASC;`;
      * @param {number} startShift The time to shift marker starts by, in milliseconds
      * @param {number} endShift The time to shift marker ends by, in milliseconds */
     async shiftMarkers(markers, episodeData, startShift, endShift) {
-        const episodeIds = Object.keys(markers);
+        const episodeIds = Object.keys(markers).map(eid => parseInt(eid));
         const limits = {};
         for (const episode of episodeData) {
             limits[episode.id] = episode.duration;
@@ -1275,7 +1373,7 @@ ORDER BY e.\`index\` ASC;`;
         // TODO: Movies? Do we want to surface bulk actions? Makes less sense for movies versus all episodes of a season.
         await transaction.exec();
 
-        const newMarkers = await this.getMarkersForItems(episodeIds);
+        const newMarkers = await this.getMarkersForItems(episodeIds, markers[episodeIds[0]].section_id);
         // No ignored markers, no need to prune
         if (newMarkers.length == expectedShifts) {
             return newMarkers;
diff --git a/Shared/PlexTypes.js b/Shared/PlexTypes.js
index 2a7e034..1c267d8 100644
--- a/Shared/PlexTypes.js
+++ b/Shared/PlexTypes.js
@@ -73,11 +73,11 @@ import MarkerBreakdown from './MarkerBreakdown.js';
  * @typedef {Object} BulkRestoreResponse
  * @property {SerializedMarkerDataMap} newMarkers Markers that were added as the result of a bulk restore.
  * @property {SerializedMarkerDataMap} deletedMarkers Existing markers that were deleted during the restoration.
- *                                     Will be empty if PurgeConflictResolution was not Overwrite.
+ *                                     Will be empty if MarkerConflictResolution was not Overwrite.
 * @property {SerializedMarkerDataMap} modifiedMarkers Existing markers that were adjusted instead of creating a new marker.
- *                                     Will be empty if PurgeConflictResolution was not Merge.
+ *                                     Will be empty if MarkerConflictResolution was not Merge.
 * @property {number} ignoredMarkers Number of markers we decided to ignore, either because an identical marker already existed,
- *                                   or because it overlapped with an existing marker and the PurgeConflictResolution was Merge or Ignore.
+ *                                   or because it overlapped with an existing marker and the MarkerConflictResolution was Merge or Ignore.
  */
 /**
  * A full row in the Actions table
@@ -634,7 +634,7 @@ const SectionType = {
 /**
  * Ways to resolve restoring purged markers.
  * @enum */
-const PurgeConflictResolution = {
+const MarkerConflictResolution = {
     /** If any existing markers overlap the restored marker, delete the existing marker. */
     Overwrite : 1,
     /** Merge overlapping markers into a single marker that spans the entire length of both. */
@@ -651,9 +651,9 @@ export {
     SeasonData,
     EpisodeData,
     MovieData,
+    MarkerConflictResolution,
     MarkerData,
     MarkerEnum,
     MarkerType,
     SectionType,
-    supportedMarkerType,
-    PurgeConflictResolution };
+    supportedMarkerType };
diff --git a/Test/TestClasses/ImageTest.js b/Test/TestClasses/ImageTest.js
index d3aa811..c063f09 100644
--- a/Test/TestClasses/ImageTest.js
+++ b/Test/TestClasses/ImageTest.js
@@ -18,7 +18,7 @@ class ImageTest extends TestBase {
     }
 
     // Hacky, but there are some SVGs that don't have a FILL_COLOR, so we don't expect to see it in the text.
-    static #colorExceptions = { 'favicon.svg' : 1, 'noise.svg' : 1 };
+    static #colorExceptions = new Set(['favicon.svg', 'noise.svg', 'badthumb.svg']);
 
     className() { return 'ImageTest'; }
 
@@ -106,7 +106,7 @@ class ImageTest extends TestBase {
         TestHelpers.verify(response.status == 200, `Expected 200 when retrieving ${endpoint}, got ${response.status}.`);
         TestHelpers.verifyHeader(response.headers, 'Content-Type', 'img/svg+xml', endpoint);
 
-        if (ImageTest.#colorExceptions[endpoint.substring(endpoint.lastIndexOf('/') + 1).toLowerCase()]) {
+        if (ImageTest.#colorExceptions.has(endpoint.substring(endpoint.lastIndexOf('/') + 1).toLowerCase())) {
             return;
         }
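---

Notes referenced from the commit message. These are sketches only, not part of the commit.

#awaitImport accumulates the upload by appending each chunk with toString('binary'),
which round-trips binary data through a latin1 string before importDatabase converts
it back with Buffer.from(..., 'binary'). A smaller intermediate step than the
streaming-to-disk idea mentioned in the method's note is to collect raw Buffers and
concatenate once. A minimal sketch, assuming the caller is updated to accept a Buffer
instead of a string (rebuildFormData currently expects a string, so it would need a
matching change):

    // Sketch: accumulate Buffers instead of a 'binary' string; resolves to a Buffer.
    static async #awaitImport(request) {
        return new Promise((resolve, reject) => {
            const chunks = [];
            let received = 0;
            request.on('data', chunk => {
                received += chunk.length;
                if (received > 1024 * 1024 * 32) { // Same 32MB cap as the client check.
                    request.destroy(); // Stop reading; we're over the limit.
                    return reject(new Error('File is too large.'));
                }

                chunks.push(chunk);
            });
            request.on('end', () => resolve(Buffer.concat(chunks)));
            request.on('error', reject);
        });
    }

This also stops consuming the request as soon as the cap is hit, instead of continuing
to buffer data after reject() has already fired.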
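On the "there are almost definitely libraries" point in the commit message: busboy is
one commonly used npm package for multipart/form-data parsing. The sketch below is an
assumption-laden illustration, not the patch's implementation — it assumes busboy@1.x
(which this project does not depend on), and the parseImportBody name is hypothetical:

    import busboy from 'busboy'; // npm package, NOT a dependency of this project

    // Sketch: parse the import_db upload with busboy instead of rebuildFormData.
    function parseImportBody(request) {
        return new Promise((resolve, reject) => {
            const fields = {};
            const chunks = [];
            const bb = busboy({ headers : request.headers, limits : { fileSize : 1024 * 1024 * 32 } });
            bb.on('file', (_name, stream, info) => {
                fields.filename = info.filename;
                stream.on('data', c => chunks.push(c));
                // Emitted when the 32MB fileSize limit is exceeded.
                stream.on('limit', () => reject(new Error('File is too large.')));
            });
            bb.on('field', (name, value) => { fields[name] = value; });
            bb.on('close', () => resolve({ ...fields, database : Buffer.concat(chunks) }));
            bb.on('error', reject);
            request.pipe(bb);
        });
    }

The resolved object would carry sectionId and resolveType as plain string fields plus
the database Buffer, so importDatabase's validation could stay largely the same.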
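On the SQLite condition limits mentioned in the commit message: the patch works around
them by falling back to whole-section scans past 500 ids (getMarkersForItems) or 150
items (#newMarkersAfterBulkInsert). Another common approach is chunking the id list so
each query stays under the bound-parameter ceiling (SQLITE_MAX_VARIABLE_NUMBER, 999 by
default in older SQLite builds). A sketch of that alternative, assuming Plex's taggings
schema (tag_id, metadata_item_id) and a DatabaseWrapper-style all(query, params) API;
the helper name is hypothetical:

    // Sketch: query markers in batches to stay under SQLite's parameter limit.
    async function getMarkersChunked(db, tagId, metadataIds, chunkSize=500) {
        const results = [];
        for (let i = 0; i < metadataIds.length; i += chunkSize) {
            const chunk = metadataIds.slice(i, i + chunkSize);
            const placeholders = chunk.map(() => '?').join(',');
            const query = `SELECT * FROM taggings WHERE tag_id=? AND metadata_item_id IN (${placeholders});`;
            results.push(...await db.all(query, [tagId, ...chunk]));
        }

        return results;
    }

The whole-section fallback the patch uses is likely faster once the id list approaches
the size of the section anyway, since it's a single scan plus an in-memory Set filter.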
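Finally, a worked example of the three MarkerConflictResolution strategies, derived
from the overlay descriptions and the bulkRestore logic above (the concrete numbers are
illustrative, not from the patch). Given an existing marker spanning 10000-30000ms and
an imported marker spanning 25000-45000ms:

    // Overwrite : existing marker deleted, import added    -> [25000, 45000]
    // Merge     : existing marker extended to cover both   -> [10000, 45000]
    // Ignore    : existing marker kept, import discarded   -> [10000, 30000]
    const merged = {
        start : Math.min(10000, 25000), // 10000
        end   : Math.max(30000, 45000), // 45000
    };

An identical pair (same start and end) short-circuits all three strategies: it lands in
identicalMarkers and counts as already restored, which is why the success overlay reports
it separately from the merge/ignore/self-overlap bucket.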