Merge branch 'sqlite-perf' into 'main'

SQLite performance improvements

See merge request soapbox-pub/ditto!115
This commit is contained in:
Alex Gleason 2024-03-03 03:49:31 +00:00
commit 07d7b3868d
10 changed files with 100 additions and 19 deletions

View File

@ -15,3 +15,13 @@ ssh -L 9229:localhost:9229 <user>@<host>
```
Then, in Chromium, go to `chrome://inspect` and the Ditto server should be available.
## SQLite performance
To track slow queries, first set `DEBUG=ditto:sqlite.worker` in the environment so only SQLite logs are shown.
Then, grep for any queries taking 0.01s or longer:
```sh
journalctl -fu ditto | grep -v '(0.00s)'
```

View File

@ -72,9 +72,12 @@ async function getConfigs(signal: AbortSignal): Promise<PleromaConfig[]> {
limit: 1,
}], { signal });
return jsonSchema.pipe(configSchema.array()).catch([]).parse(
await new AdminSigner().nip44.decrypt(Conf.pubkey, event.content).catch(() => ''),
);
try {
const decrypted = await new AdminSigner().nip44.decrypt(Conf.pubkey, event.content);
return jsonSchema.pipe(configSchema.array()).catch([]).parse(decrypted);
} catch (_e) {
return [];
}
}
export { configController, frontendConfigController, pleromaAdminDeleteStatusController, updateConfigController };

View File

@ -38,6 +38,7 @@ interface EventRow {
created_at: number;
tags: string;
sig: string;
deleted_at: number | null;
}
interface EventFTSRow {

View File

@ -0,0 +1,13 @@
import { Kysely } from '@/deps.ts';
/** Adds a composite index covering the common (kind, pubkey, created_at) filter on events. */
export async function up(db: Kysely<any>): Promise<void> {
  const index = db.schema
    .createIndex('idx_events_kind_pubkey_created_at')
    .on('events')
    .columns(['kind', 'pubkey', 'created_at']);

  await index.execute();
}
/** Reverts the migration by dropping the composite events index. */
export async function down(db: Kysely<any>): Promise<void> {
  await db.schema
    .dropIndex('idx_events_kind_pubkey_created_at')
    .execute();
}

View File

@ -0,0 +1,28 @@
import { Kysely } from '@/deps.ts';
/**
 * Replaces the two single-column tag indexes with one composite (tag, value) index,
 * which serves lookups that filter on both columns.
 */
export async function up(db: Kysely<any>): Promise<void> {
  // Drop the indexes superseded by the composite one.
  await db.schema.dropIndex('idx_tags_tag').execute();
  await db.schema.dropIndex('idx_tags_value').execute();

  const composite = db.schema
    .createIndex('idx_tags_tag_value')
    .on('tags')
    .columns(['tag', 'value']);

  await composite.execute();
}
/** Reverts to the original pair of single-column indexes on the tags table. */
export async function down(db: Kysely<any>): Promise<void> {
  await db.schema.dropIndex('idx_tags_tag_value').execute();

  const tagIndex = db.schema
    .createIndex('idx_tags_tag')
    .on('tags')
    .column('tag');
  await tagIndex.execute();

  const valueIndex = db.schema
    .createIndex('idx_tags_value')
    .on('tags')
    .column('value');
  await valueIndex.execute();
}

View File

@ -0,0 +1,9 @@
import { Kysely } from '@/deps.ts';
/** Adds a nullable integer `deleted_at` column to events (NULL = not deleted). */
export async function up(db: Kysely<any>): Promise<void> {
  const alteration = db.schema
    .alterTable('events')
    .addColumn('deleted_at', 'integer');

  await alteration.execute();
}
/** Reverts the migration by removing the `deleted_at` column from events. */
export async function down(db: Kysely<any>): Promise<void> {
  const alteration = db.schema
    .alterTable('events')
    .dropColumn('deleted_at');

  await alteration.execute();
}

View File

@ -0,0 +1,11 @@
import { Kysely } from '@/deps.ts';
/** Indexes the stats tables on their lookup keys (pubkey / event_id). */
export async function up(db: Kysely<any>): Promise<void> {
  const authorIndex = db.schema
    .createIndex('idx_author_stats_pubkey')
    .on('author_stats')
    .column('pubkey');
  await authorIndex.execute();

  const eventIndex = db.schema
    .createIndex('idx_event_stats_event_id')
    .on('event_stats')
    .column('event_id');
  await eventIndex.execute();
}
/** Reverts the migration by dropping both stats-table indexes. */
export async function down(db: Kysely<any>): Promise<void> {
  const dropAuthor = db.schema
    .dropIndex('idx_author_stats_pubkey')
    .on('author_stats');
  await dropAuthor.execute();

  const dropEvent = db.schema
    .dropIndex('idx_event_stats_event_id')
    .on('event_stats');
  await dropEvent.execute();
}

View File

@ -28,13 +28,6 @@ Deno.test('insert and filter events', async () => {
);
});
Deno.test('delete events', async () => {
await eventsDB.event(event1);
assertEquals(await eventsDB.query([{ kinds: [1] }]), [event1]);
await eventsDB.remove([{ kinds: [1] }]);
assertEquals(await eventsDB.query([{ kinds: [1] }]), []);
});
Deno.test('query events with local filter', async () => {
await eventsDB.event(event1);
@ -54,6 +47,13 @@ Deno.test('query events with local filter', async () => {
assertEquals(await eventsDB.query([{ kinds: [1], local: false }]), []);
});
// Verifies removal: insert an event, confirm it is returned by a kind-1 query,
// remove it via the same filter, then confirm the query returns nothing.
Deno.test('delete events', async () => {
await eventsDB.event(event1);
assertEquals(await eventsDB.query([{ kinds: [1] }]), [event1]);
await eventsDB.remove([{ kinds: [1] }]);
assertEquals(await eventsDB.query([{ kinds: [1] }]), []);
});
Deno.test('inserting replaceable events', async () => {
assertEquals((await eventsDB.count([{ kinds: [0], authors: [event0.pubkey] }])).count, 0);

View File

@ -155,6 +155,7 @@ class EventsDB implements NStore {
'events.created_at',
'events.sig',
])
.where('events.deleted_at', 'is', null)
.orderBy('events.created_at', 'desc');
for (const [key, value] of Object.entries(filter)) {
@ -329,12 +330,9 @@ class EventsDB implements NStore {
const query = this.getEventsQuery(filters).clearSelect().select('id');
await db.deleteFrom('events_fts')
.where('id', 'in', () => query)
.execute();
return db.deleteFrom('events')
return await db.updateTable('events')
.where('id', 'in', () => query)
.set({ deleted_at: Math.floor(Date.now() / 1000) })
.execute();
}

View File

@ -1,5 +1,5 @@
/// <reference lib="webworker" />
import { ScopedPerformance } from 'https://deno.land/x/scoped_performance@v2.0.0/mod.ts';
import { Comlink, type CompiledQuery, Debug, DenoSqlite3, type QueryResult } from '@/deps.ts';
import '@/sentry.ts';
@ -12,12 +12,20 @@ export const SqliteWorker = {
},
executeQuery<R>({ sql, parameters }: CompiledQuery): QueryResult<R> {
if (!db) throw new Error('Database not open');
debug(sql);
return {
const perf = new ScopedPerformance();
perf.mark('start');
const result = {
rows: db!.prepare(sql).all(...parameters as any[]) as R[],
numAffectedRows: BigInt(db!.changes),
insertId: BigInt(db!.lastInsertRowId),
};
const { duration } = perf.measure('end', 'start');
debug(`${sql} \x1b[90m(${(duration / 1000).toFixed(2)}s)\x1b[0m`);
return result;
},
destroy() {
db?.close();