server: default config items on load

Johann150 2023-02-04 17:56:15 +01:00
parent 1adf88b090
commit 9a6bb8be7d
Signed by: Johann150
GPG Key ID: 9EE6577A2A06F8F1
6 changed files with 65 additions and 44 deletions

View File

@@ -6,10 +6,11 @@
 #───┘ URL └─────────────────────────────────────────────────────

 # Final accessible URL seen by a user.
-url: https://example.tld/
+# Only the host part will be used.

 # ONCE YOU HAVE STARTED THE INSTANCE, DO NOT CHANGE THE
 # URL SETTINGS AFTER THAT!
+url: https://example.tld/

 # ┌───────────────────────┐
 #───┘ Port and TLS settings └───────────────────────────────────
@@ -45,6 +46,7 @@ db:
   pass: example-foundkey-pass

   # Whether to disable query caching
+  # Default is to cache, i.e. false.
   #disableCache: true

   # Extra connection options
@@ -57,7 +59,11 @@ db:
 redis:
   host: localhost
   port: 6379
-  #family: dual # can be either a number or string (0/dual, 4/ipv4, 6/ipv6)
+  # Address family to connect over.
+  # Can be either a number or string (0/dual, 4/ipv4, 6/ipv6)
+  # Default is "dual".
+  #family: dual
+  # The following properties are optional.
   #pass: example-pass
   #prefix: example-prefix
   #db: 1
@@ -65,6 +71,7 @@ redis:
 # ┌─────────────────────────────┐
 #───┘ Elasticsearch configuration └─────────────────────────────

+# Elasticsearch is optional.
 #elasticsearch:
 #  host: localhost
 #  port: 9200
@@ -75,35 +82,41 @@ redis:
 # ┌─────────────────────┐
 #───┘ Other configuration └─────────────────────────────────────

-# Whether disable HSTS
+# Whether to disable HSTS (not recommended)
+# Default is to enable HSTS, i.e. false.
 #disableHsts: true

 # Number of worker processes by type.
-# The sum must not exceed the number of available cores.
+# The sum should not exceed the number of available cores.
 #clusterLimits:
 #  web: 1
 #  queue: 1

-# Job concurrency per worker
-# deliverJobConcurrency: 128
-# inboxJobConcurrency: 16
+# Jobs each worker will try to work on at a time.
+#deliverJobConcurrency: 128
+#inboxJobConcurrency: 16

-# Job rate limiter
-# deliverJobPerSec: 128
-# inboxJobPerSec: 16
+# Rate limit for each Worker.
+# Use -1 to disable.
+# A rate limit for deliver jobs is not recommended as it comes with
+# a big performance penalty due to overhead of rate limiting.
+#deliverJobPerSec: 128
+#inboxJobPerSec: 16

-# Job attempts
-# deliverJobMaxAttempts: 12
-# inboxJobMaxAttempts: 8
+# Number of times each job will be tried.
+# 1 means only try once and don't retry.
+#deliverJobMaxAttempts: 12
+#inboxJobMaxAttempts: 8

 # Syslog option
 #syslog:
 #  host: localhost
 #  port: 514

-# Proxy for HTTP/HTTPS
+# Proxy for HTTP/HTTPS outgoing connections
 #proxy: http://127.0.0.1:3128

+# Hosts that should not be connected to through the proxy specified above
 #proxyBypassHosts: [
 #  'example.com',
 #  '192.0.2.8'
@@ -117,7 +130,8 @@ redis:
 # Media Proxy
 #mediaProxy: https://example.com/proxy

-# Proxy remote files (default: false)
+# Proxy remote files
+# Default is to not proxy remote files, i.e. false.
 #proxyRemoteFiles: true

 # Storage path for files if stored locally (absolute path)
@@ -125,11 +139,15 @@ redis:
 #internalStoragePath: '/etc/foundkey/files'

 # Upload or download file size limits (bytes)
+# default is 262144000 = 250MiB
 #maxFileSize: 262144000

 # Max note text length (in characters)
 #maxNoteTextLength: 3000

+# By default, Foundkey will fail when something tries to make it fetch something from private IPs.
+# With the following setting you can explicitly allow some private CIDR subnets.
+# Default is an empty list, i.e. none allowed.
 #allowedPrivateNetworks: [
 #  '127.0.0.1/32'
 #]
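With every optional setting above now documented together with its default, a minimal configuration only needs the values that have no usable default. A rough sketch, assuming the usual required sections (url, port, db, redis); the port, database name and credentials below are placeholders, not recommendations:

# Minimal config sketch; all values below are placeholders, adjust to your setup.
url: https://example.tld/
port: 3000

db:
  host: localhost
  port: 5432
  db: foundkey
  user: example-foundkey-user
  pass: example-foundkey-pass

redis:
  host: localhost
  port: 6379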

View File

@@ -38,13 +38,30 @@ export default function load(): Config {
 	config.port = config.port || parseInt(process.env.PORT || '', 10);

+	// set default values
 	config.images = Object.assign({
 		info: '/twemoji/1f440.svg',
 		notFound: '/twemoji/2049.svg',
 		error: '/twemoji/1f480.svg',
 	}, config.images ?? {});

-	if (!config.maxNoteTextLength) config.maxNoteTextLength = 3000;
+	config.clusterLimits = Object.assign({
+		web: 1,
+		queue: 1,
+	}, config.clusterLimits ?? {});
+
+	config = Object.assign({
+		disableHsts: false,
+		deliverJobConcurrency: 128,
+		inboxJobConcurrency: 16,
+		deliverJobPerSec: 128,
+		inboxJobPerSec: 16,
+		deliverJobMaxAttempts: 12,
+		inboxJobMaxAttempts: 8,
+		proxyRemoteFiles: false,
+		maxFileSize: 262144000, // 250 MiB
+		maxNoteTextLength: 3000,
+	}, config);

 	mixin.version = meta.version;
 	mixin.host = url.host;
@@ -60,22 +77,9 @@ export default function load(): Config {
 	if (!config.redis.prefix) config.redis.prefix = mixin.host;

-	if (!config.clusterLimits) {
-		config.clusterLimits = {
-			web: 1,
-			queue: 1,
-		};
-	} else {
-		config.clusterLimits = {
-			web: 1,
-			queue: 1,
-			...config.clusterLimits,
-		};
-
 	if (config.clusterLimits.web < 1 || config.clusterLimits.queue < 1) {
 		throw new Error('invalid cluster limits');
 	}
-	}

 	return Object.assign(config, mixin);
 }
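A note on the merge direction used above: Object.assign copies properties left to right, so putting the defaults first and the parsed config last means user-supplied keys win and only missing keys fall back to the defaults. Unlike the old "config.x || fallback" pattern, an explicitly configured falsy value such as 0 is kept. A standalone illustration, not foundkey code:

// Later sources override earlier ones: defaults first, user config last.
const defaults = { deliverJobConcurrency: 128, inboxJobConcurrency: 16 };
const userConfig = { deliverJobConcurrency: 64 };

const merged = Object.assign({}, defaults, userConfig);
// merged: { deliverJobConcurrency: 64, inboxJobConcurrency: 16 }

// The old fallback style would overwrite a deliberate 0:
const concurrency = 0 || 128; // evaluates to 128 even though 0 was configured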

View File

@@ -19,7 +19,6 @@ export async function downloadUrl(url: string, path: string): Promise<void> {
 	const timeout = 30 * SECOND;
 	const operationTimeout = MINUTE;
-	const maxSize = config.maxFileSize || 262144000;

 	const req = got.stream(url, {
 		headers: {
@@ -53,14 +52,14 @@ export async function downloadUrl(url: string, path: string): Promise<void> {
 		const contentLength = res.headers['content-length'];
 		if (contentLength != null) {
 			const size = Number(contentLength);
-			if (size > maxSize) {
-				logger.warn(`maxSize exceeded (${size} > ${maxSize}) on response`);
+			if (size > config.maxFileSize) {
+				logger.warn(`maxSize exceeded (${size} > ${config.maxFileSize}) on response`);
 				req.destroy();
 			}
 		}
 	}).on('downloadProgress', (progress: Got.Progress) => {
-		if (progress.transferred > maxSize) {
-			logger.warn(`maxSize exceeded (${progress.transferred} > ${maxSize}) on downloadProgress`);
+		if (progress.transferred > config.maxFileSize) {
+			logger.warn(`maxSize exceeded (${progress.transferred} > ${config.maxFileSize}) on downloadProgress`);
 			req.destroy();
 		}
 	});

View File

@@ -89,7 +89,7 @@ const _https = new https.Agent({
 	lookup: cache.lookup,
 } as https.AgentOptions);

-const maxSockets = Math.max(256, config.deliverJobConcurrency || 128);
+const maxSockets = Math.max(256, config.deliverJobConcurrency);

 /**
  * Get http proxy or non-proxy agent

View File

@@ -96,7 +96,7 @@ export function deliver(user: ThinUser, content: unknown, to: string | null) {
 	};

 	return deliverQueue.add(data, {
-		attempts: config.deliverJobMaxAttempts || 12,
+		attempts: config.deliverJobMaxAttempts,
 		timeout: MINUTE,
 		backoff: {
 			type: 'apBackoff',
@@ -113,7 +113,7 @@ export function inbox(activity: IActivity, signature: httpSignature.IParsedSignature
 	};

 	return inboxQueue.add(data, {
-		attempts: config.inboxJobMaxAttempts || 8,
+		attempts: config.inboxJobMaxAttempts,
 		timeout: 5 * MINUTE,
 		backoff: {
 			type: 'apBackoff',
@@ -291,8 +291,8 @@ export function webhookDeliver(webhook: Webhook, type: typeof webhookEventTypes[
 export default function() {
 	if (envOption.onlyServer) return;

-	deliverQueue.process(config.deliverJobConcurrency || 128, processDeliver);
-	inboxQueue.process(config.inboxJobConcurrency || 16, processInbox);
+	deliverQueue.process(config.deliverJobConcurrency, processDeliver);
+	inboxQueue.process(config.inboxJobConcurrency, processInbox);
 	endedPollNotificationQueue.process(endedPollNotification);
 	webhookDeliverQueue.process(64, processWebhookDeliver);
 	processDb(dbQueue);

View File

@@ -4,8 +4,8 @@ import { DeliverJobData, InboxJobData, DbJobData, ObjectStorageJobData, EndedPollNotificationJobData

 export const systemQueue = initializeQueue<Record<string, unknown>>('system');
 export const endedPollNotificationQueue = initializeQueue<EndedPollNotificationJobData>('endedPollNotification');
-export const deliverQueue = initializeQueue<DeliverJobData>('deliver', config.deliverJobPerSec || 128);
-export const inboxQueue = initializeQueue<InboxJobData>('inbox', config.inboxJobPerSec || 16);
+export const deliverQueue = initializeQueue<DeliverJobData>('deliver', config.deliverJobPerSec);
+export const inboxQueue = initializeQueue<InboxJobData>('inbox', config.inboxJobPerSec);
 export const dbQueue = initializeQueue<DbJobData>('db');
 export const objectStorageQueue = initializeQueue<ObjectStorageJobData>('objectStorage');
 export const webhookDeliverQueue = initializeQueue<WebhookDeliverJobData>('webhookDeliver', 64);
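For context on the second argument to initializeQueue: the per-second values from the config are the kind of numbers Bull accepts as a rate limiter. A minimal sketch of how such a limit is typically wired up, using assumed names (makeQueue, a placeholder Redis connection); this illustrates the Bull API, not foundkey's actual initializeQueue implementation:

import Bull from 'bull';

// Hypothetical helper: a positive perSecond becomes a Bull rate limiter,
// while -1 (the documented "disable" value in the config) leaves the queue unlimited.
function makeQueue<T>(name: string, perSecond = -1): Bull.Queue<T> {
	return new Bull<T>(name, {
		redis: { host: 'localhost', port: 6379 }, // placeholder connection settings
		limiter: perSecond > 0 ? { max: perSecond, duration: 1000 } : undefined,
	});
}

// Example: allow at most 128 deliver jobs to start per second.
const exampleDeliverQueue = makeQueue<{ to: string }>('deliver', 128);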