Compare commits
No commits in common. "develop" and "otp26" have entirely different histories.
406 changed files with 23436 additions and 48664 deletions
.buildpacks (new file)
@@ -0,0 +1 @@
+https://github.com/hashnuke/heroku-buildpack-elixir
.formatter.exs
@@ -1,14 +1,3 @@
 [
-  import_deps: [:ecto, :ecto_sql, :phoenix],
-  subdirectories: ["priv/*/migrations"],
-  plugins: [Phoenix.LiveView.HTMLFormatter],
-  inputs: [
-    "mix.exs",
-    "*.{heex,ex,exs}",
-    "{config,lib,test}/**/*.{heex,ex,exs}",
-    "priv/*/seeds.exs",
-    "priv/repo/migrations/*.exs",
-    "priv/repo/optional_migrations/**/*.exs",
-    "priv/scrubbers/*.ex"
-  ]
+  inputs: ["mix.exs", "{config,lib,test}/**/*.{ex,exs}", "priv/repo/migrations/*.exs", "priv/repo/optional_migrations/**/*.exs", "priv/scrubbers/*.ex"]
 ]
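Either version of this file drives `mix format`; the check below is the same one the lint pipeline further down in this diff runs, so it is a quick local way to verify a tree against whichever `.formatter.exs` is checked out:

```sh
# Reformat the project according to .formatter.exs
mix format

# Verify formatting only (exits non-zero on violations), as CI does
mix format --check-formatted
```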
.gitignore (vendored)
@@ -78,4 +78,3 @@ docs/venv
 # docker stuff
 docker-db
 *.iml
-docker-compose.override.yml
.woodpecker/build-amd64.yml
@@ -1,5 +1,4 @@
-labels:
-  platform: linux/amd64
+platform: linux/amd64
 
 depends_on:
   - test

@@ -35,10 +34,10 @@ variables:
   - &clean "(rm -rf release || true) && (rm -rf _build || true) && (rm -rf /root/.mix)"
   - &mix-clean "mix deps.clean --all && mix clean"
 
-steps:
+pipeline:
   # Canonical amd64
   debian-bookworm:
-    image: hexpm/elixir:1.15.4-erlang-26.0.2-debian-bookworm-20230612
+    image: hexpm/elixir:1.15.4-erlang-25.3.2.5-debian-bookworm-20230612
     <<: *on-release
     environment:
       MIX_ENV: prod

@@ -66,35 +65,9 @@ steps:
       - export DEST=scaleway:akkoma-updates/$${CI_COMMIT_TAG:-"$CI_COMMIT_BRANCH"}/akkoma-amd64-ubuntu-jammy.zip
       - /bin/sh /entrypoint.sh
 
-  debian-bullseye:
-    image: hexpm/elixir:1.15.4-erlang-26.0.2-debian-bullseye-20230612
-    <<: *on-release
-    environment:
-      MIX_ENV: prod
-      DEBIAN_FRONTEND: noninteractive
-    commands:
-      - apt-get update && apt-get install -y cmake libmagic-dev rclone zip imagemagick libmagic-dev git build-essential g++ wget
-      - *clean
-      - echo "import Config" > config/prod.secret.exs
-      - *setup-hex
-      - *tag-build
-      - mix deps.get --only prod
-      - mix release --path release
-      - zip akkoma-amd64-debian-bullseye.zip -r release
-
-  release-debian-bullseye:
-    image: akkoma/releaser
-    <<: *on-release
-    secrets: *scw-secrets
-    commands:
-      - export SOURCE=akkoma-amd64-debian-bullseye.zip
-      # AMD64
-      - export DEST=scaleway:akkoma-updates/$${CI_COMMIT_TAG:-"$CI_COMMIT_BRANCH"}/akkoma-amd64-debian-bullseye.zip
-      - /bin/sh /entrypoint.sh
-
   # Canonical amd64-musl
   musl:
-    image: hexpm/elixir:1.15.4-erlang-26.0.2-alpine-3.18.2
+    image: hexpm/elixir:1.14.3-erlang-25.2.2-alpine-3.18.0
     <<: *on-stable
     environment:
       MIX_ENV: prod
.woodpecker/build-arm64.yml
@@ -1,5 +1,4 @@
-labels:
-  platform: linux/aarch64
+platform: linux/arm64
 
 depends_on:
   - test

@@ -35,10 +34,10 @@ variables:
   - &clean "(rm -rf release || true) && (rm -rf _build || true) && (rm -rf /root/.mix)"
   - &mix-clean "mix deps.clean --all && mix clean"
 
-steps:
+pipeline:
   # Canonical arm64
   debian-bookworm:
-    image: hexpm/elixir:1.15.4-erlang-26.0.2-debian-bookworm-20230612
+    image: hexpm/elixir:1.15.4-erlang-25.3.2.5-debian-bookworm-20230612
     <<: *on-release
     environment:
       MIX_ENV: prod

@@ -66,7 +65,7 @@ steps:
 
   # Canonical arm64-musl
   musl:
-    image: hexpm/elixir:1.15.4-erlang-26.0.2-alpine-3.18.2
+    image: hexpm/elixir:1.15.4-erlang-25.3.2.5-alpine-3.18.2
     <<: *on-stable
     environment:
       MIX_ENV: prod
.woodpecker/docs.yml
@@ -1,5 +1,4 @@
-labels:
-  platform: linux/amd64
+platform: linux/amd64
 
 depends_on:
   - test

@@ -46,7 +45,7 @@ variables:
   - &clean "(rm -rf release || true) && (rm -rf _build || true) && (rm -rf /root/.mix)"
   - &mix-clean "mix deps.clean --all && mix clean"
 
-steps:
+pipeline:
   docs:
     <<: *on-point-release
     secrets:
.woodpecker/lint.yml (deleted)
@@ -1,56 +0,0 @@
-labels:
-  platform: linux/amd64
-
-variables:
-  - &scw-secrets
-    - SCW_ACCESS_KEY
-    - SCW_SECRET_KEY
-    - SCW_DEFAULT_ORGANIZATION_ID
-  - &setup-hex "mix local.hex --force && mix local.rebar --force"
-  - &on-release
-    when:
-      event:
-        - push
-        - tag
-      branch:
-        - develop
-        - stable
-        - refs/tags/v*
-        - refs/tags/stable-*
-  - &on-stable
-    when:
-      event:
-        - push
-        - tag
-      branch:
-        - stable
-        - refs/tags/stable-*
-  - &on-point-release
-    when:
-      event:
-        - push
-      branch:
-        - develop
-        - stable
-  - &on-pr-open
-    when:
-      event:
-        - pull_request
-
-  - &tag-build "export BUILD_TAG=$${CI_COMMIT_TAG:-\"$CI_COMMIT_BRANCH\"} && export PLEROMA_BUILD_BRANCH=$BUILD_TAG"
-
-  - &clean "(rm -rf release || true) && (rm -rf _build || true) && (rm -rf /root/.mix)"
-  - &mix-clean "mix deps.clean --all && mix clean"
-
-steps:
-  lint:
-    image: akkoma/ci-base:1.16-otp26
-    <<: *on-pr-open
-    environment:
-      MIX_ENV: test
-    commands:
-      - mix local.hex --force
-      - mix local.rebar --force
-      - mix deps.get
-      - mix compile
-      - mix format --check-formatted
.woodpecker/test.yml
@@ -1,19 +1,18 @@
-labels:
-  platform: linux/amd64
+platform: linux/amd64
 
-depends_on:
-  - lint
-
 matrix:
   ELIXIR_VERSION:
     - 1.14
     - 1.15
-    - 1.16
   OTP_VERSION:
     - 25
     - 26
   include:
-    - ELIXIR_VERSION: 1.16
+    - ELIXIR_VERSION: 1.14
+      OTP_VERSION: 25
+    - ELIXIR_VERSION: 1.15
+      OTP_VERSION: 25
+    - ELIXIR_VERSION: 1.15
       OTP_VERSION: 26
 
 variables:

@@ -68,8 +67,16 @@ services:
     POSTGRES_USER: postgres
     POSTGRES_PASSWORD: postgres
 
-steps:
-  test:
+pipeline:
+  lint:
+    <<: *on-pr-open
+    image: akkoma/ci-base:1.15
+    commands:
+      - mix local.hex --force
+      - mix local.rebar --force
+      - mix format --check-formatted
+
+  build:
     image: akkoma/ci-base:${ELIXIR_VERSION}-otp${OTP_VERSION}
     <<: *on-pr-open
     environment:

@@ -83,9 +90,24 @@ steps:
       - mix local.rebar --force
      - mix deps.get
       - mix compile
-      - mix ecto.drop -f -q
-      - mix ecto.create
-      - mix ecto.migrate
-      - mkdir -p test/tmp
-      - mix test --preload-modules --exclude erratic --exclude federated --exclude mocked || mix test --failed
-      - mix test --preload-modules --only mocked || mix test --failed
+
+  test:
+    image: akkoma/ci-base:${ELIXIR_VERSION}-otp${OTP_VERSION}
+    <<: *on-pr-open
+    environment:
+      MIX_ENV: test
+      POSTGRES_DB: pleroma_test_${ELIXIR_VERSION}_${OTP_VERSION}
+      POSTGRES_USER: postgres
+      POSTGRES_PASSWORD: postgres
+      DB_HOST: postgres
+    commands:
+      - mix local.hex --force
+      - mix local.rebar --force
+      - mix deps.get
+      - mix compile
+      - mix ecto.drop -f -q
+      - mix ecto.create
+      - mix ecto.migrate
+      - mkdir -p test/tmp
+      - mix test --preload-modules --exclude erratic --exclude federated --exclude mocked
+      - mix test --preload-modules --only mocked
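Both sides of this pipeline run the same core test sequence; a minimal local reproduction, assuming a reachable Postgres (the `DB_HOST`/`POSTGRES_*` values mirror the pipeline environment and are otherwise placeholders):

```sh
# Mirrors the CI test step; DB_HOST/POSTGRES_* must point at a running Postgres
export MIX_ENV=test DB_HOST=localhost POSTGRES_USER=postgres POSTGRES_PASSWORD=postgres
mix local.hex --force && mix local.rebar --force
mix deps.get && mix compile
mix ecto.drop -f -q && mix ecto.create && mix ecto.migrate
mkdir -p test/tmp
mix test --preload-modules --exclude erratic --exclude federated --exclude mocked
mix test --preload-modules --only mocked
```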
CHANGELOG.md
@@ -4,134 +4,10 @@ All notable changes to this project will be documented in this file.
 
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
 
-## UNRELEASED
+## Unreleased
 
-## BREAKING
-- Minimum PostgreSQL version is raised to 12
-- Swagger UI moved from `/akkoma/swaggerui/` to `/pleroma/swaggerui/`
-
-## Added
-- Implement [FEP-67ff](https://codeberg.org/fediverse/fep/src/branch/main/fep/67ff/fep-67ff.md) (federation documentation)
-- Meilisearch: it is now possible to use separate keys for search and admin actions
-- New standalone `prune_orphaned_activities` mix task with configurable batch limit
-- The `prune_objects` mix task now accepts a `--limit` parameter for initial object pruning
-
-## Fixed
-- Meilisearch: order of results returned from our REST API now actually matches how Meilisearch ranks results
-- Emoji are now federated as anonymous objects, fixing issues with
-  some strict servers e.g. rejecting remote emoji reactions
-- AP objects with additional JSON-LD profiles beyond ActivityStreams can now be fetched
-- Single-selection polls no longer expose the voter_count; MastoAPI demands it be null
-  and this confused some clients, leading to vote distributions >100%
-
-## Changed
-- Refactored Rich Media to cache the content in the database. Fetching operations that could block status rendering have been eliminated.
-
-## 2024.04.1 (Security)
-
-## Fixed
-- Issue allowing non-owners to use media objects in posts
-- Issue allowing use of non-media objects as attachments and crashing timeline rendering
-- Issue allowing webfinger spoofing in certain situations
-
-## 2024.04
-
-## Added
-- Support for [FEP-fffd](https://codeberg.org/fediverse/fep/src/branch/main/fep/fffd/fep-fffd.md) (proxy objects)
-- Verified support for elixir 1.16
-- Uploadfilter `Pleroma.Upload.Filter.Exiftool.ReadDescription` returns description values to the FE so they can pre-fill the image description field
-  NOTE: this filter MUST be placed before `Exiftool.StripMetadata` to work
-
-## Changed
-- Inbound pipeline error handling was modified somewhat, which should lead to less incomprehensible log spam. Hopefully.
-- Uploadfilter `Pleroma.Upload.Filter.Exiftool` was replaced by `Pleroma.Upload.Filter.Exiftool.StripMetadata`;
-  the latter strips all non-essential metadata by default but can be configured.
-  To regain the old behaviour of only stripping GPS data set `purge: ["gps:all"]`.
-- Uploadfilter `Pleroma.Upload.Filter.Exiftool` has been renamed to `Pleroma.Upload.Filter.Exiftool.StripMetadata`
-- MRF.InlineQuotePolicy now prefers to insert display URLs instead of ActivityPub IDs
-- Old accounts are no longer listed in WebFinger as aliases; this was breaking spec
-
-## Fixed
-- Issue preventing fetching anything from IPv6-only instances
-- Issue allowing post content to leak via opengraph tags despite :restrict\_unauthenticated being set
-- Move activities no longer operate on stale user data
-- Missing definitions in our JSON-LD context
-- Issue mangling newlines in code blocks for RSS/Atom feeds
-- static\_fe squeezing non-square avatars and emoji
-- Issue leading to properly JSON-LD compacted emoji reactions being rejected
-- We now use a standard-compliant Accept header when fetching ActivityPub objects
-- /api/pleroma/notification\_settings was rejecting body parameters;
-  this also broke changing this setting via akkoma-fe
-- Issue leading to Mastodon bot accounts being rejected
-- Scope misdetection of remote posts resulting from not recognising
-  JSON-LD-compacted forms of public scope; affected e.g. federation with bovine
-- Ratelimits encountered when fetching objects are now respected; 429 responses will cause a backoff when we get one.
-
-## Removed
-- ActivityPub Client-To-Server write API endpoints have been disabled;
-  read endpoints are planned to be removed next release unless a clear need is demonstrated
-
-## 2024.03
-
-## Added
-- CLI tasks best-effort checking for past abuse of the recent spoofing exploit
-- new `:mrf_steal_emoji, :download_unknown_size` option; defaults to `false`
-
-## Changed
-- `Pleroma.Upload, :base_url` now MUST be configured explicitly if used;
-  use of the same domain as the instance is **strongly** discouraged
-- `:media_proxy, :base_url` now MUST be configured explicitly if used;
-  use of the same domain as the instance is **strongly** discouraged
-- StealEmoji:
-  - now uses the pack.json format;
-    existing users must migrate with an out-of-band script (check release notes)
-  - only steals shortcodes recognised as valid
-  - URLs of stolen emoji are no longer predictable
-- The `Dedupe` upload filter is now always active;
-  `AnonymizeFilenames` is again opt-in
-- received AP data is sanity checked before we attempt to parse it as a user
-- Uploads, emoji and media proxy now restrict Content-Type headers to a safe subset
-- Akkoma will no longer fetch and parse objects hosted on the same domain
-
-## Fixed
-- Critical security issue allowing Akkoma to be used as a vector for
-  (depending on configuration) impersonation of other users or creation
-  of bogus users and posts on the upload domain
-- Critical security issue letting Akkoma fall for the above impersonation
-  payloads due to lack of strict id checking
-- Critical security issue allowing domains redirected to to pose as the initial domain
-  (e.g. with media proxy's fallback redirects)
-- refetched objects can no longer attribute themselves to third-party actors
-  (this had no externally visible effect since actor info is read from the Create activity)
-- our litepub JSON-LD schema is now served with the correct content type
-- remote APNG attachments are now recognised as images
-
-## Upgrade Notes
-
-- As mentioned in "Changed", `Pleroma.Upload, :base_url` **MUST** be configured. Uploads will fail without it.
-  - Akkoma will refuse to start if this is not set.
-- Same with media proxy.
-
-## 2024.02
-
 ## Added
 - Full compatibility with Erlang OTP26
-- handling of GET /api/v1/preferences
-- Akkoma API is now documented
-- ability to auto-approve follow requests from users you are already following
-- The SimplePolicy MRF can now strip user backgrounds from selected remote hosts
-
-## Changed
-- OTP builds are now built on erlang OTP26
-- The base Phoenix framework is now updated to 1.7
-- An `outbox` field has been added to actor profiles to comply with AP spec
-- User profile backgrounds now federate with other Akkoma instances and Sharkey
-
-## Fixed
-- Documentation issue in which a non-existing nginx file was referenced
-- Issue where a bad inbox URL could break federation
-- Issue where hashtag rel values would be scrubbed
-- Issue where short domains listed in `transparency_obfuscate_domains` were not actually obfuscated
 
 ## 2023.08
 
Dockerfile
@@ -1,4 +1,4 @@
-FROM hexpm/elixir:1.15.4-erlang-26.0.2-alpine-3.18.2
+FROM hexpm/elixir:1.15.4-erlang-25.3.2.5-alpine-3.18.2
 
 ENV MIX_ENV=prod
 ENV ERL_EPMD_ADDRESS=127.0.0.1
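Whichever base image is checked out, the image builds the same way; a sketch of the usual local build (the tag is arbitrary):

```sh
# Build the release image from the repository root
docker build -t akkoma:local .
```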
FEDERATION.md (deleted)
@@ -1,42 +0,0 @@
-# Federation
-
-## Supported federation protocols and standards
-
-- [ActivityPub](https://www.w3.org/TR/activitypub/) (Server-to-Server)
-- [WebFinger](https://webfinger.net/)
-- [Http Signatures](https://datatracker.ietf.org/doc/html/draft-cavage-http-signatures)
-- [NodeInfo](https://nodeinfo.diaspora.software/)
-
-## Supported FEPs
-
-- [FEP-67ff: FEDERATION](https://codeberg.org/fediverse/fep/src/branch/main/fep/67ff/fep-67ff.md)
-- [FEP-f1d5: NodeInfo in Fediverse Software](https://codeberg.org/fediverse/fep/src/branch/main/fep/f1d5/fep-f1d5.md)
-- [FEP-fffd: Proxy Objects](https://codeberg.org/fediverse/fep/src/branch/main/fep/fffd/fep-fffd.md)
-
-## ActivityPub
-
-Akkoma mostly follows the server-to-server parts of the ActivityPub standard,
-but implements quirks for Mastodon compatibility as well as Mastodon-specific
-and custom extensions.
-
-See our documentation and Mastodon’s federation information
-linked further below for details on these quirks and extensions.
-
-Akkoma does not perform JSON-LD processing.
-
-### Required extensions
-
-#### HTTP Signatures
-All AP S2S POST requests to Akkoma instances MUST be signed.
-Depending on instance configuration the same may be true for GET requests.
-
-## Nodeinfo
-
-Akkoma provides many additional entries in its nodeinfo response,
-see the documentation linked below for details.
-
-## Additional documentation
-
-- [Akkoma’s ActivityPub extensions](https://docs.akkoma.dev/develop/development/ap_extensions/)
-- [Akkoma’s nodeinfo extensions](https://docs.akkoma.dev/develop/development/nodeinfo_extensions/)
-- [Mastodon’s federation requirements](https://github.com/mastodon/mastodon/blob/main/FEDERATION.md)
Procfile (new file)
@@ -0,0 +1,2 @@
+web: mix phx.server
+release: mix ecto.migrate
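The Procfile pairs with the `.buildpacks` file at the top of this diff: on Heroku-style platforms each line maps a process type to a command. The same commands can be exercised by hand; a sketch:

```sh
# What the platform runs for the release phase and the web process, respectively
mix ecto.migrate
mix phx.server
```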
SECURITY.md
@@ -1,21 +1,16 @@
-# Akkoma backend security handling
+# Pleroma backend security policy
 
+## Supported versions
+
+Currently, Pleroma offers bugfixes and security patches only for the latest minor release.
+
+| Version | Support
+|---------| --------
+| 2.2     | Bugfixes and security patches
+
 ## Reporting a vulnerability
 
-Please send an email (preferably encrypted) or
-a DM via our IRC to one of the following people:
-
-| Forgejo nick | IRC nick      | Email         | GPG                                      |
-| ------------ | ------------- | ------------- | ---------------------------------------- |
-| floatinghost | FloatingGhost | *see GPG key* | https://coffee-and-dreams.uk/pubkey.asc  |
+Please use confidential issues (tick the "This issue is confidential and should only be visible to team members with at least Reporter access." box when submitting) at our [bugtracker](https://git.pleroma.social/pleroma/pleroma/-/issues/new) for reporting vulnerabilities.
 
 ## Announcements
 
-New releases and security issues are announced at
-[meta.akkoma.dev](https://meta.akkoma.dev/c/releases) and
-[@akkoma@ihatebeinga.live](https://ihatebeinga.live/akkoma).
-
-Both also offer RSS feeds
-([meta](https://meta.akkoma.dev/c/releases/7.rss),
-[fedi](https://ihatebeinga.live/users/akkoma.rss))
-so you can keep an eye on it without any accounts.
+New releases are announced at [pleroma.social](https://pleroma.social/announcements/). All security releases are tagged with ["Security"](https://pleroma.social/announcements/tags/security/). You can be notified of them by subscribing to an Atom feed at <https://pleroma.social/announcements/tags/security/feed.xml>.
config/config.exs
@@ -61,11 +61,11 @@
 # Upload configuration
 config :pleroma, Pleroma.Upload,
   uploader: Pleroma.Uploaders.Local,
-  filters: [],
+  filters: [Pleroma.Upload.Filter.Dedupe],
   link_name: false,
+  proxy_remote: false,
   filename_display_max_length: 30,
-  base_url: nil,
-  allowed_mime_types: ["image", "audio", "video"]
+  base_url: nil
 
 config :pleroma, Pleroma.Uploaders.Local, uploads: "uploads"

@@ -110,6 +110,17 @@
   "xmpp"
 ]
 
+websocket_config = [
+  path: "/websocket",
+  serializer: [
+    {Phoenix.Socket.V1.JSONSerializer, "~> 1.0.0"},
+    {Phoenix.Socket.V2.JSONSerializer, "~> 2.0.0"}
+  ],
+  timeout: 60_000,
+  transport_log: false,
+  compress: false
+]
+
 # Configures the endpoint
 config :pleroma, Pleroma.Web.Endpoint,
   url: [host: "localhost"],

@@ -119,7 +130,10 @@
       {:_,
        [
          {"/api/v1/streaming", Pleroma.Web.MastodonAPI.WebsocketHandler, []},
-         {:_, Plug.Cowboy.Handler, {Pleroma.Web.Endpoint, []}}
+         {"/websocket", Phoenix.Endpoint.CowboyWebSocket,
+          {Phoenix.Transports.WebSocket,
+           {Pleroma.Web.Endpoint, Pleroma.Web.UserSocket, websocket_config}}},
+         {:_, Phoenix.Endpoint.Cowboy2Handler, {Pleroma.Web.Endpoint, []}}
        ]}
     ]
   ],

@@ -148,38 +162,18 @@
   format: "$metadata[$level] $message",
   metadata: [:request_id]
 
-# ———————————————————————————————————————————————————————————————
-# W A R N I N G
-# ———————————————————————————————————————————————————————————————
-#
-# Whenever adding a privileged new custom type for e.g.
-# ActivityPub objects, ALWAYS map their extension back
-# to "application/octet-stream".
-# Else files served by us can automatically end up with
-# those privileged types causing severe security hazards.
-# (We need those mappings so Phoenix can associate its format
-# (the "extension") to incoming requests of those MIME types)
-#
-# ———————————————————————————————————————————————————————————————
 config :mime, :types, %{
   "application/xml" => ["xml"],
   "application/xrd+xml" => ["xrd+xml"],
   "application/jrd+json" => ["jrd+json"],
   "application/activity+json" => ["activity+json"],
-  "application/ld+json" => ["activity+json"],
-  # Can be removed when bumping MIME past 2.0.5
-  # see https://akkoma.dev/AkkomaGang/akkoma/issues/657
-  "image/apng" => ["apng"]
+  "application/ld+json" => ["activity+json"]
 }
 
 config :mime, :extensions, %{
-  "xrd+xml" => "text/plain",
-  "jrd+json" => "text/plain",
-  "activity+json" => "text/plain"
+  "activity+json" => "application/activity+json"
 }
 
-# ———————————————————————————————————————————————————————————————
-
 config :tesla, :adapter, {Tesla.Adapter.Finch, name: MyFinch}
 
 # Configures http settings, upstream proxy etc.

@@ -188,10 +182,8 @@
   receive_timeout: :timer.seconds(15),
   proxy_url: nil,
   user_agent: :default,
-  pool_size: 10,
-  adapter: [],
-  # see: https://hexdocs.pm/finch/Finch.html#start_link/1
-  pool_max_idle_time: :timer.seconds(30)
+  pool_size: 50,
+  adapter: []
 
 config :pleroma, :instance,
   name: "Akkoma",

@@ -312,6 +304,7 @@
     alwaysShowSubjectInput: true,
     background: "/images/city.jpg",
     collapseMessageWithSubject: false,
+    disableChat: false,
     greentext: false,
     hideFilteredStatuses: false,
     hideMutedPosts: false,

@@ -399,7 +392,6 @@
   accept: [],
   avatar_removal: [],
   banner_removal: [],
-  background_removal: [],
   reject_deletes: [],
   handle_threads: true
 

@@ -428,6 +420,8 @@
   threshold: 604_800,
   actions: [:delist, :strip_followers]
 
+config :pleroma, :mrf_follow_bot, follower_nickname: nil
+
 config :pleroma, :mrf_reject_newly_created_account_notes, age: 86_400
 
 config :pleroma, :rich_media,

@@ -438,12 +432,8 @@
     Pleroma.Web.RichMedia.Parsers.TwitterCard,
     Pleroma.Web.RichMedia.Parsers.OEmbed
   ],
-  failure_backoff: 60_000,
-  ttl_setters: [
-    Pleroma.Web.RichMedia.Parser.TTL.AwsSignedUrl,
-    Pleroma.Web.RichMedia.Parser.TTL.Opengraph
-  ],
-  max_body: 5_000_000
+  failure_backoff: :timer.minutes(20),
+  ttl_setters: [Pleroma.Web.RichMedia.Parser.TTL.AwsSignedUrl]
 
 config :pleroma, :media_proxy,
   enabled: false,

@@ -477,6 +467,10 @@
   image_quality: 85,
   min_content_length: 100 * 1024
 
+config :pleroma, :shout,
+  enabled: true,
+  limit: 5_000
+
 config :phoenix, :format_encoders, json: Jason, "activity+json": Jason
 
 config :phoenix, :json_library, Jason

@@ -581,9 +575,7 @@
     mute_expire: 5,
     search_indexing: 10,
     nodeinfo_fetcher: 1,
-    database_prune: 1,
-    rich_media_backfill: 2,
-    rich_media_expiration: 2
+    database_prune: 1
   ],
   plugins: [
     Oban.Plugins.Pruner,

@@ -599,8 +591,7 @@
   retries: [
     federator_incoming: 5,
     federator_outgoing: 5,
-    search_indexing: 2,
-    rich_media_backfill: 3
+    search_indexing: 2
   ],
   timeout: [
     activity_expiration: :timer.seconds(5),

@@ -622,8 +613,7 @@
     mute_expire: :timer.seconds(5),
     search_indexing: :timer.seconds(5),
     nodeinfo_fetcher: :timer.seconds(10),
-    database_prune: :timer.minutes(10),
-    rich_media_backfill: :timer.seconds(30)
+    database_prune: :timer.minutes(10)
   ]
 
 config :pleroma, Pleroma.Formatter,

@@ -822,10 +812,8 @@
 config :pleroma, configurable_from_database: false
 
 config :pleroma, Pleroma.Repo,
-  parameters: [
-    gin_fuzzy_search_limit: "500",
-    plan_cache_mode: "force_custom_plan"
-  ]
+  parameters: [gin_fuzzy_search_limit: "500"],
+  prepare: :unnamed
 
 config :pleroma, :majic_pool, size: 2
 
config/description.exs
@@ -100,23 +100,18 @@
         label: "Base URL",
         type: :string,
         description:
-          "Base URL for the uploads. Required if you use a CDN or host attachments under a different domain - it is HIGHLY recommended that you **do not** set this to be the same as the domain akkoma is hosted on.",
+          "Base URL for the uploads. Required if you use a CDN or host attachments under a different domain.",
         suggestions: [
-          "https://media.akkoma.dev/media/"
+          "https://cdn-host.com"
         ]
       },
       %{
-        key: :allowed_mime_types,
-        label: "Allowed MIME types",
-        type: {:list, :string},
-        description:
-          "List of MIME (main) types uploads are allowed to identify themselves with. Other types may still be uploaded, but will identify as a generic binary to clients. WARNING: Loosening this over the defaults can lead to security issues. Removing types is safe, but only add to the list if you are sure you know what you are doing.",
-        suggestions: [
-          "image",
-          "audio",
-          "video",
-          "font"
-        ]
+        key: :proxy_remote,
+        type: :boolean,
+        description: """
+        Proxy requests to the remote uploader.\n
+        Useful if media upload endpoint is not internet accessible.
+        """
       },
       %{
         key: :filename_display_max_length,

@@ -214,26 +209,6 @@
         }
       ]
     },
-    %{
-      group: :pleroma,
-      key: Pleroma.Upload.Filter.Exiftool.StripMetadata,
-      type: :group,
-      description: "Strip specified metadata from image uploads",
-      children: [
-        %{
-          key: :purge,
-          description: "Metadata fields or groups to strip",
-          type: {:list, :string},
-          suggestions: ["all", "CommonIFD0"]
-        },
-        %{
-          key: :preserve,
-          description: "Metadata fields or groups to preserve (takes precedence over stripping)",
-          type: {:list, :string},
-          suggestions: ["ColorSpaces", "Orientation"]
-        }
-      ]
-    },
     %{
       group: :pleroma,
       key: Pleroma.Emails.Mailer,

@@ -2709,8 +2684,8 @@
       %{
         key: :pool_size,
         type: :integer,
-        description: "Number of concurrent outbound HTTP requests to allow PER HOST. Default 10.",
-        suggestions: [10]
+        description: "Number of concurrent outbound HTTP requests to allow. Default 50.",
+        suggestions: [50]
       },
       %{
         key: :adapter,

@@ -2733,13 +2708,6 @@
           ]
         }
       ]
-      },
-      %{
-        key: :pool_max_idle_time,
-        type: :integer,
-        description:
-          "Number of seconds to retain an HTTP pool; pool will remain if actively in use. Default 30 seconds (in ms).",
-        suggestions: [30_000]
       }
     ]
   },
config/dokku.exs (new file)
@@ -0,0 +1,25 @@
+import Config
+
+config :pleroma, Pleroma.Web.Endpoint,
+  http: [
+    port: String.to_integer(System.get_env("PORT") || "4000"),
+    protocol_options: [max_request_line_length: 8192, max_header_value_length: 8192]
+  ],
+  protocol: "http",
+  secure_cookie_flag: false,
+  url: [host: System.get_env("APP_HOST"), scheme: "https", port: 443],
+  secret_key_base: "+S+ULgf7+N37c/lc9K66SMphnjQIRGklTu0BRr2vLm2ZzvK0Z6OH/PE77wlUNtvP"
+
+database_url =
+  System.get_env("DATABASE_URL") ||
+    raise """
+    environment variable DATABASE_URL is missing.
+    For example: ecto://USER:PASS@HOST/DATABASE
+    """
+
+config :pleroma, Pleroma.Repo,
+  # ssl: true,
+  url: database_url,
+  pool_size: String.to_integer(System.get_env("POOL_SIZE") || "10")
+
+config :pleroma, :instance, name: "#{System.get_env("APP_NAME")} CI Instance"
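Everything in `config/dokku.exs` is read from the environment at runtime; a sketch of the variables it expects (all values below are placeholders):

```sh
export DATABASE_URL="ecto://USER:PASS@HOST/DATABASE"  # required; the config raises if unset
export APP_HOST="akkoma.example.com"                  # becomes url: [host: ...]
export APP_NAME="my-akkoma"                           # instance name becomes "my-akkoma CI Instance"
export PORT=4000                                      # HTTP listen port, defaults to 4000
export POOL_SIZE=10                                   # Ecto pool size, defaults to 10
```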
config/test.exs
@@ -22,12 +22,9 @@
 config :pleroma, :auth, oauth_consumer_strategies: []
 
 config :pleroma, Pleroma.Upload,
-  base_url: "http://localhost:4001/media/",
   filters: [],
   link_name: false
 
-config :pleroma, :media_proxy, base_url: "http://localhost:4001"
-
 config :pleroma, Pleroma.Uploaders.Local, uploads: "test/uploads"
 
 config :pleroma, Pleroma.Emails.Mailer, adapter: Swoosh.Adapters.Test, enabled: true

@@ -51,8 +48,7 @@
   hostname: System.get_env("DB_HOST") || "localhost",
   pool: Ecto.Adapters.SQL.Sandbox,
   pool_size: 50,
-  queue_target: 5000,
-  log: false
+  queue_target: 5000
 
 config :pleroma, :dangerzone, override_repo_pool_size: true
 

@@ -64,8 +60,7 @@
 config :pleroma, :rich_media,
   enabled: false,
   ignore_hosts: [],
-  ignore_tld: ["local", "localdomain", "lan"],
-  max_body: 2_000_000
+  ignore_tld: ["local", "localdomain", "lan"]
 
 config :pleroma, :instance,
   multi_factor_authentication: [

@@ -143,8 +138,6 @@
 config :pleroma, :instances_favicons, enabled: false
 config :pleroma, :instances_nodeinfo, enabled: false
 
-config :pleroma, Pleroma.Web.RichMedia.Backfill, provider: Pleroma.Web.RichMedia.Backfill
-
 if File.exists?("./config/test.secret.exs") do
   import_config "test.secret.exs"
 else
coveralls.json (new file)
@@ -0,0 +1,7 @@
+{
+  "skip_files": [
+    "test/support",
+    "lib/mix/tasks/pleroma/benchmark.ex",
+    "lib/credo/check/consistency/file_location.ex"
+  ]
+}
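`coveralls.json` lists paths excluded from coverage reporting; assuming the project wires coverage through ExCoveralls (the library that reads this file), a typical local run is:

```sh
# Runs the test suite with coverage, honouring skip_files from coveralls.json
MIX_ENV=test mix coveralls
```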
binary-leak-checker.sh (deleted)
@@ -1,10 +0,0 @@
-if [ "$#" -ne 2 ]; then
-    echo "Usage: binary-leak-checker.sh <nodename> <erlang cookie>"
-    exit 1
-fi
-
-echo "The command you want to run is:
-:recon.bin_leak(10)
-"
-
-iex --sname debug --remsh $1 --erl "-setcookie $2"
docker-compose.yml
@@ -4,7 +4,6 @@ services:
   db:
     image: akkoma-db:latest
     build: ./docker-resources/database
-    shm_size: 4gb
     restart: unless-stopped
     user: ${DOCKER_USER}
     environment: {

@@ -46,7 +45,7 @@ services:
     volumes:
       - .:/opt/akkoma
 
-  # Copy this into docker-compose.override.yml and uncomment there if you want to use a reverse proxy
+  # Uncomment the following if you want to use a reverse proxy
   #proxy:
   #  image: caddy:2-alpine
   #  restart: unless-stopped
docker-entrypoint.sh
@@ -11,4 +11,4 @@ echo "-- Running migrations..."
 mix ecto.migrate
 
 echo "-- Starting!"
-elixir --erl "+sbwt none +sbwtdcpu none +sbwtdio none" -S mix phx.server
+mix phx.server
docs/docs/administration/CLI_tasks/database.md
@@ -50,39 +50,9 @@ This will prune remote posts older than 90 days
 
 - `--keep-threads` - Don't prune posts when they are part of a thread where at least one post has seen local interaction (e.g. one of the posts is a local post, or is favourited by a local user, or has been repeated by a local user...). It also won't delete posts when at least one of the posts in that thread is kept (e.g. because one of the posts has seen recent activity).
 - `--keep-non-public` - Keep non-public posts like DMs and followers-only, even if they are remote.
-- `--limit` - limits how many remote posts get pruned. This limit does **not** apply to any of the follow-up jobs. To keep the database load in check, it is therefore advisable to run the standalone `prune_orphaned_activities` task with a limit afterwards instead of passing `--prune-orphaned-activities` to this task.
 - `--prune-orphaned-activities` - Also prune orphaned activities afterwards. Activities are things like Like, Create, Announce, Flag (aka reports)... They can significantly help reduce the database size.
 - `--vacuum` - Run `VACUUM FULL` after the objects are pruned. This should not be used on a regular basis, but is useful if your instance has been running for a long time before pruning.
 
-## Prune orphaned activities from the database
-
-This will prune activities which are no longer referenced by anything.
-Such activities might be the result of running `prune_objects` without `--prune-orphaned-activities`.
-The same notes and warnings apply as for `prune_objects`.
-
-The task will print out how many rows were freed in total in its last
-line of output in the form `Deleted 345 rows`.
-When running the job in limited batches this can be used to determine
-when all orphaned activities have been deleted.
-
-=== "OTP"
-
-    ```sh
-    ./bin/pleroma_ctl database prune_orphaned_activities [option ...]
-    ```
-
-=== "From Source"
-
-    ```sh
-    mix pleroma.database prune_orphaned_activities [option ...]
-    ```
-
-### Options
-
-- `--limit n` - Only delete up to `n` activities in each query making up this job, i.e. if this job runs two queries at most `2n` activities will be deleted. Running this task repeatedly in limited batches can help maintain the instance’s responsiveness while still freeing up some space.
-- `--no-singles` - Do not delete activities referencing single objects
-- `--no-arrays` - Do not delete activities referencing an array of objects
-
 ## Create a conversation for all existing DMs
 
 Can be safely re-run
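For reference, the pruning tasks this hunk touches compose naturally on the develop side of the diff; a hypothetical from-source invocation using the documented batch limits (the limit values are arbitrary):

```sh
# Prune old remote posts in a bounded batch, then clean up orphaned activities separately
mix pleroma.database prune_objects --keep-threads --limit 10000
mix pleroma.database prune_orphaned_activities --limit 10000
```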
docs/docs/administration/CLI_tasks/instance.md
@@ -37,8 +37,7 @@ If any of the options are left unspecified, you will be prompted interactively.
 - `--static-dir <path>` - the directory custom public files should be read from (custom emojis, frontend bundle overrides, robots.txt, etc.)
 - `--listen-ip <ip>` - the IP the app should listen to, defaults to 127.0.0.1
 - `--listen-port <port>` - the port the app should listen to, defaults to 4000
-- `--strip-uploads-metadata <Y|N>` - use ExifTool to strip uploads of metadata when possible
-- `--read-uploads-description <Y|N>` - use ExifTool to read image descriptions from uploads
+- `--strip-uploads <Y|N>` - use ExifTool to strip uploads of sensitive location data
 - `--anonymize-uploads <Y|N>` - randomize uploaded filenames
 - `--dedupe-uploads <Y|N>` - store files based on their hash to reduce data storage requirements if duplicates are uploaded with different filenames
 - `--skip-release-env` - skip generation of the release environment file
docs/docs/administration/CLI_tasks/robots_txt.md
@@ -17,5 +17,5 @@ If you want to generate a restrictive `robots.txt`, you can run the following mix task:
 === "From Source"
 
     ```sh
-    mix pleroma.robots_txt disallow_all
+    mix pleroma.robotstxt disallow_all
     ```
docs/docs/administration/CLI_tasks/security.md (deleted)
@@ -1,56 +0,0 @@
-# Security-related tasks
-
-{! administration/CLI_tasks/general_cli_task_info.include !}
-
-!!! danger
-    Many of these tasks were written in response to a patched exploit.
-    It is recommended to run those very soon after installing its respective security update.
-    Over time with db migrations they might become less accurate or be removed altogether.
-    If you never ran an affected version, there’s no point in running them.
-
-## Spoofed ActivityPub objects exploit (2024-03, fixed in 3.11.1)
-
-### Search for uploaded spoofing payloads
-
-Scans local uploads for spoofing payloads.
-If the instance is not using the local uploader it was not affected.
-Attachments will be scanned anyway in case the local uploader was used in the past.
-
-!!! note
-    This cannot reliably detect payloads attached to deleted posts.
-
-=== "OTP"
-
-    ```sh
-    ./bin/pleroma_ctl security spoof-uploaded
-    ```
-
-=== "From Source"
-
-    ```sh
-    mix pleroma.security spoof-uploaded
-    ```
-
-### Search for counterfeit posts in database
-
-Scans all notes in the database for signs of being spoofed.
-
-!!! note
-    Spoofs targeting local accounts can be detected rather reliably
-    (with some restrictions documented in the task’s logs).
-    Counterfeit posts from remote users cannot. A best-effort attempt is made, but
-    a thorough attacker can avoid this and it may yield a small number of false positives.
-
-    Should you find counterfeit posts of local users, let other admins know so they can delete them too.
-
-=== "OTP"
-
-    ```sh
-    ./bin/pleroma_ctl security spoof-inserted
-    ```
-
-=== "From Source"
-
-    ```sh
-    mix pleroma.security spoof-inserted
-    ```
docs/docs/administration/backup.md
@@ -4,12 +4,12 @@
 
 1. Stop the Akkoma service.
 2. Go to the working directory of Akkoma (default is `/opt/akkoma`)
-3. Run `sudo -Hu postgres pg_dump -d akkoma --format=custom -f </path/to/backup_location/akkoma.pgdump>`[¹] (make sure the postgres user has write access to the destination file)
-4. Copy `akkoma.pgdump`, `config/config.exs`[²], the `uploads` folder, and the [static directory](../configuration/static_dir.md) to your backup destination. If you have other modifications, copy those changes too.
+3. Run[¹] `sudo -Hu postgres pg_dump -d akkoma --format=custom -f </path/to/backup_location/akkoma.pgdump>` (make sure the postgres user has write access to the destination file)
+4. Copy `akkoma.pgdump`, `config/prod.secret.exs`[²], `config/setup_db.psql` (if still available) and the `uploads` folder to your backup destination. If you have other modifications, copy those changes too.
 5. Restart the Akkoma service.
 
-[¹]: We assume the database name is "akkoma". If not, you can find the correct name in your configuration files.
-[²]: If you have a from-source installation, you need `config/prod.secret.exs` instead of `config/config.exs`. The `config/config.exs` file also exists, but in case of from-source installations it only contains the default values and is tracked by Git, so you don't need to back it up.
+[¹]: We assume the database name is "akkoma". If not, you can find the correct name in your config files.
+[²]: If you've installed using OTP, you need `config/config.exs` instead of `config/prod.secret.exs`.
 
 ## Restore/Move
 

@@ -17,16 +17,19 @@
 2. Stop the Akkoma service.
 3. Go to the working directory of Akkoma (default is `/opt/akkoma`)
 4. Copy the above-mentioned files back to their original position.
-5. Drop the existing database and user[¹]. `sudo -Hu postgres psql -c 'DROP DATABASE akkoma;';` `sudo -Hu postgres psql -c 'DROP USER akkoma;'`
-6. Restore the database schema and akkoma role[¹] (replace the password with the one you find in the configuration file), `sudo -Hu postgres psql -c "CREATE USER akkoma WITH ENCRYPTED PASSWORD '<database-password-which-you-can-find-in-your-configuration-file>';"` `sudo -Hu postgres psql -c "CREATE DATABASE akkoma OWNER akkoma;"`.
+5. Drop the existing database and user if restoring in-place[¹]. `sudo -Hu postgres psql -c 'DROP DATABASE akkoma;';` `sudo -Hu postgres psql -c 'DROP USER akkoma;'`
+6. Restore the database schema and akkoma role using either of the following options
+    * You can use the original `setup_db.psql` if you have it[²]: `sudo -Hu postgres psql -f config/setup_db.psql`.
+    * Or recreate the database and user yourself (replace the password with the one you find in the config file) `sudo -Hu postgres psql -c "CREATE USER akkoma WITH ENCRYPTED PASSWORD '<database-password-which-you-can-find-in-your-config-file>'; CREATE DATABASE akkoma OWNER akkoma;"`.
 7. Now restore the Akkoma instance's data into the empty database schema[¹]: `sudo -Hu postgres pg_restore -d akkoma -v -1 </path/to/backup_location/akkoma.pgdump>`
-8. If you installed a newer Akkoma version, you should run the database migrations `./bin/pleroma_ctl migrate`[²].
+8. If you installed a newer Akkoma version, you should run `MIX_ENV=prod mix ecto.migrate`[³]. This task performs database migrations, if there were any.
 9. Restart the Akkoma service.
 10. Run `sudo -Hu postgres vacuumdb --all --analyze-in-stages`. This will quickly generate the statistics so that postgres can properly plan queries.
-11. If setting up on a new server, configure Nginx by using the `installation/nginx/akkoma.nginx` configuration sample or reference the Akkoma installation guide which contains the Nginx configuration instructions.
+11. If setting up on a new server, configure Nginx by using the `installation/akkoma.nginx` config sample or reference the Akkoma installation guide for your OS, which contains the Nginx configuration instructions.
 
-[¹]: We assume the database name and user are both "akkoma". If not, you can find the correct name in your configuration files.
-[²]: If you have a from-source installation, the command is `MIX_ENV=prod mix ecto.migrate`. Note that we prefix with `MIX_ENV=prod` to use the `config/prod.secret.exs` configuration file.
+[¹]: We assume the database name and user are both "akkoma". If not, you can find the correct name in your config files.
+[²]: You can recreate the `config/setup_db.psql` by running the `mix pleroma.instance gen` task again. You can ignore most of the questions, but make the database user, name, and password the same as found in your backed-up config file. This will also create a new `config/generated_config.exs` file, which you may delete as it is not needed.
+[³]: Prefix with `MIX_ENV=prod` to run it using the production config file.
 
 ## Remove
 

@@ -42,16 +45,3 @@
 8. Remove the dependencies that you don't need anymore (see installation guide). Make sure you don't remove packages that are still needed for other software that you have running!
 
 [¹]: We assume the database name and user are both "akkoma". If not, you can find the correct name in your config files.
-
-## Docker installations
-
-If running behind Docker, it is required to run the above commands inside of a running database container.
-
-### Example
-Running `docker compose run --rm db pg_dump <...>` will fail and return:
-```
-pg_dump: error: connection to server on socket "/run/postgresql/.s.PGSQL.5432" failed: No such file or directory
-Is the server running locally and accepting connections on that socket?"
-```
-However, first starting just the database container with `docker compose up db -d`, and then running `docker compose exec db pg_dump -d akkoma --format=custom -f </your/backup/dir/akkoma.pgdump>` will successfully generate a database dump.
-Then, to make the file accessible on the host system, you can run `docker compose cp db:</your/backup/dir/akkoma.pgdump> </your/target/location>` to copy it from the container.
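Whichever procedure produced the dump, it can be sanity-checked with standard PostgreSQL tooling before you rely on it (the path is a placeholder):

```sh
# Lists the archive's table of contents; fails fast on a truncated or corrupt dump
pg_restore --list /path/to/backup_location/akkoma.pgdump
```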
docs/docs/administration/monitoring.md
@@ -3,7 +3,7 @@
 If you run akkoma, you may be inclined to collect metrics to ensure your instance is running smoothly,
 and that there's nothing quietly failing in the background.
 
-To facilitate this, akkoma exposes a dashboard and prometheus metrics to be scraped.
+To facilitate this, akkoma exposes prometheus metrics to be scraped.
 
 ## Prometheus
 

@@ -31,15 +31,3 @@ Once you have your token of the form `Bearer $ACCESS_TOKEN`, you can use that in
       - targets:
         - example.com
 ```
-
-## Dashboard
-
-Administrators can access a live dashboard under `/phoenix/live_dashboard`
-giving an overview of uptime, software versions, database stats and more.
-
-The dashboard also includes a variation of the prometheus metrics, however
-they do not exactly match due to respective limitations of the dashboard
-and the prometheus exporter.
-More importantly, the dashboard collects metrics locally in the browser
-only while the page is open and cannot give a view on their past history.
-For proper monitoring it is recommended to set up prometheus.
@ -1,15 +1,12 @@

# Akkoma Clients

This is a list of clients that are known to work with Akkoma.

!!! warning
    **Clients listed here are not officially supported by the Akkoma project.**
    Some Akkoma features may be unsupported by these clients.

## Multiplatform

### Kaiteki
- Homepage: <https://kaiteki.app/>
- Source Code: <https://github.com/Kaiteki-Fedi/Kaiteki>
- Contact: [@kaiteki@social.kaiteki.app](https://social.kaiteki.app/@kaiteki)
- Platforms: Web, Windows, Linux, Android
- Features: MastoAPI, Supports multiple backends

@ -41,6 +38,12 @@ This is a list of clients that are known to work with Akkoma.
- Platforms: Android
- Features: MastoAPI, No Streaming, Emoji Reactions, Text Formatting, FE Stickers

### Tusky
- Homepage: <https://tuskyapp.github.io/>
- Source Code: <https://github.com/tuskyapp/Tusky>

@ -48,18 +51,12 @@ This is a list of clients that are known to work with Akkoma.
- Platforms: Android
- Features: MastoAPI, No Streaming

### Subway Tooter
- Source Code: <https://github.com/tateisu/SubwayTooter/>
- Contact: [@SubwayTooter@mastodon.juggler.jp](https://mastodon.juggler.jp/@SubwayTooter)
- Platforms: Android
- Features: MastoAPI, Editing, Emoji Reactions (including custom emoji)

## Alternative Web Interfaces

### Enafore
- An actively developed fork of Pinafore with improved Akkoma support
- Homepage: <https://enafore.social/>
- Source Code: <https://github.com/enafore/enafore>
- Contact: [@enfore@enafore.social](https://meta.enafore.social/@enafore)
- Features: MastoAPI, No Streaming

### Sengi

@ -63,8 +63,6 @@ To add configuration to your config file, you can copy it from the base config.

* `local_bubble`: Array of domains representing instances closely related to yours. Used to populate the `bubble` timeline. e.g. `["example.com"]` (default: `[]`)
* `languages`: List of language codes used by the instance. This is used to try and set a default language from the frontend. It will try to find the first match between the languages set here and the user's browser languages, defaulting to the first language in this setting if there is no match. (default `["en"]`)
* `export_prometheus_metrics`: Enable prometheus metrics, served at `/api/v1/akkoma/metrics`, requiring the `admin:metrics` oauth scope.
* `privileged_staff`: Set to `true` to give moderators access to a few higher responsibility actions.
* `federated_timeline_available`: Set to `false` to remove access to the federated timeline for all users.

## :database
* `improved_hashtag_timeline`: Setting to force toggle / force disable the improved hashtag timeline. `:enabled` forces hashtags to be fetched from the `hashtags` table for the hashtag timeline. `:disabled` forces object-embedded hashtags to be used (slower). Keep it `:auto` for automatic behaviour (it is auto-set to `:enabled` [unless overridden] when HashtagsTableMigrator completes).

@ -106,60 +104,31 @@ To add configuration to your config file, you can copy it from the base config.

## Message rewrite facility

### :mrf
* `transparency`: Make the content of your Message Rewrite Facility settings public (via nodeinfo).
* `transparency_exclusions`: Exclude specific instance names from MRF transparency. The use of the exclusions feature will be disclosed in nodeinfo as a boolean value.
* `transparency_obfuscate_domains`: Show domains with `*` in the middle, to censor them if needed. For example, `ridingho.me` will show as `rid*****.me`
* `policies`: Message Rewrite Policy, either one or a list. Here are the ones available by default:
    * `Pleroma.Web.ActivityPub.MRF.NoOpPolicy`: Doesn’t modify activities (default).
    * `Pleroma.Web.ActivityPub.MRF.DropPolicy`: Drops all activities. It generally doesn’t make sense to use in production.
    * `Pleroma.Web.ActivityPub.MRF.ActivityExpirationPolicy`: Sets a default expiration on all posts made by users of the local instance. Requires `Pleroma.Workers.PurgeExpiredActivity` to be enabled for processing the scheduled deletions. (See [`:mrf_activity_expiration`](#mrf_activity_expiration))
    * `Pleroma.Web.ActivityPub.MRF.AntiFollowbotPolicy`: Drops follow requests from followbots. Users can still allow bots to follow them by first following the bot.
    * `Pleroma.Web.ActivityPub.MRF.AntiLinkSpamPolicy`: Rejects posts from likely spambots by rejecting posts from new users that contain links.
    * `Pleroma.Web.ActivityPub.MRF.EnsureRePrepended`: Rewrites posts to ensure that replies to posts with subjects do not have an identical subject and instead begin with re:.
    * `Pleroma.Web.ActivityPub.MRF.ForceBotUnlistedPolicy`: Makes all bot posts disappear from public timelines.
    * `Pleroma.Web.ActivityPub.MRF.HellthreadPolicy`: Blocks messages with too many mentions. (See [`mrf_hellthread`](#mrf_hellthread))
    * `Pleroma.Web.ActivityPub.MRF.KeywordPolicy`: Rejects posts, removes them from the federated timeline, or replaces keywords. (See [`:mrf_keyword`](#mrf_keyword)).
    * `Pleroma.Web.ActivityPub.MRF.MediaProxyWarmingPolicy`: Crawls attachments using their MediaProxy URLs so that the MediaProxy cache is primed.
    * `Pleroma.Web.ActivityPub.MRF.MentionPolicy`: Drops posts mentioning configurable users. (See [`:mrf_mention`](#mrf_mention)).
    * `Pleroma.Web.ActivityPub.MRF.NoEmptyPolicy`: Drops local activities which have no actual content (e.g. no attachments and only consisting of mentions).
    * `Pleroma.Web.ActivityPub.MRF.NoPlaceholderTextPolicy`: Strips content placeholders from posts (such as the dot from mastodon).
    * `Pleroma.Web.ActivityPub.MRF.ObjectAgePolicy`: Rejects or delists posts based on their age when received. (See [`:mrf_object_age`](#mrf_object_age)).
    * `Pleroma.Web.ActivityPub.MRF.RejectNewlyCreatedAccountNotesPolicy`: Rejects posts of users the server only recently learned about for a while. Great to block spam accounts. (See [`:mrf_reject_newly_created_account_notes`](#mrf_reject_newly_created_account_notes))
    * `Pleroma.Web.ActivityPub.MRF.RejectNonPublic`: Drops posts with non-public visibility settings (See [`:mrf_rejectnonpublic`](#mrf_rejectnonpublic)).
    * `Pleroma.Web.ActivityPub.MRF.SimplePolicy`: Restrict the visibility of activities from certain instances (See [`:mrf_simple`](#mrf_simple)).
    * `Pleroma.Web.ActivityPub.MRF.StealEmojiPolicy`: Steals all eligible emoji encountered in posts from remote instances (See [`:mrf_steal_emoji`](#mrf_steal_emoji))
    * `Pleroma.Web.ActivityPub.MRF.SubchainPolicy`: Selectively runs other MRF policies when messages match (See [`:mrf_subchain`](#mrf_subchain)).
    * `Pleroma.Web.ActivityPub.MRF.TagPolicy`: Applies policies to individual users based on tags, which can be set using pleroma-fe/admin-fe/any other app that supports the Pleroma Admin API. For example, it allows marking posts from individual users nsfw (sensitive).
    * `Pleroma.Web.ActivityPub.MRF.UserAllowListPolicy`: Drops all posts except from users specified in a list. (See [`:mrf_user_allowlist`](#mrf_user_allowlist))
    * `Pleroma.Web.ActivityPub.MRF.VocabularyPolicy`: Restricts activities to a configured set of vocabulary. (See [`:mrf_vocabulary`](#mrf_vocabulary)).

Additionally the following MRFs will *always* be applied and cannot be disabled:

* `Pleroma.Web.ActivityPub.MRF.DirectMessageDisabledPolicy`: Strips users limiting who can send them DMs from the recipients of non-eligible DMs
* `Pleroma.Web.ActivityPub.MRF.HashtagPolicy`: Depending on a post’s hashtags, it can be rejected, have its sensitive flag force-enabled, or be removed from the global timeline (See [`:mrf_hashtag`](#mrf_hashtag))
* `Pleroma.Web.ActivityPub.MRF.InlineQuotePolicy`: Appends a link to a post that quotes another post with the link to the quoted post, to ensure that software that does not understand quotes can have full context. (See [`:mrf_inline_quote`](#mrf_inline_quote))
* `Pleroma.Web.ActivityPub.MRF.NormalizeMarkup`: Passes inbound HTML through a scrubber to make sure it doesn't have anything unusual in it. (See [`:mrf_normalize_markup`](#mrf_normalize_markup))

## Federation
### :activitypub
* `unfollow_blocked`: Whether blocks result in people getting unfollowed
* `outgoing_blocks`: Whether to federate blocks to other instances
* `blockers_visible`: Whether a user can see the posts of users who blocked them
* `deny_follow_blocked`: Whether to disallow following an account that has blocked the user in question
* `sign_object_fetches`: Sign object fetches with HTTP signatures
* `authorized_fetch_mode`: Require HTTP signatures for AP fetches
* `max_collection_objects`: The maximum number of objects to fetch from a remote AP collection.

An example combining these settings is sketched below.
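
This is a sketch only; the keys are the ones documented above, but the values are illustrative rather than the documented defaults:

```elixir
config :pleroma, :activitypub,
  # Unfollow people you block (illustrative value)
  unfollow_blocked: true,
  # Tell other instances about your blocks (illustrative value)
  outgoing_blocks: true,
  # Sign outgoing object fetches with HTTP signatures
  sign_object_fetches: true,
  # Require HTTP signatures for fetches from others
  authorized_fetch_mode: false
```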

### MRF policies

!!! note
@ -175,7 +144,6 @@ Additionally the following MRFs will *always* be applied and cannot be disabled:
* `report_removal`: List of instances to reject reports from and the reason for doing so.
* `avatar_removal`: List of instances to strip avatars from and the reason for doing so.
* `banner_removal`: List of instances to strip banners from and the reason for doing so.
* `background_removal`: List of instances to strip user backgrounds from and the reason for doing so.
* `reject_deletes`: List of instances to reject deletions from and the reason for doing so.

#### :mrf_subchain

@ -238,9 +206,7 @@ config :pleroma, :mrf_user_allowlist, %{

#### :mrf_steal_emoji
* `hosts`: List of hosts to steal emojis from
* `rejected_shortcodes`: Regex-list of shortcodes to reject
* `size_limit`: File size limit (in bytes), checked before download if possible (and the remote server is honest), otherwise checked again before the emoji is saved to disk
* `download_unknown_size`: whether to download an emoji when the remote server doesn’t report its size in advance
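
A sketch of what this can look like; the host, shortcode pattern, and size values are placeholders:

```elixir
config :pleroma, :mrf_steal_emoji,
  # Hosts to steal emoji from (placeholder)
  hosts: ["emoji.example.tld"],
  # Regex-list of shortcodes to skip (placeholder)
  rejected_shortcodes: [~r/^nsfw_/],
  # 50 kB limit, checked before download where possible (placeholder)
  size_limit: 50_000,
  download_unknown_size: false
```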

#### :mrf_activity_expiration

@ -256,24 +222,14 @@ Notes:
- The hashtags in the configuration do not have a leading `#`.
- This MRF policy is always enabled; if you want to disable it, you have to set empty lists.

#### :mrf_reject_newly_created_account_notes
After initially encountering a user, all their posts
will be rejected for the configured time (in seconds).
Only drops posts. Follows, reposts, etc. are not affected.

* `age`: Time below which to reject (in seconds)

An example (86400 seconds = 24 hours):

```elixir
config :pleroma, :mrf_reject_newly_created_account_notes, age: 86400
```

#### :mrf_inline_quote
* `prefix`: what prefix to prepend to quoted URLs

#### :mrf_normalize_markup
* `scrub_policy`: the scrubbing module to use (by default a built-in HTML sanitiser)

## Pleroma.User

@ -338,7 +294,7 @@ config :pleroma, :frontends,

* `:primary` - The frontend that will be served at `/`
* `:admin` - The frontend that will be served at `/pleroma/admin`
* `:swagger` - Config for developers to act as an API reference, served at `/pleroma/swaggerui/` (trailing slash _needed_). Disabled by default.
* `:mastodon` - The mastodon-fe configuration. This shouldn't need to be changed. This is served at `/web` when installed.

A sketch of the corresponding config block follows.
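
Illustrative only; the frontend names and refs below are placeholders, not recommendations:

```elixir
config :pleroma, :frontends,
  # Served at `/`
  primary: %{"name" => "pleroma-fe", "ref" => "stable"},
  # Served at `/pleroma/admin`
  admin: %{"name" => "admin-fe", "ref" => "stable"}
```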

### :static\_fe

@ -400,8 +356,7 @@ This section describe PWA manifest instance-specific values. Currently this opti

## :media_proxy

* `enabled`: Enables proxying of remote media to the instance’s proxy
* `base_url`: The base URL to access a user-uploaded file. Using a (sub)domain distinct from the instance endpoint is **strongly** recommended.
* `proxy_opts`: All options defined in `Pleroma.ReverseProxy` documentation, defaults to `[max_body_length: (25*1_048_576)]`.
* `whitelist`: List of hosts with scheme to bypass the mediaproxy (e.g. `https://example.com`)
* `invalidation`: options for removing media from the cache after its object is deleted:

@ -602,9 +557,9 @@ the source code is here: [kocaptcha](https://github.com/koto-bank/kocaptcha). Th

* `uploader`: Which one of the [uploaders](#uploaders) to use.
* `filters`: List of [upload filters](#upload-filters) to use.
* `link_name`: When enabled, Akkoma will add a `name` parameter to the url of the upload, for example `https://instance.tld/media/corndog.png?name=corndog.png`. This is needed to provide the correct filename in Content-Disposition headers.
* `base_url`: The base URL to access a user-uploaded file; MUST be configured explicitly. Using a (sub)domain distinct from the instance endpoint is **strongly** recommended. A good value might be `https://media.myakkoma.instance/media/`.
* `proxy_opts`: Proxy options, see `Pleroma.ReverseProxy` documentation.
* `filename_display_max_length`: Set max length of a filename to display. 0 = no limit. Default: 30.

|
@ -643,29 +598,20 @@ config :ex_aws, :s3,
|
||||||
|
|
||||||
### Upload filters
|
### Upload filters
|
||||||
|
|
||||||
#### Pleroma.Upload.Filter.Dedupe
|
|
||||||
|
|
||||||
**Always** active; cannot be turned off.
|
|
||||||
Renames files to their hash and prevents duplicate files filling up the disk.
|
|
||||||
No specific configuration.
|
|
||||||
|
|
||||||
#### Pleroma.Upload.Filter.AnonymizeFilename
|
#### Pleroma.Upload.Filter.AnonymizeFilename
|
||||||
|
|
||||||
This filter replaces the declared filename (not the path) of an upload.
|
This filter replaces the filename (not the path) of an upload. For complete obfuscation, add
|
||||||
|
`Pleroma.Upload.Filter.Dedupe` before AnonymizeFilename.
|
||||||
|
|
||||||
* `text`: Text to replace filenames in links. If empty, `{random}.extension` will be used. You can get the original filename extension by using `{extension}`, for example `custom-file-name.{extension}`.
|
* `text`: Text to replace filenames in links. If empty, `{random}.extension` will be used. You can get the original filename extension by using `{extension}`, for example `custom-file-name.{extension}`.
|
||||||
|
|
||||||
#### Pleroma.Upload.Filter.Exiftool.StripMetadata
|
#### Pleroma.Upload.Filter.Dedupe
|
||||||
|
|
||||||
This filter strips metadata with Exiftool leaving color profiles and orientation intact.
|
No specific configuration.
|
||||||
|
|
||||||
* `purge`: List of Exiftool tag names or tag group names to purge
|
#### Pleroma.Upload.Filter.Exiftool
|
||||||
* `preserve`: List of Exiftool tag names or tag group names to preserve even if they occur in the purge list
|
|
||||||
|
|
||||||
|
This filter only strips the GPS and location metadata with Exiftool leaving color profiles and attributes intact.
|
||||||
#### Pleroma.Upload.Filter.Exiftool.ReadDescription
|
|
||||||
|
|
||||||
This filter reads the ImageDescription and iptc:Caption-Abstract fields with Exiftool so clients can prefill the media description field.
|
|
||||||
|
|
||||||
No specific configuration.
|
No specific configuration.
|
||||||
|
|
||||||
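
Putting these together, a sketch of a filter chain; which filters you enable and their order is up to you, and `Dedupe` runs regardless:

```elixir
config :pleroma, Pleroma.Upload,
  filters: [
    # Replace declared filenames with random ones
    Pleroma.Upload.Filter.AnonymizeFilename,
    # Strip metadata, keeping color profiles and orientation
    Pleroma.Upload.Filter.Exiftool.StripMetadata
  ]
```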
|
@ -1012,15 +958,6 @@ config :ueberauth, Ueberauth,
|
||||||
]
|
]
|
||||||
```
|
```
|
||||||
|
|
||||||
You may also need to set up your frontend to use oauth logins. For example, for `akkoma-fe`:
|
|
||||||
|
|
||||||
```elixir
|
|
||||||
config :pleroma, :frontend_configurations,
|
|
||||||
pleroma_fe: %{
|
|
||||||
loginMethod: "token"
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## Link parsing
|
## Link parsing
|
||||||
|
|
||||||
### :uri_schemes
|
### :uri_schemes
|
||||||
|
|
|
@ -60,4 +60,4 @@ config :pleroma, :frontends,

Then run the [pleroma.frontend cli task](../../administration/CLI_tasks/frontend) with the name of `swagger-ui` to install the distribution files.

You will now be able to view documentation at `/pleroma/swaggerui`.

@ -17,16 +17,6 @@ This sets the Akkoma application server to only listen to the localhost interfac

This sets the `secure` flag on Akkoma’s session cookie. This makes sure that the cookie is only accepted over encrypted HTTPS connections. This implicitly renames the cookie from `pleroma_key` to `__Host-pleroma-key`, which enforces some restrictions (see [cookie prefixes](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Set-Cookie#Cookie_prefixes)).

### `Pleroma.Upload, :uploader, :base_url`

> Recommended value: *anything on a different domain than the instance endpoint; e.g. https://media.myinstance.net/*

Uploads are user controlled and (unless you’re running a true single-user
instance) should therefore not be considered trusted. But the domain is used
as a privilege boundary, e.g. by HTTP content security policy and ActivityPub.
Having uploads on the same domain enabled several past vulnerabilities
that could be exploited by malicious users.
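
A sketch of the corresponding setting, reusing the example media domain from the recommendation above:

```elixir
config :pleroma, Pleroma.Upload,
  base_url: "https://media.myinstance.net/media/"
```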

### `:http_security`

> Recommended value: `true`

@ -6,18 +6,29 @@ With the `mediaproxy` function you can use nginx to cache this content, so users

## Activate it

* Set up a subdomain for the proxy with its nginx config on the same machine
* Edit the nginx config for the upload/MediaProxy subdomain to point to the subdomain that has been set up
* Append the following to your `prod.secret.exs` or `dev.secret.exs` (depending on which mode your instance is running):
```elixir
# Replace media.example.tld with the subdomain you set up earlier
config :pleroma, :media_proxy,
  enabled: true,
  proxy_opts: [
    redirect_on_failure: true
  ],
  base_url: "https://media.example.tld"
```
You **really** should use a subdomain to serve proxied files; while we will fix bugs resulting from this, serving arbitrary remote content on your main domain namespace is a significant attack surface.

* Restart nginx and Akkoma

@ -60,7 +60,7 @@ Example of `my-awesome-theme.json` where we add the name "My Awesome Theme"

### Set as default theme

Now we can set the new theme as default in the [Pleroma FE configuration](https://docs-fe.akkoma.dev/stable/CONFIGURATION/).

Example of adding the new theme in the back-end config files:
```elixir

@ -130,26 +130,59 @@ config :pleroma, :http_security,
  enabled: false
```

In the Nginx config, add the following into the `location /` block:
```nginx
add_header X-XSS-Protection "0";
add_header X-Permitted-Cross-Domain-Policies none;
add_header X-Frame-Options DENY;
add_header X-Content-Type-Options nosniff;
add_header Referrer-Policy same-origin;
```

Change the `listen` directive to the following:
```nginx
listen 127.0.0.1:14447;
```

Set `server_name` to your i2p address.

Restart i2pd and reload Nginx:
```
systemctl restart i2pd.service --no-block
systemctl reload nginx.service
```
*Notice:* The restart command initiates a graceful shutdown process; i2pd stops after it finishes routing transit tunnels (up to 10 minutes).

@ -35,7 +35,6 @@ Once `SimplePolicy` is enabled, you can configure various groups in the `:mrf_si
* `media_removal`: Servers in this group will have media stripped from incoming messages.
* `avatar_removal`: Avatars from these servers will be stripped from incoming messages.
* `banner_removal`: Banner images from these servers will be stripped from incoming messages.
* `background_removal`: User background images from these servers will be stripped from incoming messages.
* `report_removal`: Servers in this group will have their reports (flags) rejected.
* `federated_timeline_removal`: Servers in this group will have their messages unlisted from the public timelines by flipping the `to` and `cc` fields.
* `reject_deletes`: Deletion requests will be rejected from these servers.

@ -62,32 +61,6 @@ config :pleroma, :mrf_simple,

The effects of MRF policies can be very drastic. It is important to use this functionality carefully. Always try to talk to an admin before writing an MRF policy concerning their instance.

## Hiding or Obfuscating Policies

You can opt out of publicly displaying all MRF policies, or hide or obfuscate only selected domains.

To just hide everything, set:

```elixir
config :pleroma, :mrf,
  ...
  transparency: false,
```

To hide or obfuscate only select entries, use:

```elixir
config :pleroma, :mrf,
  ...
  transparency_obfuscate_domains: ["handholdi.ng", "badword.com"],
  transparency_exclusions: [{"ghost.club", "even a fragment is too spoopy for humans"}]
```

## More MRF Policies

See the [documentation cheatsheet](cheatsheet.md)
for all available MRF policies and their options.

## Writing your own MRF Policy

As discussed above, the MRF system is a modular system that supports pluggable policies. This means that an admin may write a custom MRF policy in Elixir or any other language that runs on the Erlang VM, by specifying the module name in the `policies` config setting. A minimal sketch of such a module follows.
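
This sketch is an illustration only: the module name is hypothetical, and the exact behaviour module and callback shapes can differ between versions, so check the policies shipped with your release before copying it.

```elixir
defmodule Site.MRF.PassthroughPolicy do
  # Hypothetical module; behaviour name and callbacks are a sketch.
  @behaviour Pleroma.Web.ActivityPub.MRF.Policy

  # Return {:ok, activity} to pass the activity on (possibly rewritten),
  # or {:reject, reason} to drop it.
  @impl true
  def filter(activity), do: {:ok, activity}

  # Surfaced via nodeinfo when MRF transparency is enabled.
  @impl true
  def describe, do: {:ok, %{}}
end
```

It would then be enabled by adding `Site.MRF.PassthroughPolicy` to the `policies` list.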

@ -74,23 +74,56 @@ config :pleroma, :http_security,
  enabled: false
```

In the Nginx config, add the following into the `location /` block:
```nginx
add_header X-XSS-Protection "0";
add_header X-Permitted-Cross-Domain-Policies none;
add_header X-Frame-Options DENY;
add_header X-Content-Type-Options nosniff;
add_header Referrer-Policy same-origin;
```

Change the `listen` directive to the following:
```nginx
listen 127.0.0.1:8099;
```

Set the `server_name` to your onion address.

Reload Nginx:
```
systemctl reload nginx
```

@ -25,14 +25,11 @@ Tuning the BEAM requires you provide a config file normally called [vm.args](htt

`ExecStart=/usr/bin/elixir --erl '-args_file /opt/akkoma/config/vm.args' -S /usr/bin/mix phx.server`

If using an OTP release, set the `RELEASE_VM_ARGS` environment variable to the path to the vm.args file.

Check your OS documentation to adopt a similar strategy on other platforms.

### Virtual Machine and/or few CPU cores

Disable busy-waiting. This should generally be done if you're on a platform that does burst scheduling, like AWS, or if you're running other
services on the same machine.

**vm.args:**

@ -42,8 +39,6 @@ services on the same machine.
+sbwtdio none
```
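
The hunk above only shows the tail of the flag list; the full set of busy-wait flags this section usually refers to is the following (verify against the vm.args shipped with your release):

```
+sbwt none
+sbwtdcpu none
+sbwtdio none
```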

These settings are enabled by default for OTP releases.

### Dedicated Hardware

Enable more busy waiting and increase the internal maximum limit of BEAM processes and ports. You can use this if you run on dedicated hardware, but it is not necessary.

@ -4,10 +4,47 @@ Akkoma performance is largely dependent on performance of the underlying databas

## PGTune

[PgTune](https://pgtune.leopard.in.ua) can be used to get recommended settings. Make sure to set the DB type to "Online transaction processing system" for optimal performance. Also set the number of connections to between 25 and 30. This will allow each connection to have access to more resources while still leaving some room for running maintenance tasks while the instance is still running.

It is also recommended to not use the "Network Storage" option.

If your server runs other services, you may want to take that into account. E.g. if you have 4G of RAM but 1G of it is already used for other services, it may be better to tell PGTune you only have 3G.

In the end, PGTune only provides recommended settings; you can always try to fine-tune further.

@ -33,7 +33,6 @@ indexes faster when it can process many posts in a single batch.

> config :pleroma, Pleroma.Search.Meilisearch,
>    url: "http://127.0.0.1:7700/",
>    private_key: "private key",
>    search_key: "search key",
>    initial_indexing_chunk_size: 100_000

Information about setting up meilisearch can be found in the

@ -46,7 +45,7 @@ is hardly usable on a somewhat big instance.
### Private key authentication (optional)

To set the private key, use the `MEILI_MASTER_KEY` environment variable when starting. After setting the _master key_,
you have to get the _private key_ and possibly the _search key_, which are actually used for authentication.

=== "OTP"
    ```sh
@ -58,11 +57,7 @@ you have to get the _private key_ and possibly _search key_, which are actually
    mix pleroma.search.meilisearch show-keys <your master key here>
    ```

You will see a "Default Admin API Key"; this is the key you actually put into
your configuration file as `private_key`. You should also see a
"Default Search API key"; put this into your config as `search_key`.
If your version of Meilisearch only showed the former,
just leave `search_key` completely unset in Akkoma's config.

### Initial indexing

@ -6,7 +6,7 @@ as soon as the post is received by your instance.

## Nginx

The following are excerpts from the [suggested nginx config](https://akkoma.dev/AkkomaGang/akkoma/src/branch/develop/installation/nginx/akkoma.nginx) that demonstrate the necessary config for the media proxy to work.

A `proxy_cache_path` must be defined, for example:

@ -1033,6 +1033,7 @@ Most of the settings will be applied in `runtime`, this means that you don't nee
- `:pools`
- partially settings inside these keys:
    - `:seconds_valid` in `Pleroma.Captcha`
    - `:upload_limit` in `:instance`

- Params:

@ -1093,6 +1094,7 @@ List of settings which support only full update by subkey:
{"tuple": [":uploader", "Pleroma.Uploaders.Local"]},
{"tuple": [":filters", ["Pleroma.Upload.Filter.Dedupe"]]},
{"tuple": [":link_name", true]},
{"tuple": [":proxy_opts", [
    {"tuple": [":redirect_on_failure", false]},
    {"tuple": [":max_body_length", 1048576]},

@ -1,146 +0,0 @@
# Akkoma API

Request authentication (if required) and parameters work the same as for [Pleroma API](pleroma_api.md).

## `/api/v1/akkoma/preferred_frontend/available`
### Returns the available frontends which can be picked as the preferred choice
* Method: `GET`
* Authentication: not required
* Params: none
* Response: JSON
* Example response:
```json
["pleroma-fe/stable"]
```

!!! note
    There’s also a browser UI under `/akkoma/frontend`
    for interactively querying and changing this.

## `/api/v1/akkoma/preferred_frontend`
### Configures the preferred frontend of this session
* Method: `PUT`
* Authentication: not required
* Params:
    * `frontend_name`: STRING containing one of the available frontends
* Response: JSON
* Example response:
```json
{"frontend_name":"pleroma-fe/stable"}
```

!!! note
    There’s also a browser UI under `/akkoma/frontend`
    for interactively querying and changing this.

## `/api/v1/akkoma/metrics`
### Provides metrics for Prometheus to scrape
* Method: `GET`
* Authentication: required (admin:metrics)
* Params: none
* Response: text
* Example response:
```
# HELP pleroma_remote_users_total
# TYPE pleroma_remote_users_total gauge
pleroma_remote_users_total 25
# HELP pleroma_local_statuses_total
# TYPE pleroma_local_statuses_total gauge
pleroma_local_statuses_total 17
# HELP pleroma_domains_total
# TYPE pleroma_domains_total gauge
pleroma_domains_total 4
# HELP pleroma_local_users_total
# TYPE pleroma_local_users_total gauge
pleroma_local_users_total 3
...
```

## `/api/v1/akkoma/translation/languages`
### Returns available source and target languages for automated text translation
* Method: `GET`
* Authentication: required
* Params: none
* Response: JSON
* Example response:
```json
{
    "source": [
        {"code":"LV", "name":"Latvian"},
        {"code":"ZH", "name":"Chinese (traditional)"},
        {"code":"EN-US", "name":"English (American)"}
    ],
    "target": [
        {"code":"EN-GB", "name":"English (British)"},
        {"code":"JP", "name":"Japanese"}
    ]
}
```

## `/api/v1/akkoma/frontend_settings/:frontend_name`
### Lists all configuration profiles of the selected frontend for the current user
* Method: `GET`
* Authentication: required
* Params: none
* Response: JSON
* Example response:
```json
[
    {"name":"default","version":31}
]
```

## `/api/v1/akkoma/frontend_settings/:frontend_name/:profile_name`
### Returns the full selected frontend settings profile of the current user
* Method: `GET`
* Authentication: required
* Params: none
* Response: JSON
* Example response:
```json
{
    "version": 31,
    "settings": {
        "streaming": true,
        "conversationDisplay": "tree",
        ...
    }
}
```

## `/api/v1/akkoma/frontend_settings/:frontend_name/:profile_name`
### Updates the frontend settings profile
* Method: `PUT`
* Authentication: required
* Params:
    * `version`: INTEGER
    * `settings`: JSON object containing the entire new settings
* Response: JSON
* Example response:
```json
{
    "streaming": false,
    "conversationDisplay": "tree",
    ...
}
```

!!! note
    The `version` field must be increased by exactly one on each update

## `/api/v1/akkoma/frontend_settings/:frontend_name/:profile_name`
### Drops the specified frontend settings profile
* Method: `DELETE`
* Authentication: required
* Params: none
* Response: JSON
* Example response:
```json
{"deleted":"ok"}
```

## `/api/v1/timelines/bubble`
### Returns a timeline for the local and closely related instances
Works like all other Mastodon-API timeline queries with the documented
[Akkoma-specific additions and tweaks](./differences_in_mastoapi_responses.md#timelines).

@ -1,6 +1,6 @@
# Differences in Mastodon API responses from vanilla Mastodon

An Akkoma instance can be identified by "<Mastodon version> (compatible; Akkoma <version>)" present in the `version` field in the response from `/api/v1/instance`

## Flake IDs

@ -8,27 +8,19 @@ Akkoma uses 128-bit ids as opposed to Mastodon's 64 bits. However, just like Mas
|
||||||
|
|
||||||
## Timelines
|
## Timelines
|
||||||
|
|
||||||
In addition to Mastodon’s timelines, there is also a “bubble timeline” showing
|
|
||||||
posts from the local instance and a set of closely related instances as chosen
|
|
||||||
by the administrator. It is available under `/api/v1/timelines/bubble`.

Adding the parameter `with_muted=true` to the timeline queries will also return activities by muted (not by blocked!) users.

Adding the parameter `exclude_visibilities` to the timeline queries will exclude the statuses with the given visibilities. The parameter accepts an array of visibility types (`public`, `unlisted`, `private`, `direct`), e.g., `exclude_visibilities[]=direct&exclude_visibilities[]=private` *(a combined example request follows below)*.

Adding the parameter `reply_visibility` to the public, bubble or home timeline queries will filter replies. Possible values: without the parameter (default) all replies are shown, `following` - replies directed to you or users you follow, `self` - replies directed to you.

Adding the parameter `instance=lain.com` to the public timeline will show only statuses originating from `lain.com` (or any remote instance).

All but the direct timeline accept these parameters:

- `only_media`: show only statuses with media attached
- `remote`: show only remote statuses

Home, public, hashtag & list timelines further accept:

- `local`: show only local statuses
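
A combined sketch of the parameters above (instance and token are placeholders; the bracket syntax is passed through verbatim):

```shell
# Hypothetical example: home timeline including muted users' posts,
# but excluding direct and private statuses.
curl -G "https://example.org/api/v1/timelines/home" \
  -H "Authorization: Bearer <token>" \
  --data-urlencode "with_muted=true" \
  --data-urlencode "exclude_visibilities[]=direct" \
  --data-urlencode "exclude_visibilities[]=private"
```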

## Statuses

@ -121,12 +113,6 @@ Has these additional fields under the `pleroma` object:

- `notification_settings`: object, can be absent. See `/api/v1/pleroma/notification_settings` for the parameters/keys returned.
- `favicon`: nullable URL string, Favicon image of the user's instance

Has these additional fields under the `akkoma` object:

- `instance`: nullable object with metadata about the user’s instance
  - `status_ttl_days`: nullable int, default time after which statuses are deleted
  - `permit_followback`: boolean, whether follows from followed accounts are auto-approved

### Source

Has these additional fields under the `pleroma` object:

@ -4,6 +4,7 @@

The following endpoints are additionally present in our actors.

- `oauthRegistrationEndpoint` (`http://litepub.social/ns#oauthRegistrationEndpoint`)

### oauthRegistrationEndpoint

@ -11,279 +12,6 @@ Points to MastodonAPI `/api/v1/apps` for now.

See <https://docs.joinmastodon.org/methods/apps/>

## Emoji reactions

Emoji reactions are implemented as a new activity type `EmojiReact`.
A single user is allowed to react multiple times with different emoji to the
same post. However, they may only react at most once with the same emoji.
Repeated reactions from the same user with the same emoji are to be ignored.
Emoji reactions are also distinct from `Like` activities and a user may both
`Like` and react to a post.

!!! note
    Misskey also supports emoji reactions, but the implementation differs.
    It equates likes and reactions and only allows a single reaction per post.

The emoji is placed in the `content` field of the activity
and the `object` property points to the note being reacted to.

Emoji can either be any Unicode emoji sequence or a custom emoji.
The latter must place its shortcode, including enclosing colons,
into `content` and put the emoji object inside the `tag` property.
The `tag` property MAY be omitted for Unicode emoji.

An example reaction with a Unicode emoji:

```json
{
  "@context": [
    "https://www.w3.org/ns/activitystreams",
    "https://example.org/schemas/litepub-0.1.jsonld",
    {
      "@language": "und"
    }
  ],
  "type": "EmojiReact",
  "id": "https://example.org/activities/23143872a0346141",
  "actor": "https://example.org/users/akko",
  "nickname": "akko",
  "to": ["https://remote.example/users/diana", "https://example.org/users/akko/followers"],
  "cc": ["https://www.w3.org/ns/activitystreams#Public"],
  "content": "🧡",
  "object": "https://remote.example/objects/9f0e93499d8314a9"
}
```

An example reaction with a custom emoji:

```json
{
  "@context": [
    "https://www.w3.org/ns/activitystreams",
    "https://example.org/schemas/litepub-0.1.jsonld",
    {
      "@language": "und"
    }
  ],
  "type": "EmojiReact",
  "id": "https://example.org/activities/d75586dec0541650",
  "actor": "https://example.org/users/akko",
  "nickname": "akko",
  "to": ["https://remote.example/users/diana", "https://example.org/users/akko/followers"],
  "cc": ["https://www.w3.org/ns/activitystreams#Public"],
  "content": ":mouse:",
  "object": "https://remote.example/objects/9f0e93499d8314a9",
  "tag": [{
    "type": "Emoji",
    "id": null,
    "name": "mouse",
    "icon": {
      "type": "Image",
      "url": "https://example.org/emoji/mouse/mouse.png"
    }
  }]
}
```

!!! note
    Although an emoji reaction can only contain a single emoji,
    for compatibility with older versions of Pleroma and Akkoma,
    it is recommended to wrap the emoji object in a single-element array.

When reacting with a remote custom emoji, do not include the remote domain in `content`’s shortcode
*(unlike in our REST API, which needs the domain)*:

```json
{
  "@context": [
    "https://www.w3.org/ns/activitystreams",
    "https://example.org/schemas/litepub-0.1.jsonld",
    {
      "@language": "und"
    }
  ],
  "type": "EmojiReact",
  "id": "https://example.org/activities/7993dcae98d8d5ec",
  "actor": "https://example.org/users/akko",
  "nickname": "akko",
  "to": ["https://remote.example/users/diana", "https://example.org/users/akko/followers"],
  "cc": ["https://www.w3.org/ns/activitystreams#Public"],
  "content": ":hug:",
  "object": "https://remote.example/objects/9f0e93499d8314a9",
  "tag": [{
    "type": "Emoji",
    "id": "https://other.example/emojis/hug",
    "name": "hug",
    "icon": {
      "type": "Image",
      "url": "https://other.example/files/b71cea432b3fad67.webp"
    }
  }]
}
```

Emoji reactions can be retracted using a standard `Undo` activity:

```json
{
  "@context": [
    "https://www.w3.org/ns/activitystreams",
    "http://example.org/schemas/litepub-0.1.jsonld",
    {
      "@language": "und"
    }
  ],
  "type": "Undo",
  "id": "http://example.org/activities/4685792e-efb6-4309-b508-ae4f355dd695",
  "actor": "https://example.org/users/akko",
  "to": ["https://remote.example/users/diana", "https://example.org/users/akko/followers"],
  "cc": ["https://www.w3.org/ns/activitystreams#Public"],
  "object": "https://example.org/activities/23143872a0346141"
}
```

## User profile backgrounds

Akkoma federates user profile backgrounds the same way as Sharkey.

An actor's ActivityPub representation contains an additional
`backgroundUrl` property containing an `Image` object. This property
belongs to the `"sharkey": "https://joinsharkey.org/ns#"` namespace.

## Quote Posts

Akkoma allows referencing a single other note as a quote,
which will be prominently displayed in the interface.

The quoted post is referenced by its ActivityPub id in the `quoteUri` property.

!!! note
    Old Misskey only understood, and modern Misskey still prefers,
    the `_misskey_quote` property for this. Similarly, some other older
    software used `quoteUrl` or `quoteURL`.
    All current implementations with quote support understand `quoteUri`.

Example:

```json
{
  "@context": [
    "https://www.w3.org/ns/activitystreams",
    "https://example.org/schemas/litepub-0.1.jsonld",
    {
      "@language": "und"
    }
  ],
  "type": "Note",
  "id": "https://example.org/activities/85717e587f95d5c0",
  "actor": "https://example.org/users/akko",
  "to": ["https://remote.example/users/diana", "https://example.org/users/akko/followers"],
  "cc": ["https://www.w3.org/ns/activitystreams#Public"],
  "context": "https://example.org/contexts/1",
  "content": "Look at that!",
  "quoteUri": "http://remote.example/status/85717e587f95d5c0",
  "contentMap": {
    "en": "Look at that!"
  },
  "source": {
    "content": "Look at that!",
    "mediaType": "text/plain"
  },
  "published": "2024-04-06T23:40:28Z",
  "updated": "2024-04-06T23:40:28Z",
  "attachment": [],
  "tag": []
}
```

## Threads

Akkoma assigns all posts of the same thread the same `context`. This is a
standard ActivityPub property but its meaning is left vague. Akkoma will
always treat posts with identical `context` as part of the same thread.

`context` must not be assumed to hold any meaning or be dereferenceable.

Incoming posts without `context` will be assigned a new context.

!!! note
    Mastodon uses the non-standard `conversation` property for the same purpose
    *(named after an older OStatus property)*. For incoming posts without
    `context` but with `conversation`, Akkoma will use the value from
    `conversation` to fill in `context`.
    For outgoing posts Akkoma will duplicate the context into `conversation`.

## Post Source

Unlike Mastodon, Akkoma supports drafting posts in multiple source formats
besides plaintext, like Markdown or MFM. The original input is preserved
in the standard ActivityPub `source` property *(not supported by Mastodon)*.
Still, `content` will always be present and contain the prerendered HTML form.

Supported `mediaType` values include *(see the request sketch after this list)*:

- `text/plain`
- `text/markdown`
- `text/bbcode`
- `text/x.misskeymarkdown`
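
On the client side, the desired source format can be selected with the `content_type` parameter of the Mastodon-compatible status-creation endpoint. A sketch, with instance and token as placeholders:

```shell
# Hypothetical example: create a status drafted in Markdown.
# Akkoma renders it to HTML for `content` and keeps the original in `source`.
curl -X POST "https://example.org/api/v1/statuses" \
  -H "Authorization: Bearer <token>" \
  -d "status=**Look** at that!" \
  -d "content_type=text/markdown"
```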

## Post Language

!!! note
    This is also supported in and compatible with Mastodon, but since
    joinmastodon.org doesn’t document it yet it is included here.
    [GoToSocial](https://docs.gotosocial.org/en/latest/federation/federating_with_gotosocial/#content-contentmap-and-language)
    has a more refined version of this which can correctly deal with multiple language entries.

A post can indicate its language by including a `contentMap` object
which contains a sub-key named after the language’s ISO 639-1 code,
whose content is identical to the post’s `content` field.

Currently Akkoma, just like Mastodon, only properly supports a single language entry;
in case of multiple entries a random language will be picked.
Furthermore, Akkoma currently only reads the `content` field
and never the value from `contentMap`.
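
On the client side this surfaces as the ordinary `language` parameter when posting. A sketch with placeholder values:

```shell
# Hypothetical example: declare the status language, which is what
# populates the contentMap key when the post is federated.
curl -X POST "https://example.org/api/v1/statuses" \
  -H "Authorization: Bearer <token>" \
  -d "status=Look at that!" \
  -d "language=en"
```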

## Local post scope

Posts using this scope will never federate to other servers,
but for the sake of completeness it is listed here.

In addition to the usual scopes *(public, unlisted, followers-only, direct)*
Akkoma supports a “local” post scope. Such posts will not federate to
other instances and will only be shown to logged-in users on the same instance.
They are included in the local timeline.
This may be useful to discuss or announce instance-specific policies and topics.

A post is addressed to the local scope by including `<base url of instance>/#Public`
in its `to` field. E.g. if the instance is on `https://example.org` it would use
`https://example.org/#Public`.

An implementation creating a new post MUST NOT address both the local and
general public scope `as:Public` at the same time. A post addressing the local
scope MUST NOT be sent to other instances or be possible to fetch by other
instances regardless of potential other listed addressees.

When receiving a remote post addressing both the public scope and what appears
to be a local-scope identifier, the post SHOULD be treated without assigning any
special meaning to the potential local-scope identifier.

!!! note
    Misskey-derivatives have a similar concept of non-federated posts,
    however those are also shown publicly on the local web interface
    and are thus visible to non-members.

## List post scope

Messages originally addressed to a custom list will contain
a `listMessage` field with an unresolvable pseudo ActivityPub id.

# Deprecated and Removed Extensions

The following extensions were used in the past but have been dropped.
Documentation is retained here as a reference and since old objects might
still contain related fields.

## Actor endpoints

The following endpoints used to be present:

- `uploadMedia` (`https://www.w3.org/ns/activitystreams#uploadMedia`)

### uploadMedia

Inspired by <https://www.w3.org/wiki/SocialCG/ActivityPub/MediaUpload>, it is part of the ActivityStreams namespace because it used to be part of the ActivityPub specification and got removed from it.

@ -292,8 +20,9 @@ Content-Type: multipart/form-data

Parameters:
- (required) `file`: The file being uploaded
- (optional) `description`: A plain-text description of the media, for accessibility purposes.

Response: HTTP 201 Created with the object in the body; no `Location` header is provided as the object doesn't have an `id`.

The object given in the response should then be inserted into an Object's `attachment` field.
@ -1,141 +0,0 @@

# Nodeinfo Extensions

Akkoma currently implements versions 2.0 and 2.1 of the nodeinfo spec,
but provides the following additional fields.

## metadata

The spec leaves the content of `metadata` up to implementations
and indeed Akkoma adds many fields here apart from the commonly
found `nodeName` and `nodeDescription` fields.
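
To inspect these fields on a live instance, the nodeinfo document can be fetched and filtered on the command line. A sketch assuming `jq` is installed and that the instance serves the 2.1 schema at the conventional path (the well-known document at `/.well-known/nodeinfo` lists the exact URLs if it differs):

```shell
# Hypothetical example: list the advertised server features of an instance.
curl -s "https://example.org/nodeinfo/2.1.json" | jq '.metadata.features'
```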

### accountActivationRequired
Whether or not users need to confirm their email before completing registration.
*(boolean)*

!!! note
    Not to be confused with account approval, where each registration needs to
    be manually approved by an admin. Account approval has no nodeinfo entry.

### features

Array of strings denoting supported server features. E.g. a server supporting
quote posts should include a `"quote_posting"` entry here.

A non-exhaustive list of possible features:

- `polls`
- `quote_posting`
- `editing`
- `bubble_timeline`
- `pleroma_emoji_reactions` *(Unicode emoji)*
- `custom_emoji_reactions`
- `akkoma_api`
- `akkoma:machine_translation`
- `mastodon_api`
- `pleroma_api`

### federatedTimelineAvailable
Whether or not the “federated timeline”, i.e. a timeline containing posts from
the entire known network, is made available.
*(boolean)*

### federation
This section is optional and can contain various custom keys describing federation policies.
The following are required to be present:

- `enabled` *(boolean)* whether the server federates at all

A non-exhaustive list of optional keys:

- `exclusions` *(boolean)* whether some federation policies are withheld
- `mrf_simple` *(object)* describes how the Simple MRF policy is configured

### fieldsLimits
A JSON object documenting restrictions for user account info fields.
All properties are integers.

- `maxFields` maximum number of account info fields local users can create
- `maxRemoteFields` maximum number of account info fields remote users can have
  before the user gets rejected or fields truncated
- `nameLength` maximum length of a field’s name
- `valueLength` maximum length of a field’s value

### invitesEnabled
Whether or not signing up via invite codes is possible.
*(boolean)*

### localBubbleInstances
Array of domains (as strings) of other instances chosen
by the admin which are shown in the bubble timeline.

### mailerEnabled
Whether or not the instance can send out emails.
*(boolean)*

### nodeDescription
Human-friendly description of this instance
*(string)*

### nodeName
Human-friendly name of this instance
*(string)*

### pollLimits
JSON object containing limits for polls created by local users.
All values are integers.

- `max_options` maximum number of poll options
- `max_option_chars` maximum characters per poll option
- `min_expiration` minimum time in seconds a poll must be open for
- `max_expiration` maximum time a poll is allowed to be open for

### postFormats
Array of strings containing media types for supported post source formats.
A non-exhaustive list of possible values:

- `text/plain`
- `text/markdown`
- `text/bbcode`
- `text/x.misskeymarkdown`

### private
Whether or not unauthenticated API access is permitted.
*(boolean)*

### privilegedStaff
Whether or not moderators are trusted to perform some
additional tasks, such as issuing password reset emails.

### publicTimelineVisibility
JSON object containing boolean-valued keys reporting
if a given timeline can be viewed without login.

- `local`
- `federated`
- `bubble`

### restrictedNicknames
Array of strings listing nicknames forbidden to be used during signup.

### skipThreadContainment
Whether broken threads are filtered out
*(boolean)*

### staffAccounts
Array containing ActivityPub IDs of local accounts
with some form of elevated privilege on the instance.

### suggestions
JSON object containing info on whether the interaction-based
Mastodon `/api/v1/suggestions` feature is enabled and optionally
additional implementation-defined fields with more details
on e.g. how suggested users are selected.

!!! note
    This has no relation to the newer `/api/v2/suggestions` API
    which also (or exclusively) contains staff-curated entries.

- `enabled` *(boolean)* whether or not user recommendations are enabled

### uploadLimits
JSON object documenting various upload-related size limits.
All values are integers and in bytes.

- `avatar` maximum size of uploaded user avatars
- `banner` maximum size of uploaded user profile banners
- `background` maximum size of uploaded user profile backgrounds
- `general` maximum size for all other kinds of uploads

@ -145,13 +145,47 @@ If you want to open your newly installed instance to the world, you should run n

```shell
doas apk add nginx
```

* Copy the example nginx configuration to the nginx folder

```shell
doas cp /opt/akkoma/installation/nginx/akkoma.nginx /etc/nginx/conf.d/akkoma.conf
```

* Before starting nginx, edit the configuration and change it to your needs. You must change `server_name`. You can use `nano` (install with `apk add nano` if missing).

* Enable and start nginx:

```shell
doas rc-update add nginx
doas rc-service nginx start
```

* Set up your SSL cert, using your method of choice or certbot. If using certbot, first install it:

```shell
doas apk add certbot certbot-nginx
```

and then set it up:

```shell
doas mkdir -p /var/lib/letsencrypt/
doas certbot --email <your@emailaddress> -d <yourdomain> -d <media_domain> --nginx
```

If that doesn't work the first time, add `--dry-run` to further attempts to avoid being ratelimited as you identify the issue, and do not remove it until the dry run succeeds. A common source of problems is nginx config syntax errors; this can be checked for by running `nginx -t`.

To automatically renew, set up a cron job like so:

```shell
# Enable the crond service
doas rc-update add crond
doas rc-service crond start

# Test that renewals work
doas certbot renew --cert-name yourinstance.tld --nginx --dry-run

# Add the renewal task to cron
echo '#!/bin/sh
certbot renew --cert-name yourinstance.tld --nginx
' | doas tee /etc/periodic/daily/renew-akkoma-cert
doas chmod +x /etc/periodic/daily/renew-akkoma-cert
```

#### OpenRC service

@ -136,17 +136,16 @@ If you want to open your newly installed instance to the world, you should run n

```shell
sudo pacman -S nginx
```

* Copy the example nginx configuration:

```shell
sudo cp /opt/akkoma/installation/nginx/akkoma.nginx /etc/nginx/conf.d/akkoma.conf
```

* Before starting nginx, edit the configuration and change it to your needs (e.g. change the server name, change the cert paths)
* Enable and start nginx:

```shell
sudo systemctl enable --now nginx.service
```

* Set up your SSL cert, using your method of choice or certbot. If using certbot, first install it:

@ -159,18 +158,32 @@ and then set it up:

```shell
sudo mkdir -p /var/lib/letsencrypt/
sudo certbot --email <your@emailaddress> -d <yourdomain> -d <media_domain> --nginx
```

If that doesn't work the first time, add `--dry-run` to further attempts to avoid being ratelimited as you identify the issue, and do not remove it until the dry run succeeds. A common source of problems is nginx config syntax errors; this can be checked for by running `nginx -t`.

To make sure renewals work, enable the appropriate systemd timer:

```shell
sudo systemctl enable --now certbot-renew.timer
```

Certificate renewal should be handled automatically by Certbot from now on.

#### Other webserver/proxies

@ -155,6 +155,23 @@ If you want to open your newly installed instance to the world, you should run n

```shell
sudo apt install nginx
```

* Copy the example nginx configuration and activate it:

```shell
sudo cp /opt/akkoma/installation/nginx/akkoma.nginx /etc/nginx/sites-available/akkoma.nginx
sudo ln -s /etc/nginx/sites-available/akkoma.nginx /etc/nginx/sites-enabled/akkoma.nginx
sudo systemctl enable --now nginx.service
```

* Set up your SSL cert, using your method of choice or certbot. If using certbot, first install it:

```shell
sudo apt install certbot python3-certbot-nginx
```

and then set it up:

```shell
sudo mkdir -p /var/lib/letsencrypt/
sudo certbot --email <your@emailaddress> -d <yourdomain> -d <media_domain> --nginx
```

If that doesn't work the first time, add `--dry-run` to further attempts to avoid being ratelimited as you identify the issue, and do not remove it until the dry run succeeds. A common source of problems is nginx config syntax errors; this can be checked for by running `nginx -t`.

Certificate renewal should be handled automatically by Certbot from now on.

#### Other webserver/proxies

You can find example configurations for them in `/opt/akkoma/installation/`.

@ -125,26 +125,7 @@ cp docker-resources/Caddyfile.example docker-resources/Caddyfile

Then edit the TLD in your caddyfile to the domain you're serving on.

Copy the commented-out `caddy` section in `docker-compose.yml` into a new file called `docker-compose.override.yml` like so:

```yaml
version: "3.7"

services:
  proxy:
    image: caddy:2-alpine
    restart: unless-stopped
    links:
      - akkoma
    ports: [
      "443:443",
      "80:80"
    ]
    volumes:
      - ./docker-resources/Caddyfile:/etc/caddy/Caddyfile
      - ./caddy-data:/data
      - ./caddy-config:/config
```

then run `docker compose up -d` again.

#### Running a reverse proxy on the host

@ -174,12 +155,6 @@ git pull

```shell
docker compose restart akkoma db
```

### Modifying the Docker services

If you want to modify the services defined in the docker compose file, you can
create a new file called `docker-compose.override.yml`. There you can add any
overrides or additional services without worrying about git conflicts when a
new release comes out.
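
A minimal sketch of such an override, assuming you wanted to bolt an extra service onto the stock compose file (the `watchtower` image here is purely an illustration, not something this document recommends):

```shell
# Hypothetical example: create an override file adding one extra service,
# then re-create the stack. Compose merges this with docker-compose.yml.
cat > docker-compose.override.yml <<'EOF'
version: "3.7"

services:
  watchtower:
    image: containrrr/watchtower
    restart: unless-stopped
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
EOF
docker compose up -d
```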

#### Further reading

{! installation/further_reading.include !}

@ -135,6 +135,23 @@ If you want to open your newly installed instance to the world, you should run n

```shell
sudo dnf install nginx
```

* Copy the example nginx configuration and activate it:

```shell
sudo cp /opt/akkoma/installation/nginx/akkoma.nginx /etc/nginx/conf.d/akkoma.conf
sudo systemctl enable --now nginx.service
```

* Set up your SSL cert, using your method of choice or certbot. If using certbot, first install it:

```shell
sudo dnf install certbot python3-certbot-nginx
```

and then set it up:

```shell
sudo certbot --email <your@emailaddress> -d <yourdomain> -d <media_domain> --nginx
```

If that doesn't work the first time, add `--dry-run` to further attempts to avoid being ratelimited as you identify the issue, and do not remove it until the dry run succeeds. A common source of problems is nginx config syntax errors; this can be checked for by running `nginx -t`.

Certificate renewal should be handled automatically by Certbot from now on.

#### Other webserver/proxies

You can find example configurations for them in `/opt/akkoma/installation/`.

@ -6,9 +6,7 @@ probably install frontends.

These are no longer bundled with the distribution and need an extra
command to install.

You **must** run frontend management tasks as the akkoma user,
the same way you downloaded the build or cloned the git repo before.
But otherwise, for most installations, the following will suffice:

=== "OTP"

@ -30,3 +28,4 @@ But otherwise, for most installations, the following will suffice:

For more customised installations, refer to [Frontend Management](../../configuration/frontend_management)

@ -1,8 +1,8 @@

## Required dependencies

* PostgreSQL 12+
* Elixir 1.14+ (currently tested up to 1.16)
* Erlang OTP 25+ (currently tested up to OTP26)
* git
* file / libmagic
* gcc (clang might also work)
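
To verify an existing system's toolchain against this list, something like the following can be used (a sketch; package names and exact commands vary by distro):

```shell
# Hypothetical quick check of the required toolchain.
psql --version       # client version; the server should be PostgreSQL 12+
elixir --version     # prints both the Elixir and Erlang/OTP versions
git --version
file --version       # provided by libmagic
gcc --version
```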

@ -201,6 +201,25 @@ Assuming you want to open your newly installed federated social network to, well

```
include sites-enabled/*;
```

* Copy the example nginx configuration and activate it:

@ -218,24 +237,9 @@ Pay special attention to the line that begins with `ssl_ecdh_curve`. It is stong

```shell
# rc-update add nginx default
# rc-service nginx start
```

* Set up your SSL cert, using your method of choice or certbot. If using certbot, install it if you haven't already:

```shell
# emerge --ask app-crypt/certbot app-crypt/certbot-nginx
```

and then set it up:

```shell
# mkdir -p /var/lib/letsencrypt/
# certbot --email <your@emailaddress> -d <yourdomain> -d <media_domain> --nginx
```

If that doesn't work the first time, add `--dry-run` to further attempts to avoid being ratelimited as you identify the issue, and do not remove it until the dry run succeeds. A common source of problems is nginx config syntax errors; this can be checked for by running `nginx -t`.

If you are using certbot, it is HIGHLY recommended you set up a cron job that renews your certificate, and that you install the suggested `certbot-nginx` plugin. If you don't do these things, you only have yourself to blame when your instance breaks suddenly because you forgot about it.

First, ensure that the command you will be installing into your crontab works.

@ -21,33 +21,6 @@ fork of Akkoma - luckily this isn't very hard.

You'll need to update the backend, then possibly the frontend, depending
on your setup.

## Backup diverging features

As time goes on, Akkoma and Pleroma added or removed different features
and reorganised the database in different ways. If you want to be able to
migrate back to Pleroma without losing any affected data, you’ll want to
make a backup before starting the migration.
If you're not interested in migrating back, skip this section
*(although it might be a good idea to temporarily keep a full DB backup
just in case something unexpected happens during migration)*.

As of 2024-02 you will want to keep a backup of:

- the entire `chats` and `chat_message_references` tables

The following columns are not deleted by a migration to Akkoma, but a migration
back to Pleroma or future Akkoma upgrades might affect them, so perhaps back them up as well:

- the `birthday` of users and their `show_birthday` setting
- the `expires_at` key in the `user_relationships` table
  *(used by temporary mutes)*

The way cached instance metadata is stored differs, but since those
will be refetched and updated anyway, there’s no need for a backup.

Best check all newer migrations unique to Akkoma/Pleroma
to get an up-to-date picture of what needs to be kept.
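
One way to take such a targeted backup is with `pg_dump`'s table filters plus a `COPY` for the individual columns. A sketch assuming the database and role are both named `akkoma` (adjust names, paths and connection options to your setup):

```shell
# Hypothetical example: dump only the diverging tables...
pg_dump -U akkoma -t chats -t chat_message_references akkoma > akkoma_chats_backup.sql

# ...and export the affected user columns separately.
psql -U akkoma -d akkoma \
  -c "COPY (SELECT id, birthday, show_birthday FROM users) TO STDOUT WITH CSV HEADER" \
  > users_birthday_backup.csv
```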

## From Source

If you're running the source Akkoma install, you'll need to set the

@ -61,7 +34,16 @@ git pull -r

```bash
# to run "git merge stable" instead (or develop if you want)
```

And compile as usual.

## From OTP

@ -71,44 +53,15 @@ This will just be setting the update URL - find your flavour from the [mapping o

```bash
export FLAVOUR=[the flavour you found above]

./bin/pleroma_ctl update --zip-url https://akkoma-updates.s3-website.fr-par.scw.cloud/stable/akkoma-$FLAVOUR.zip
```

When updating in the future, you can just use

```bash
./bin/pleroma_ctl update --branch stable
```

## Database Migrations
### WARNING - Migrating from Pleroma past 2022-08
If you are on Pleroma stable >= 2.5.0 or Pleroma develop, and
have updated since 2022-08, you may have issues with database migrations.

Please first roll back the given migrations:

=== "OTP"
    ```bash
    ./bin/pleroma_ctl rollback --migrations-path priv/repo/optional_migrations/pleroma_develop_rollbacks -n5
    ```
=== "From Source"
    ```bash
    MIX_ENV=prod mix ecto.rollback --migrations-path priv/repo/optional_migrations/pleroma_develop_rollbacks -n5
    ```

### Applying Akkoma Database Migrations

Just run

=== "OTP"
    ```bash
    ./bin/pleroma_ctl migrate
    ```
=== "From Source"
    ```bash
    MIX_ENV=prod mix ecto.migrate
    ```

## Frontend changes

Akkoma comes with a few frontend changes as well as backend ones,

@ -177,4 +130,3 @@ MIX_ENV=prod mix ecto.rollback --to 20210416051708

Then switch back to Pleroma for updates (similar to how it was done to migrate to Akkoma), and remove the front-ends. The front-ends are installed in the `frontends` folder in the [static directory](../configuration/static_dir.md). Once you are back to Pleroma, you will need to run the database migrations again. See the Pleroma documentation for this.
After this, use your previous backups to restore data from diverging features.

@ -14,7 +14,7 @@ Note: the packages are not required with the current default settings of Akkoma.

`ImageMagick` is a set of tools to create, edit, compose, or convert bitmap images.

It is required for the following Akkoma features:
* `Pleroma.Upload.Filters.Mogrify`, `Pleroma.Upload.Filters.Mogrifun` upload filters (related config: `Pleroma.Upload/filters` in `config/config.exs`)
* Media preview proxy for still images (related config: `media_preview_proxy/enabled` in `config/config.exs`)

## `ffmpeg`

@ -29,5 +29,4 @@ It is required for the following Akkoma features:

`exiftool` is a media file metadata reader/writer.

It is required for the following Akkoma features:
* `Pleroma.Upload.Filters.Exiftool.StripMetadata` upload filter (related config: `Pleroma.Upload/filters` in `config/config.exs`)
* `Pleroma.Upload.Filters.Exiftool.ReadDescription` upload filter (related config: `Pleroma.Upload/filters` in `config/config.exs`)
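
To confirm the binaries Akkoma shells out to are actually available on the `PATH`, a quick check along these lines can help (a sketch; exact package names depend on your distro):

```shell
# Hypothetical sanity check for the optional media tooling.
convert -version   # ImageMagick
ffmpeg -version
exiftool -ver
```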

@ -5,11 +5,11 @@

This guide covers an installation using an OTP release. To install Akkoma from source, please check out the corresponding guide for your distro.

## Pre-requisites
* A machine running Linux with GNU (e.g. Debian, Ubuntu) or musl (e.g. Alpine) libc and an `x86_64` or `arm64` CPU you have root access to. If you are not sure if it's compatible, see the [Detecting flavour section](#detecting-flavour) below.
* For installing OTP releases on RedHat-based distros like Fedora and Centos Stream, please follow [this guide](./otp_redhat_en.md) instead.
* A (sub)domain pointed to the machine

You will be running commands as root. If you aren't root already, please elevate your privileges by executing `sudo -i`/`su`.

While in theory OTP releases are possible to install on any compatible machine, for the sake of simplicity this guide focuses only on Debian/Ubuntu and Alpine.
|
@ -176,24 +176,29 @@ su akkoma -s $SHELL -lc "./bin/pleroma stop"
|
||||||
|
|
||||||
### Setting up nginx and getting Let's Encrypt SSL certificaties
|
### Setting up nginx and getting Let's Encrypt SSL certificaties
|
||||||
|
|
||||||
|
#### Get a Let's Encrypt certificate
|
||||||
|
```sh
|
||||||
|
certbot certonly --standalone --preferred-challenges http -d yourinstance.tld
|
||||||
|
```
|
||||||
|
|
||||||
#### Copy Akkoma nginx configuration to the nginx folder
|
#### Copy Akkoma nginx configuration to the nginx folder
|
||||||
|
|
||||||
The location of nginx configs is dependent on the distro
|
The location of nginx configs is dependent on the distro
|
||||||
|
|
||||||
=== "Alpine"
|
=== "Alpine"
|
||||||
```
|
```
|
||||||
cp /opt/akkoma/installation/akkoma.nginx /etc/nginx/conf.d/akkoma.conf
|
cp /opt/akkoma/installation/nginx/akkoma.nginx /etc/nginx/conf.d/akkoma.conf
|
||||||
```
|
```
|
||||||
|
|
||||||
=== "Debian/Ubuntu"
|
=== "Debian/Ubuntu"
|
||||||
```
|
```
|
||||||
cp /opt/akkoma/installation/akkoma.nginx /etc/nginx/sites-available/akkoma.conf
|
cp /opt/akkoma/installation/nginx/akkoma.nginx /etc/nginx/sites-available/akkoma.conf
|
||||||
ln -s /etc/nginx/sites-available/akkoma.conf /etc/nginx/sites-enabled/akkoma.conf
|
ln -s /etc/nginx/sites-available/akkoma.conf /etc/nginx/sites-enabled/akkoma.conf
|
||||||
```
|
```
|
||||||
|
|
||||||
If your distro does not have either of those you can append `include /etc/nginx/akkoma.conf` to the end of the http section in /etc/nginx/nginx.conf and
|
If your distro does not have either of those you can append `include /etc/nginx/akkoma.conf` to the end of the http section in /etc/nginx/nginx.conf and
|
||||||
```sh
|
```sh
|
||||||
cp /opt/akkoma/installation/akkoma.nginx /etc/nginx/akkoma.conf
|
cp /opt/akkoma/installation/nginx/akkoma.nginx /etc/nginx/akkoma.conf
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Edit the nginx config
|
#### Edit the nginx config
|
||||||
|
@ -204,14 +209,6 @@ $EDITOR path-to-nginx-config
|
||||||
# Verify that the config is valid
|
# Verify that the config is valid
|
||||||
nginx -t
|
nginx -t
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Get a Let's Encrypt certificate
|
|
||||||
```sh
|
|
||||||
certbot --nginx -d yourinstance.tld -d media.yourinstance.tld
|
|
||||||
```
|
|
||||||
|
|
||||||
If that doesn't work the first time, add `--dry-run` to further attempts to avoid being ratelimited as you identify the issue, and do not remove it until the dry run succeeds. A common source of problems are nginx config syntax errors; this can be checked for by running `nginx -t`.
|
|
||||||
|
|
||||||
#### Start nginx
|
#### Start nginx
|
||||||
|
|
||||||
=== "Alpine"
|
=== "Alpine"
|
||||||
|
@ -255,19 +252,32 @@ If everything worked, you should see Akkoma-FE when visiting your domain. If tha
|
||||||
## Post installation
|
## Post installation
|
||||||
|
|
||||||
### Setting up auto-renew of the Let's Encrypt certificate
|
### Setting up auto-renew of the Let's Encrypt certificate
|
||||||
|
```sh
|
||||||
|
# Create the directory for webroot challenges
|
||||||
|
mkdir -p /var/lib/letsencrypt
|
||||||
|
|
||||||
|
# Uncomment the webroot method
|
||||||
|
$EDITOR path-to-nginx-config
|
||||||
|
|
||||||
|
# Verify that the config is valid
|
||||||
|
nginx -t
|
||||||
|
```
|
||||||
|
|
||||||
=== "Alpine"
|
=== "Alpine"
|
||||||
```
|
```
|
||||||
|
# Restart nginx
|
||||||
|
rc-service nginx restart
|
||||||
|
|
||||||
# Start the cron daemon and make it start on boot
|
# Start the cron daemon and make it start on boot
|
||||||
rc-service crond start
|
rc-service crond start
|
||||||
rc-update add crond
|
rc-update add crond
|
||||||
|
|
||||||
# Ensure the webroot menthod and post hook is working
|
# Ensure the webroot menthod and post hook is working
|
||||||
certbot renew --cert-name yourinstance.tld --nginx --dry-run
|
certbot renew --cert-name yourinstance.tld --webroot -w /var/lib/letsencrypt/ --dry-run --post-hook 'rc-service nginx reload'
|
||||||
|
|
||||||
# Add it to the daily cron
|
# Add it to the daily cron
|
||||||
echo '#!/bin/sh
|
echo '#!/bin/sh
|
||||||
certbot renew --cert-name yourinstance.tld --nginx
|
certbot renew --cert-name yourinstance.tld --webroot -w /var/lib/letsencrypt/ --post-hook "rc-service nginx reload"
|
||||||
' > /etc/periodic/daily/renew-akkoma-cert
|
' > /etc/periodic/daily/renew-akkoma-cert
|
||||||
chmod +x /etc/periodic/daily/renew-akkoma-cert
|
chmod +x /etc/periodic/daily/renew-akkoma-cert
|
||||||
|
|
||||||
|
@ -276,7 +286,22 @@ If everything worked, you should see Akkoma-FE when visiting your domain. If tha
|
||||||
```
|
```
|
||||||
|
|
||||||
=== "Debian/Ubuntu"
|
=== "Debian/Ubuntu"
|
||||||
This should be automatically enabled with the `certbot-renew.timer` systemd unit.
|
```
|
||||||
|
# Restart nginx
|
||||||
|
systemctl restart nginx
|
||||||
|
|
||||||
|
# Ensure the webroot menthod and post hook is working
|
||||||
|
certbot renew --cert-name yourinstance.tld --webroot -w /var/lib/letsencrypt/ --dry-run --post-hook 'systemctl reload nginx'
|
||||||
|
|
||||||
|
# Add it to the daily cron
|
||||||
|
echo '#!/bin/sh
|
||||||
|
certbot renew --cert-name yourinstance.tld --webroot -w /var/lib/letsencrypt/ --post-hook "systemctl reload nginx"
|
||||||
|
' > /etc/cron.daily/renew-akkoma-cert
|
||||||
|
chmod +x /etc/cron.daily/renew-akkoma-cert
|
||||||
|
|
||||||
|
# If everything worked the output should contain /etc/cron.daily/renew-akkoma-cert
|
||||||
|
run-parts --test /etc/cron.daily
|
||||||
|
```
|
||||||
|
|
||||||
## Create your first user and set as admin
|
## Create your first user and set as admin
|
||||||
```sh
|
```sh
|
||||||
|
|
|

@ -82,7 +82,6 @@ Other than things bundled in the OTP release Akkoma depends on:

* PostgreSQL (also utilizes extensions in postgresql-contrib)
* nginx (could be swapped with another reverse proxy but this guide covers only it)
* certbot (for Let's Encrypt certificates, could be swapped with another ACME client, but this guide covers only it)
    * If you are using certbot, also install the `python3-certbot-nginx` package for the nginx plugin
* libmagic/file

First, update your system, if not already done:
@ -170,10 +169,16 @@ sudo -Hu akkoma ./bin/pleroma stop
|
||||||
|
|
||||||
### Setting up nginx and getting Let's Encrypt SSL certificaties
|
### Setting up nginx and getting Let's Encrypt SSL certificaties
|
||||||
|
|
||||||
|
#### Get a Let's Encrypt certificate
|
||||||
|
|
||||||
|
```shell
|
||||||
|
certbot certonly --standalone --preferred-challenges http -d yourinstance.tld
|
||||||
|
```
|
||||||
|
|
||||||
#### Copy Akkoma nginx configuration to the nginx folder
|
#### Copy Akkoma nginx configuration to the nginx folder
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
cp /opt/akkoma/installation/akkoma.nginx /etc/nginx/conf.d/akkoma.conf
|
cp /opt/akkoma/installation/nginx/akkoma.nginx /etc/nginx/conf.d/akkoma.conf
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Edit the nginx config
|
#### Edit the nginx config
|
||||||
|
@ -190,15 +195,8 @@ sudo nginx -t
|
||||||
sudo systemctl start nginx
|
sudo systemctl start nginx
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Get a Let's Encrypt certificate
|
At this point if you open your (sub)domain in a browser you should see a 502 error, that's because Akkoma is not started yet.
|
||||||
|
|
||||||
```shell
|
|
||||||
sudo certbot --email <your@emailaddress> -d <yourdomain> -d <media_domain> --nginx
|
|
||||||
```
|
|
||||||
|
|
||||||
If that doesn't work the first time, add `--dry-run` to further attempts to avoid being ratelimited as you identify the issue, and do not remove it until the dry run succeeds. A common source of problems are nginx config syntax errors; this can be checked for by running `nginx -t`.
|
|
||||||
|
|
||||||
If you're successful with obtaining the certificates, opening your (sub)domain in a browser will result in a 502 error, since Akkoma hasn't been started yet.
|
|
||||||
|
|
||||||
### Setting up a system service
|
### Setting up a system service
|
||||||
|
|
||||||
|
@ -241,11 +239,19 @@ sudo nginx -t
|
||||||
# Restart nginx
|
# Restart nginx
|
||||||
sudo systemctl restart nginx
|
sudo systemctl restart nginx
|
||||||
|
|
||||||
# Test that renewals work properly
|
# Ensure the webroot menthod and post hook is working
|
||||||
sudo certbot renew --cert-name yourinstance.tld --nginx --dry-run
|
sudo certbot renew --cert-name yourinstance.tld --webroot -w /var/lib/letsencrypt/ --dry-run --post-hook 'systemctl reload nginx'
|
||||||
|
|
||||||
|
# Add it to the daily cron
|
||||||
|
echo '#!/bin/sh
|
||||||
|
certbot renew --cert-name yourinstance.tld --webroot -w /var/lib/letsencrypt/ --post-hook "systemctl reload nginx"
|
||||||
|
' > /etc/cron.daily/renew-akkoma-cert
|
||||||
|
sudo chmod +x /etc/cron.daily/renew-akkoma-cert
|
||||||
|
|
||||||
|
# If everything worked the output should contain /etc/cron.daily/renew-akkoma-cert
|
||||||
|
sudo run-parts --test /etc/cron.daily
|
||||||
```
|
```
|
||||||
|
|
||||||
Assuming the commands were run successfully, certbot should be able to renew your certificates automatically via the `certbot-renew.timer` systemd unit.
|
|
||||||
|
|
||||||
## Create your first user and set as admin
|
## Create your first user and set as admin
|
||||||
```shell
|
```shell
|
||||||
|
|
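The commands under this heading are cut off in the diff above; for reference, the conventional OTP-release invocation (nickname and email are placeholders) looks like:

```sh
# Run from the installation directory as the akkoma user;
# --admin grants the new account admin rights
su akkoma -s $SHELL -lc "./bin/pleroma_ctl user new joeuser joeuser@sld.tld --admin"
```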
2
elixir_buildpack.config
Normal file

@@ -0,0 +1,2 @@
+elixir_version=1.14.3
+erlang_version=25.3

@@ -19,9 +19,6 @@ Environment="MIX_ENV=prod"
; Don't listen epmd on 0.0.0.0
Environment="ERL_EPMD_ADDRESS=127.0.0.1"

-; Don't busy wait
-Environment="ERL_AFLAGS=+sbwt none +sbwtdcpu none +sbwtdio none"
-
; Make sure that all paths fit your installation.
; Path to the home directory of the user running the Akkoma service.
Environment="HOME=/var/lib/akkoma"

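A sketch of how such unit settings are usually customized without editing the shipped file; `akkoma` as the unit name matches the service above, the environment value is the one removed in this hunk:

```sh
# Create a drop-in override instead of editing the packaged unit
sudo systemctl edit akkoma

# In the editor that opens, add e.g.:
#   [Service]
#   Environment="ERL_AFLAGS=+sbwt none +sbwtdcpu none +sbwtdio none"

# Then reload unit files and restart the service
sudo systemctl daemon-reload
sudo systemctl restart akkoma
```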
@@ -60,7 +60,7 @@ ServerTokens Prod
Include /etc/letsencrypt/options-ssl-apache.conf

# Uncomment the following to enable MediaProxy caching on disk
-#CacheRoot /var/tmp/akkoma-media-cache/
+#CacheRoot /tmp/akkoma-media-cache/
#CacheDirLevels 1
#CacheDirLength 2
#CacheEnable disk /proxy

@@ -16,7 +16,7 @@
SCRIPTNAME=${0##*/}

# mod_disk_cache directory
-CACHE_DIRECTORY="/var/tmp/akkoma-media-cache"
+CACHE_DIRECTORY="/tmp/akkoma-media-cache"

## Removes an item via the htcacheclean utility
## $1 - the filename, can be a pattern .

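For context, a minimal sketch of the kind of `htcacheclean` call this script wraps; the cache path follows the develop-side default above, and the 1 GiB size limit is an arbitrary example:

```sh
# Trim the Apache disk cache, niced (-n), with verbose output (-v)
htcacheclean -n -v -p /var/tmp/akkoma-media-cache -l 1024M
```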
@@ -12,22 +12,26 @@ example.tld {
        output file /var/log/caddy/akkoma.log
    }

+    encode gzip
+
    # this is explicitly IPv4 since Pleroma.Web.Endpoint binds on IPv4 only
    # and `localhost.` resolves to [::0] on some systems: see issue #930
    reverse_proxy 127.0.0.1:4000

-    @mediaproxy path /media/* /proxy/*
-    handle @mediaproxy {
-        redir https://media.example.tld{uri} permanent
-    }
+    # Uncomment if using a separate media subdomain
+    #@mediaproxy path /media/* /proxy/*
+    #handle @mediaproxy {
+    #    redir https://media.example.tld{uri} permanent
+    #}
}

-media.example.tld {
-    @mediaproxy path /media/* /proxy/*
-    reverse_proxy @mediaproxy 127.0.0.1:4000 {
-        transport http {
-            response_header_timeout 10s
-            read_timeout 15s
-        }
-    }
-}
+# Uncomment if using a separate media subdomain
+#media.example.tld {
+#    @mediaproxy path /media/* /proxy/*
+#    reverse_proxy @mediaproxy 127.0.0.1:4000 {
+#        transport http {
+#            response_header_timeout 10s
+#            read_timeout 15s
+#        }
+#    }
+#}

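Whichever side of this change ends up deployed, Caddy can sanity-check the file before applying it; these are stock Caddy commands, and the config path is the common packaged default:

```sh
# Validate the Caddyfile, then apply it without downtime
caddy validate --config /etc/caddy/Caddyfile
systemctl reload caddy
```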
@@ -1,43 +1,23 @@
#!/sbin/openrc-run
supervisor=supervise-daemon
-no_new_privs="yes"
+command_user=akkoma:akkoma
+command_background=1
+# Ask process to terminate within 30 seconds, otherwise kill it
+retry="SIGTERM/30/SIGKILL/5"
pidfile="/var/run/akkoma.pid"
+directory=/opt/akkoma
+healthcheck_delay=60
+healthcheck_timer=30
+no_new_privs="yes"

-# Ask process first to terminate itself within 60s, otherwise kill it
-retry="SIGTERM/60/SIGKILL/5"
+: ${akkoma_port:-4000}

-# if you really want to use start-stop-daemon instead,
-# also put the following in the config:
-# command_background=1
+# Needs OpenRC >= 0.42
+#respawn_max=0
+#respawn_delay=5

-# Adjust defaults as needed in /etc/conf.d/akkoma;
-# no need to directly edit the service file
-command_user="${command_user:-akkoma:akkoma}"
-directory="${directory:-/var/lib/akkoma/akkoma}"
-akkoma_port="${akkoma_port:-4000}"
-# whether to allow connecting a remote elixir shell to the running Akkoma instance
-akkoma_console=${akkoma_console:-NO}
-
-output_log="${output_log:-/var/log/akkoma}"
-error_log="${error_log:-/var/log/akkoma}"
-
-# 0 means unlimited restarts
-respawn_max="${respawn_max:-0}"
-respawn_delay="${respawn_delay:-5}"
-# define respawn period to only count crashes within a
-# sliding time window towards respawn_max, e.g.:
-# respawn_period=2850
-
-healthcheck_delay="${healthcheck_delay:-60}"
-healthcheck_timer="${healthcheck_timer:-30}"
-
-MIX_ENV=prod
-ERL_EPMD_ADDRESS="${ERL_EPMD_ADDRESS:-127.0.0.1}"
-ERL_AFLAGS="${ERL_AFLAGS:-+sbwt none +sbwtdcpu none +sbwtdio none}"
-supervise_daemon_args="${supervise_daemon_args} --env MIX_ENV=${MIX_ENV}"
-supervise_daemon_args="${supervise_daemon_args} --env ERL_EPMD_ADDRESS=${ERL_EPMD_ADDRESS}"
-supervise_daemon_args="${supervise_daemon_args} --env ERL_AFLAGS='${ERL_AFLAGS}'"
-
+# put akkoma_console=YES in /etc/conf.d/akkoma if you want to be able to
+# connect to akkoma via an elixir console
if yesno "${akkoma_console}"; then
    command=elixir
    command_args="--name akkoma@127.0.0.1 --erl '-kernel inet_dist_listen_min 9001 inet_dist_listen_max 9001 inet_dist_use_interface {127,0,0,1}' -S mix phx.server"

@@ -51,24 +31,13 @@ else
    command_args="phx.server"
fi

+export MIX_ENV=prod
+export ERL_EPMD_ADDRESS=127.0.0.1
+
depend() {
    need nginx postgresql
}

-start_pre() {
-    # Ensure logfile ownership and perms are alright
-    checkpath --file --owner "$command_user" "$output_log" "$error_log" \
-        || eerror "Logfile(s) not owned by $command_user, or not a file!"
-    checkpath --writable "$output_log" "$error_log" \
-        || eerror "Logfile(s) not writable!"
-
-    # If a recompile is needed perform it with lowest prio
-    # (delaying the actual start) to avoid hogging too much
-    # CPU from other services
-    cd "$directory"
-    doas -u "${command_user%%:*}" env MIX_ENV="$MIX_ENV" nice -n 19 "$command" compile
-}
-
healthcheck() {
    # put akkoma_health=YES in /etc/conf.d/akkoma if you want healthchecking
    # and make sure you have curl installed

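On the develop side every knob above is overridable from the conf.d file; a minimal sketch of an `/etc/conf.d/akkoma` (values are examples, not recommendations):

```sh
# /etc/conf.d/akkoma -- overrides consumed by the develop-side init script
command_user=akkoma:akkoma
directory=/var/lib/akkoma/akkoma
akkoma_port=4000
akkoma_console=NO
# stop respawning after 10 crashes within a sliding ~47-minute window
respawn_max=10
respawn_period=2850
```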
@@ -1,9 +1,12 @@
# default nginx site config for Akkoma
#
-# See the documentation at docs.akkoma.dev for your particular distro/OS for
-# installation instructions.
+# Simple installation instructions:
+# 1. Install your TLS certificate, possibly using Let's Encrypt.
+# 2. Replace 'example.tld' with your instance's domain wherever it appears.
+# 3. Copy this file to /etc/nginx/sites-available/ and then add a symlink to it
+#    in /etc/nginx/sites-enabled/ and run 'nginx -s reload' or restart nginx.

-proxy_cache_path /var/tmp/akkoma-media-cache levels=1:2 keys_zone=akkoma_media_cache:10m max_size=1g
+proxy_cache_path /tmp/akkoma-media-cache levels=1:2 keys_zone=akkoma_media_cache:10m max_size=10g
                 inactive=720m use_temp_path=off;

# this is explicitly IPv4 since Pleroma.Web.Endpoint binds on IPv4 only

@@ -12,19 +15,25 @@ upstream phoenix {
    server 127.0.0.1:4000 max_fails=5 fail_timeout=60s;
}

-# If you are setting up TLS certificates without certbot, uncomment the
-# following to enable HTTP -> HTTPS redirects. Certbot users don't need to do
-# this as it will automatically do this for you.
-# server {
-#     server_name example.tld media.example.tld;
-#
-#     listen 80;
-#     listen [::]:80;
-#
-#     location / {
-#         return 301 https://$server_name$request_uri;
-#     }
-# }
+server {
+    server_name example.tld;
+
+    listen 80;
+    listen [::]:80;
+
+    # Uncomment this if you need to use the 'webroot' method with certbot. Make sure
+    # that the directory exists and that it is accessible by the webserver. If you followed
+    # the guide, you already ran 'mkdir -p /var/lib/letsencrypt' to create the folder.
+    # You may need to load this file with the ssl server block commented out, run certbot
+    # to get the certificate, and then uncomment it.
+    #
+    # location ~ /\.well-known/acme-challenge {
+    #     root /var/lib/letsencrypt/;
+    # }
+    location / {
+        return 301 https://$server_name$request_uri;
+    }
+}

# Enable SSL session caching for improved performance
ssl_session_cache shared:ssl_session_cache:10m;

@@ -32,29 +41,22 @@ ssl_session_cache shared:ssl_session_cache:10m;
server {
    server_name example.tld;

-    # Once certbot is set up, this will automatically be updated to listen to
-    # port 443 with TLS alongside a redirect from plaintext HTTP.
-    listen 80;
-    listen [::]:80;
-
-    # If you are not using Certbot, comment out the above and uncomment/edit the following
-    # listen 443 ssl http2;
-    # listen [::]:443 ssl http2;
-    # ssl_session_timeout 1d;
-    # ssl_session_cache shared:MozSSL:10m; # about 40000 sessions
-    # ssl_session_tickets off;
-    #
-    # ssl_trusted_certificate /etc/letsencrypt/live/example.tld/chain.pem;
-    # ssl_certificate /etc/letsencrypt/live/example.tld/fullchain.pem;
-    # ssl_certificate_key /etc/letsencrypt/live/example.tld/privkey.pem;
-    #
-    # ssl_protocols TLSv1.2 TLSv1.3;
-    # ssl_ciphers "ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:!aNULL:!eNULL:!EXPORT:!DES:!MD5:!PSK:!RC4";
-    # ssl_prefer_server_ciphers off;
-    # ssl_ecdh_curve X25519:prime256v1:secp384r1:secp521r1;
-    # ssl_stapling on;
-    # ssl_stapling_verify on;
+    listen 443 ssl http2;
+    listen [::]:443 ssl http2;
+    ssl_session_timeout 1d;
+    ssl_session_cache shared:MozSSL:10m; # about 40000 sessions
+    ssl_session_tickets off;
+
+    ssl_trusted_certificate /etc/letsencrypt/live/example.tld/chain.pem;
+    ssl_certificate /etc/letsencrypt/live/example.tld/fullchain.pem;
+    ssl_certificate_key /etc/letsencrypt/live/example.tld/privkey.pem;
+
+    ssl_protocols TLSv1.2 TLSv1.3;
+    ssl_ciphers "ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:!aNULL:!eNULL:!EXPORT:!DES:!MD5:!PSK:!RC4";
+    ssl_prefer_server_ciphers off;
+    ssl_ecdh_curve X25519:prime256v1:secp384r1:secp521r1;
+    ssl_stapling on;
+    ssl_stapling_verify on;

    gzip_vary on;
    gzip_proxied any;

@@ -73,43 +75,9 @@ server {
    proxy_set_header Host $http_host;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;

-    location ~ ^/(media|proxy) {
-        return 404;
-    }
-
    location / {
        proxy_pass http://phoenix;
    }
-}
-
-# Upload and MediaProxy Subdomain
-# (see main domain setup for more details)
-server {
-    server_name media.example.tld;
-
-    # Same as above, will be updated to HTTPS once certbot is set up.
-    listen 80;
-    listen [::]:80;
-
-    # If you are not using certbot, comment the above and copy all the ssl
-    # stuff from above into here.
-
-    gzip_vary on;
-    gzip_proxied any;
-    gzip_comp_level 6;
-    gzip_buffers 16 8k;
-    gzip_http_version 1.1;
-    gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript application/activity+json application/atom+xml;
-
-    # the nginx default is 1m, not enough for large media uploads
-    client_max_body_size 16m;
-    ignore_invalid_headers off;
-
-    proxy_http_version 1.1;
-    proxy_set_header Upgrade $http_upgrade;
-    proxy_set_header Connection "upgrade";
-    proxy_set_header Host $http_host;
-    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
-
    location ~ ^/(media|proxy) {
        proxy_cache akkoma_media_cache;

@@ -123,8 +91,4 @@ server {
        chunked_transfer_encoding on;
        proxy_pass http://phoenix;
    }
-
-    location / {
-        return 404;
-    }
}

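A quick way to check which variant of this config is live, not part of the file itself; the domains are the config's own placeholders:

```sh
# With the develop-side split-domain config, media paths on the main
# domain should 404 while the media host proxies them to Akkoma
curl -sI https://example.tld/proxy/test | head -n1        # expect 404
curl -sI https://media.example.tld/proxy/test | head -n1  # served by Akkoma
```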
@@ -5,7 +5,7 @@
SCRIPTNAME=${0##*/}

# NGINX cache directory
-CACHE_DIRECTORY="/var/tmp/akkoma-media-cache"
+CACHE_DIRECTORY="/tmp/akkoma-media-cache"

## Return the files where the items are cached.
## $1 - the filename, can be a pattern .

@@ -16,7 +16,7 @@ defmodule Mix.Pleroma do
    :fast_html,
    :oban
  ]
-  @cachex_children ["object", "user", "scrubber", "web_resp", "http_backoff"]
+  @cachex_children ["object", "user", "scrubber", "web_resp"]
  @doc "Common functions to be reused in mix tasks"
  def start_pleroma do
    Pleroma.Config.Holder.save_default()

@@ -112,26 +112,18 @@ def shell_prompt(prompt, defval \\ nil, defname \\ nil) do
    end
  end

-  def shell_info(message) when is_binary(message) or is_list(message) do
+  def shell_info(message) do
    if mix_shell?(),
      do: Mix.shell().info(message),
      else: IO.puts(message)
  end

-  def shell_info(message) do
-    shell_info("#{inspect(message)}")
-  end
-
-  def shell_error(message) when is_binary(message) or is_list(message) do
+  def shell_error(message) do
    if mix_shell?(),
      do: Mix.shell().error(message),
      else: IO.puts(:stderr, message)
  end

-  def shell_error(message) do
-    shell_error("#{inspect(message)}")
-  end
-
  @doc "Performs a safe check whether `Mix.shell/0` is available (does not raise if Mix is not loaded)"
  def mix_shell?, do: :erlang.function_exported(Mix, :shell, 0)

@@ -8,6 +8,7 @@ defmodule Mix.Tasks.Pleroma.Activity do
  alias Pleroma.User
  alias Pleroma.Web.CommonAPI
  alias Pleroma.Pagination
+  require Logger
  import Mix.Pleroma
  import Ecto.Query

@@ -16,7 +17,7 @@ def run(["get", id | _rest]) do

    id
    |> Activity.get_by_id()
-    |> shell_info()
+    |> IO.inspect()
  end

  def run(["delete_by_keyword", user, keyword | _rest]) do

@@ -34,7 +35,7 @@ def run(["delete_by_keyword", user, keyword | _rest]) do
    )
    |> Enum.map(fn x -> CommonAPI.delete(x.id, u) end)
    |> Enum.count()
-    |> shell_info()
+    |> IO.puts()
  end

  defp query_with(q, search_query) do

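For orientation, the two subcommands touched here are invoked like this (the ID, nickname and keyword are placeholders); OTP releases use `./bin/pleroma_ctl` instead of `mix pleroma.`:

```sh
# Print a single activity by its ID
mix pleroma.activity get AbCd1234

# Delete all of a user's activities matching a keyword, printing the count
mix pleroma.activity delete_by_keyword alice spam-phrase
```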
@@ -20,102 +20,6 @@ defmodule Mix.Tasks.Pleroma.Database do
  @shortdoc "A collection of database related tasks"
  @moduledoc File.read!("docs/docs/administration/CLI_tasks/database.md")

-  defp maybe_limit(query, limit_cnt) do
-    if is_number(limit_cnt) and limit_cnt > 0 do
-      limit(query, [], ^limit_cnt)
-    else
-      query
-    end
-  end
-
-  defp limit_statement(limit) when is_number(limit) do
-    if limit > 0 do
-      "LIMIT #{limit}"
-    else
-      ""
-    end
-  end
-
-  defp prune_orphaned_activities_singles(limit) do
-    %{:num_rows => del_single} =
-      """
-      delete from public.activities
-      where id in (
-        select a.id from public.activities a
-        left join public.objects o on a.data ->> 'object' = o.data ->> 'id'
-        left join public.activities a2 on a.data ->> 'object' = a2.data ->> 'id'
-        left join public.users u on a.data ->> 'object' = u.ap_id
-        where not a.local
-        and jsonb_typeof(a."data" -> 'object') = 'string'
-        and o.id is null
-        and a2.id is null
-        and u.id is null
-        #{limit_statement(limit)}
-      )
-      """
-      |> Repo.query!([], timeout: :infinity)
-
-    Logger.info("Prune activity singles: deleted #{del_single} rows...")
-    del_single
-  end
-
-  defp prune_orphaned_activities_array(limit) do
-    %{:num_rows => del_array} =
-      """
-      delete from public.activities
-      where id in (
-        select a.id from public.activities a
-        join json_array_elements_text((a."data" -> 'object')::json) as j
-          on a.data->>'type' = 'Flag'
-        left join public.objects o on j.value = o.data ->> 'id'
-        left join public.activities a2 on j.value = a2.data ->> 'id'
-        left join public.users u on j.value = u.ap_id
-        group by a.id
-        having max(o.data ->> 'id') is null
-        and max(a2.data ->> 'id') is null
-        and max(u.ap_id) is null
-        #{limit_statement(limit)}
-      )
-      """
-      |> Repo.query!([], timeout: :infinity)
-
-    Logger.info("Prune activity arrays: deleted #{del_array} rows...")
-    del_array
-  end
-
-  def prune_orphaned_activities(limit \\ 0, opts \\ []) when is_number(limit) do
-    # Activities can either refer to a single object id, an array of object ids,
-    # or contain an inlined object (at least after going through our normalisation)
-    #
-    # Flag is the only type we support with an array (and always has arrays).
-    # Update is the only one with inlined objects.
-    #
-    # We already regularly purge old Delete, Undo, Update and Remove and,
-    # if rejected, Follow requests anyway; no need to explicitly deal with those here.
-    #
-    # Since there’s an index on types and there are typically only few Flag
-    # activities, it’s _much_ faster to utilise the index. To avoid accidentally
-    # deleting useful activities should more types be added, keep typeof for singles.
-
-    # Prune activities which link to an array of objects
-    del_array =
-      if Keyword.get(opts, :arrays, true) do
-        prune_orphaned_activities_array(limit)
-      else
-        0
-      end
-
-    # Prune activities which link to a single object
-    del_single =
-      if Keyword.get(opts, :singles, true) do
-        prune_orphaned_activities_singles(limit)
-      else
-        0
-      end
-
-    del_single + del_array
-  end
-
  def run(["remove_embedded_objects" | args]) do
    {options, [], []} =
      OptionParser.parse(

@@ -158,37 +62,6 @@ def run(["update_users_following_followers_counts"]) do
    )
  end

-  def run(["prune_orphaned_activities" | args]) do
-    {options, [], []} =
-      OptionParser.parse(
-        args,
-        strict: [
-          limit: :integer,
-          singles: :boolean,
-          arrays: :boolean
-        ]
-      )
-
-    start_pleroma()
-
-    {limit, options} = Keyword.pop(options, :limit, 0)
-
-    log_message = "Pruning orphaned activities"
-
-    log_message =
-      if limit > 0 do
-        log_message <> ", limiting deletion to #{limit} rows"
-      else
-        log_message
-      end
-
-    Logger.info(log_message)
-
-    deleted = prune_orphaned_activities(limit, options)
-
-    Logger.info("Deleted #{deleted} rows")
-  end
-
  def run(["prune_objects" | args]) do
    {options, [], []} =
      OptionParser.parse(

@@ -197,8 +70,7 @@ def run(["prune_objects" | args]) do
          vacuum: :boolean,
          keep_threads: :boolean,
          keep_non_public: :boolean,
-          prune_orphaned_activities: :boolean,
-          limit: :integer
+          prune_orphaned_activities: :boolean
        ]
      )

@@ -207,8 +79,6 @@ def run(["prune_objects" | args]) do
    deadline = Pleroma.Config.get([:instance, :remote_post_retention_days])
    time_deadline = NaiveDateTime.utc_now() |> NaiveDateTime.add(-(deadline * 86_400))

-    limit_cnt = Keyword.get(options, :limit, 0)
-
    log_message = "Pruning objects older than #{deadline} days"

    log_message =

@@ -240,124 +110,129 @@ def run(["prune_objects" | args]) do
        log_message
      end

-    log_message =
-      if limit_cnt > 0 do
-        log_message <> ", limiting to #{limit_cnt} rows"
-      else
-        log_message
-      end
-
    Logger.info(log_message)

-    {del_obj, _} =
-      if Keyword.get(options, :keep_threads) do
+    if Keyword.get(options, :keep_threads) do
      # We want to delete objects from threads where
      # 1. the newest post is still old
      # 2. none of the activities is local
      # 3. none of the activities is bookmarked
      # 4. optionally none of the posts is non-public
      deletable_context =
        if Keyword.get(options, :keep_non_public) do
          Pleroma.Activity
          |> join(:left, [a], b in Pleroma.Bookmark, on: a.id == b.activity_id)
          |> group_by([a], fragment("? ->> 'context'::text", a.data))
          |> having(
            [a],
            not fragment(
              # Posts (checked on Create Activity) is non-public
              "bool_or((not(?->'to' \\? ? OR ?->'cc' \\? ?)) and ? ->> 'type' = 'Create')",
              a.data,
              ^Pleroma.Constants.as_public(),
              a.data,
              ^Pleroma.Constants.as_public(),
              a.data
            )
          )
        else
          Pleroma.Activity
          |> join(:left, [a], b in Pleroma.Bookmark, on: a.id == b.activity_id)
          |> group_by([a], fragment("? ->> 'context'::text", a.data))
        end
        |> having([a], max(a.updated_at) < ^time_deadline)
        |> having([a], not fragment("bool_or(?)", a.local))
        |> having([_, b], fragment("max(?::text) is null", b.id))
-        |> maybe_limit(limit_cnt)
        |> select([a], fragment("? ->> 'context'::text", a.data))

      Pleroma.Object
      |> where([o], fragment("? ->> 'context'::text", o.data) in subquery(deletable_context))
    else
-      deletable =
-        if Keyword.get(options, :keep_non_public) do
-          Pleroma.Object
-          |> where(
-            [o],
-            fragment(
-              "?->'to' \\? ? OR ?->'cc' \\? ?",
-              o.data,
-              ^Pleroma.Constants.as_public(),
-              o.data,
-              ^Pleroma.Constants.as_public()
-            )
-          )
-        else
-          Pleroma.Object
-        end
-        |> where([o], o.updated_at < ^time_deadline)
-        |> where(
-          [o],
-          fragment("split_part(?->>'actor', '/', 3) != ?", o.data, ^Pleroma.Web.Endpoint.host())
-        )
-        |> maybe_limit(limit_cnt)
-        |> select([o], o.id)
-
-      Pleroma.Object
-      |> where([o], o.id in subquery(deletable))
+      if Keyword.get(options, :keep_non_public) do
+        Pleroma.Object
+        |> where(
+          [o],
+          fragment(
+            "?->'to' \\? ? OR ?->'cc' \\? ?",
+            o.data,
+            ^Pleroma.Constants.as_public(),
+            o.data,
+            ^Pleroma.Constants.as_public()
+          )
+        )
+      else
+        Pleroma.Object
+      end
+      |> where([o], o.updated_at < ^time_deadline)
+      |> where(
+        [o],
+        fragment("split_part(?->>'actor', '/', 3) != ?", o.data, ^Pleroma.Web.Endpoint.host())
+      )
    end
    |> Repo.delete_all(timeout: :infinity)

-    Logger.info("Deleted #{del_obj} objects...")
-
    if !Keyword.get(options, :keep_threads) do
      # Without the --keep-threads option, it's possible that bookmarked
      # objects have been deleted. We remove the corresponding bookmarks.
-      %{:num_rows => del_bookmarks} =
-        """
-        delete from public.bookmarks
-        where id in (
-          select b.id from public.bookmarks b
-          left join public.activities a on b.activity_id = a.id
-          left join public.objects o on a."data" ->> 'object' = o.data ->> 'id'
-          where o.id is null
-        )
-        """
-        |> Repo.query!([], timeout: :infinity)
-
-      Logger.info("Deleted #{del_bookmarks} orphaned bookmarks...")
+      """
+      delete from public.bookmarks
+      where id in (
+        select b.id from public.bookmarks b
+        left join public.activities a on b.activity_id = a.id
+        left join public.objects o on a."data" ->> 'object' = o.data ->> 'id'
+        where o.id is null
+      )
+      """
+      |> Repo.query([], timeout: :infinity)
    end

    if Keyword.get(options, :prune_orphaned_activities) do
-      del_activities = prune_orphaned_activities()
-      Logger.info("Deleted #{del_activities} orphaned activities...")
+      # Prune activities which link to a single object
+      """
+      delete from public.activities
+      where id in (
+        select a.id from public.activities a
+        left join public.objects o on a.data ->> 'object' = o.data ->> 'id'
+        left join public.activities a2 on a.data ->> 'object' = a2.data ->> 'id'
+        left join public.users u on a.data ->> 'object' = u.ap_id
+        where not a.local
+        and jsonb_typeof(a."data" -> 'object') = 'string'
+        and o.id is null
+        and a2.id is null
+        and u.id is null
+      )
+      """
+      |> Repo.query([], timeout: :infinity)
+
+      # Prune activities which link to an array of objects
+      """
+      delete from public.activities
+      where id in (
+        select a.id from public.activities a
+        join json_array_elements_text((a."data" -> 'object')::json) as j on jsonb_typeof(a."data" -> 'object') = 'array'
+        left join public.objects o on j.value = o.data ->> 'id'
+        left join public.activities a2 on j.value = a2.data ->> 'id'
+        left join public.users u on j.value = u.ap_id
+        group by a.id
+        having max(o.data ->> 'id') is null
+        and max(a2.data ->> 'id') is null
+        and max(u.ap_id) is null
+      )
+      """
+      |> Repo.query([], timeout: :infinity)
    end

-    %{:num_rows => del_hashtags} =
-      """
-      DELETE FROM hashtags AS ht
-      WHERE NOT EXISTS (
-        SELECT 1 FROM hashtags_objects hto
-        WHERE ht.id = hto.hashtag_id)
-      """
-      |> Repo.query!()
-
-    Logger.info("Deleted #{del_hashtags} no longer used hashtags...")
+    """
+    DELETE FROM hashtags AS ht
+    WHERE NOT EXISTS (
+      SELECT 1 FROM hashtags_objects hto
+      WHERE ht.id = hto.hashtag_id)
+    """
+    |> Repo.query()

    if Keyword.get(options, :vacuum) do
-      Logger.info("Starting vacuum...")
      Maintenance.vacuum("full")
    end

-    Logger.info("All done!")
  end

  def run(["prune_task"]) do

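To make the options above concrete, the develop-side task accepts invocations like the following (the row counts are illustrative); on OTP releases, replace `mix pleroma.database` with `./bin/pleroma_ctl database`:

```sh
# Prune old remote objects while keeping whole threads, capped at 10000 rows,
# and clean up orphaned activities afterwards
mix pleroma.database prune_objects --keep-threads --prune-orphaned-activities --limit 10000

# Develop also exposes orphaned-activity pruning as its own task;
# --no-singles restricts it to array-type (Flag) references
mix pleroma.database prune_orphaned_activities --limit 1000 --no-singles
```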
@@ -3,6 +3,7 @@ defmodule Mix.Tasks.Pleroma.Diagnostics do
  alias Pleroma.Repo
  alias Pleroma.User

+  require Logger
  require Pleroma.Constants

  import Mix.Pleroma

@@ -13,20 +14,13 @@ def run(["http", url]) do
    start_pleroma()

    Pleroma.HTTP.get(url)
-    |> shell_info()
-  end
-
-  def run(["fetch_object", url]) do
-    start_pleroma()
-
-    Pleroma.Object.Fetcher.fetch_object_from_id(url)
    |> IO.inspect()
  end

  def run(["home_timeline", nickname]) do
    start_pleroma()
    user = Repo.get_by!(User, nickname: nickname)
-    shell_info("Home timeline query #{user.nickname}")
+    Logger.info("Home timeline query #{user.nickname}")

    followed_hashtags =
      user

@@ -55,14 +49,14 @@ def run(["home_timeline", nickname]) do
    |> limit(20)

    Ecto.Adapters.SQL.explain(Repo, :all, query, analyze: true, timeout: :infinity)
-    |> shell_info()
+    |> IO.puts()
  end

  def run(["user_timeline", nickname, reading_nickname]) do
    start_pleroma()
    user = Repo.get_by!(User, nickname: nickname)
    reading_user = Repo.get_by!(User, nickname: reading_nickname)
-    shell_info("User timeline query #{user.nickname}")
+    Logger.info("User timeline query #{user.nickname}")

    params =
      %{limit: 20}

@@ -86,7 +80,7 @@ def run(["user_timeline", nickname, reading_nickname]) do
    |> limit(20)

    Ecto.Adapters.SQL.explain(Repo, :all, query, analyze: true, timeout: :infinity)
-    |> shell_info()
+    |> IO.puts()
  end

  def run(["notifications", nickname]) do

@@ -102,7 +96,7 @@ def run(["notifications", nickname]) do
    |> limit(20)

    Ecto.Adapters.SQL.explain(Repo, :all, query, analyze: true, timeout: :infinity)
-    |> shell_info()
+    |> IO.puts()
  end

  def run(["known_network", nickname]) do

@@ -128,6 +122,6 @@ def run(["known_network", nickname]) do
    |> limit(20)

    Ecto.Adapters.SQL.explain(Repo, :all, query, analyze: true, timeout: :infinity)
-    |> shell_info()
+    |> IO.puts()
  end
end

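The diagnostics task runs its EXPLAIN ANALYZE queries against the live database; typical invocations matching the clauses above (nickname and URL are placeholders):

```sh
# EXPLAIN ANALYZE the home timeline query for a user
mix pleroma.diagnostics home_timeline alice

# Develop additionally supports fetching a remote object for inspection
mix pleroma.diagnostics fetch_object https://example.com/objects/1234
```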
|
@ -27,11 +27,11 @@ def run(["ls-packs" | args]) do
|
||||||
]
|
]
|
||||||
|
|
||||||
for {param, value} <- to_print do
|
for {param, value} <- to_print do
|
||||||
shell_info(IO.ANSI.format([:bright, param, :normal, ": ", value]))
|
IO.puts(IO.ANSI.format([:bright, param, :normal, ": ", value]))
|
||||||
end
|
end
|
||||||
|
|
||||||
# A newline
|
# A newline
|
||||||
shell_info("")
|
IO.puts("")
|
||||||
end)
|
end)
|
||||||
end
|
end
|
||||||
|
|
||||||
|
@ -49,7 +49,7 @@ def run(["get-packs" | args]) do
|
||||||
pack = manifest[pack_name]
|
pack = manifest[pack_name]
|
||||||
src = pack["src"]
|
src = pack["src"]
|
||||||
|
|
||||||
shell_info(
|
IO.puts(
|
||||||
IO.ANSI.format([
|
IO.ANSI.format([
|
||||||
"Downloading ",
|
"Downloading ",
|
||||||
:bright,
|
:bright,
|
||||||
|
@ -67,9 +67,9 @@ def run(["get-packs" | args]) do
|
||||||
sha_status_text = ["SHA256 of ", :bright, pack_name, :normal, " source file is ", :bright]
|
sha_status_text = ["SHA256 of ", :bright, pack_name, :normal, " source file is ", :bright]
|
||||||
|
|
||||||
if archive_sha == String.upcase(pack["src_sha256"]) do
|
if archive_sha == String.upcase(pack["src_sha256"]) do
|
||||||
shell_info(IO.ANSI.format(sha_status_text ++ [:green, "OK"]))
|
IO.puts(IO.ANSI.format(sha_status_text ++ [:green, "OK"]))
|
||||||
else
|
else
|
||||||
shell_info(IO.ANSI.format(sha_status_text ++ [:red, "BAD"]))
|
IO.puts(IO.ANSI.format(sha_status_text ++ [:red, "BAD"]))
|
||||||
|
|
||||||
raise "Bad SHA256 for #{pack_name}"
|
raise "Bad SHA256 for #{pack_name}"
|
||||||
end
|
end
|
||||||
|
@ -80,7 +80,7 @@ def run(["get-packs" | args]) do
|
||||||
|> Path.dirname()
|
|> Path.dirname()
|
||||||
|> Path.join(pack["files"])
|
|> Path.join(pack["files"])
|
||||||
|
|
||||||
shell_info(
|
IO.puts(
|
||||||
IO.ANSI.format([
|
IO.ANSI.format([
|
||||||
"Fetching the file list for ",
|
"Fetching the file list for ",
|
||||||
:bright,
|
:bright,
|
||||||
|
@ -94,7 +94,7 @@ def run(["get-packs" | args]) do
|
||||||
|
|
||||||
files = fetch_and_decode!(files_loc)
|
files = fetch_and_decode!(files_loc)
|
||||||
|
|
||||||
shell_info(IO.ANSI.format(["Unpacking ", :bright, pack_name]))
|
IO.puts(IO.ANSI.format(["Unpacking ", :bright, pack_name]))
|
||||||
|
|
||||||
pack_path =
|
pack_path =
|
||||||
Path.join([
|
Path.join([
|
||||||
|
@ -111,11 +111,11 @@ def run(["get-packs" | args]) do
|
||||||
|
|
||||||
{:ok, _} =
|
{:ok, _} =
|
||||||
:zip.unzip(binary_archive,
|
:zip.unzip(binary_archive,
|
||||||
cwd: to_charlist(pack_path),
|
cwd: pack_path,
|
||||||
file_list: files_to_unzip
|
file_list: files_to_unzip
|
||||||
)
|
)
|
||||||
|
|
||||||
shell_info(IO.ANSI.format(["Writing pack.json for ", :bright, pack_name]))
|
IO.puts(IO.ANSI.format(["Writing pack.json for ", :bright, pack_name]))
|
||||||
|
|
||||||
pack_json = %{
|
pack_json = %{
|
||||||
pack: %{
|
pack: %{
|
||||||
|
@ -130,9 +130,8 @@ def run(["get-packs" | args]) do
|
||||||
}
|
}
|
||||||
|
|
||||||
File.write!(Path.join(pack_path, "pack.json"), Jason.encode!(pack_json, pretty: true))
|
File.write!(Path.join(pack_path, "pack.json"), Jason.encode!(pack_json, pretty: true))
|
||||||
Pleroma.Emoji.reload()
|
|
||||||
else
|
else
|
||||||
shell_info(IO.ANSI.format([:bright, :red, "No pack named \"#{pack_name}\" found"]))
|
IO.puts(IO.ANSI.format([:bright, :red, "No pack named \"#{pack_name}\" found"]))
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
@ -180,14 +179,14 @@ def run(["gen-pack" | args]) do
|
||||||
custom_exts
|
custom_exts
|
||||||
end
|
end
|
||||||
|
|
||||||
shell_info("Using #{Enum.join(exts, " ")} extensions")
|
IO.puts("Using #{Enum.join(exts, " ")} extensions")
|
||||||
|
|
||||||
shell_info("Downloading the pack and generating SHA256")
|
IO.puts("Downloading the pack and generating SHA256")
|
||||||
|
|
||||||
{:ok, %{body: binary_archive}} = Pleroma.HTTP.get(src)
|
{:ok, %{body: binary_archive}} = Pleroma.HTTP.get(src)
|
||||||
archive_sha = :crypto.hash(:sha256, binary_archive) |> Base.encode16()
|
archive_sha = :crypto.hash(:sha256, binary_archive) |> Base.encode16()
|
||||||
|
|
||||||
shell_info("SHA256 is #{archive_sha}")
|
IO.puts("SHA256 is #{archive_sha}")
|
||||||
|
|
||||||
pack_json = %{
|
pack_json = %{
|
||||||
name => %{
|
name => %{
|
||||||
|
@ -208,7 +207,7 @@ def run(["gen-pack" | args]) do
|
||||||
|
|
||||||
File.write!(files_name, Jason.encode!(emoji_map, pretty: true))
|
File.write!(files_name, Jason.encode!(emoji_map, pretty: true))
|
||||||
|
|
||||||
shell_info("""
|
IO.puts("""
|
||||||
|
|
||||||
#{files_name} has been created and contains the list of all found emojis in the pack.
|
#{files_name} has been created and contains the list of all found emojis in the pack.
|
||||||
Please review the files in the pack and remove those not needed.
|
Please review the files in the pack and remove those not needed.
|
||||||
|
@ -230,20 +229,18 @@ def run(["gen-pack" | args]) do
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
|
|
||||||
shell_info("#{pack_file} has been updated with the #{name} pack")
|
IO.puts("#{pack_file} has been updated with the #{name} pack")
|
||||||
else
|
else
|
||||||
File.write!(pack_file, Jason.encode!(pack_json, pretty: true))
|
File.write!(pack_file, Jason.encode!(pack_json, pretty: true))
|
||||||
|
|
||||||
shell_info("#{pack_file} has been created with the #{name} pack")
|
IO.puts("#{pack_file} has been created with the #{name} pack")
|
||||||
end
|
end
|
||||||
|
|
||||||
Pleroma.Emoji.reload()
|
|
||||||
end
|
end
|
||||||
|
|
||||||
def run(["reload"]) do
|
def run(["reload"]) do
|
||||||
start_pleroma()
|
start_pleroma()
|
||||||
Pleroma.Emoji.reload()
|
Pleroma.Emoji.reload()
|
||||||
shell_info("Emoji packs have been reloaded.")
|
IO.puts("Emoji packs have been reloaded.")
|
||||||
end
|
end
|
||||||
|
|
||||||
defp fetch_and_decode!(from) do
|
defp fetch_and_decode!(from) do
|
||||||
|
|
|
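The emoji task's subcommands as they appear above are used like this; the pack name is a placeholder that must exist in the manifest being queried:

```sh
# List packs available in the default manifest
mix pleroma.emoji ls-packs

# Download and unpack one of them, then reload the emoji cache
mix pleroma.emoji get-packs some_pack
mix pleroma.emoji reload
```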
@@ -20,7 +20,6 @@ def run(["gen" | rest]) do
          output: :string,
          output_psql: :string,
          domain: :string,
-          media_url: :string,
          instance_name: :string,
          admin_email: :string,
          notify_email: :string,

@@ -35,9 +34,9 @@ def run(["gen" | rest]) do
          static_dir: :string,
          listen_ip: :string,
          listen_port: :string,
-          strip_uploads_metadata: :string,
-          read_uploads_description: :string,
-          anonymize_uploads: :string
+          strip_uploads: :string,
+          anonymize_uploads: :string,
+          dedupe_uploads: :string
        ],
        aliases: [
          o: :output,

@@ -65,14 +64,6 @@ def run(["gen" | rest]) do
          ":"
        ) ++ [443]

-    media_url =
-      get_option(
-        options,
-        :media_url,
-        "What base url will uploads use? (e.g https://media.example.com/media)\n" <>
-          " Generally this should NOT use the same domain as the instance "
-      )
-
    name =
      get_option(
        options,

@@ -170,38 +161,21 @@ def run(["gen" | rest]) do
      )
      |> Path.expand()

-    {strip_uploads_metadata_message, strip_uploads_metadata_default} =
+    {strip_uploads_message, strip_uploads_default} =
      if Pleroma.Utils.command_available?("exiftool") do
-        {"Do you want to strip metadata from uploaded images? This requires exiftool, it was detected as installed. (y/n)",
+        {"Do you want to strip location (GPS) data from uploaded images? This requires exiftool, it was detected as installed. (y/n)",
         "y"}
      else
-        {"Do you want to strip metadata from uploaded images? This requires exiftool, it was detected as not installed, please install it if you answer yes. (y/n)",
+        {"Do you want to strip location (GPS) data from uploaded images? This requires exiftool, it was detected as not installed, please install it if you answer yes. (y/n)",
         "n"}
      end

-    strip_uploads_metadata =
+    strip_uploads =
      get_option(
        options,
-        :strip_uploads_metadata,
-        strip_uploads_metadata_message,
-        strip_uploads_metadata_default
-      ) === "y"
-
-    {read_uploads_description_message, read_uploads_description_default} =
-      if Pleroma.Utils.command_available?("exiftool") do
-        {"Do you want to read data from uploaded files so clients can use it to prefill fields like image description? This requires exiftool, it was detected as installed. (y/n)",
-         "y"}
-      else
-        {"Do you want to read data from uploaded files so clients can use it to prefill fields like image description? This requires exiftool, it was detected as not installed, please install it if you answer yes. (y/n)",
-         "n"}
-      end
-
-    read_uploads_description =
-      get_option(
-        options,
-        :read_uploads_description,
-        read_uploads_description_message,
-        read_uploads_description_default
+        :strip_uploads,
+        strip_uploads_message,
+        strip_uploads_default
      ) === "y"

    anonymize_uploads =

@@ -212,6 +186,14 @@ def run(["gen" | rest]) do
        "n"
      ) === "y"

+    dedupe_uploads =
+      get_option(
+        options,
+        :dedupe_uploads,
+        "Do you want to deduplicate uploaded files? (y/n)",
+        "n"
+      ) === "y"
+
    Config.put([:instance, :static_dir], static_dir)

    secret = :crypto.strong_rand_bytes(64) |> Base.encode64() |> binary_part(0, 64)

@@ -225,7 +207,6 @@ def run(["gen" | rest]) do
      EEx.eval_file(
        template_dir <> "/sample_config.eex",
        domain: domain,
-        media_url: media_url,
        port: port,
        email: email,
        notify_email: notify_email,

@@ -248,9 +229,9 @@ def run(["gen" | rest]) do
        listen_port: listen_port,
        upload_filters:
          upload_filters(%{
-            strip_metadata: strip_uploads_metadata,
-            read_description: read_uploads_description,
-            anonymize: anonymize_uploads
+            strip: strip_uploads,
+            anonymize: anonymize_uploads,
+            dedupe: dedupe_uploads
          })
      )

@@ -324,20 +305,11 @@ defp write_robots_txt(static_dir, indexable, template_dir) do
  end

  defp upload_filters(filters) when is_map(filters) do
-    enabled_filters = []
-
    enabled_filters =
-      if filters.read_description do
-        enabled_filters ++ [Pleroma.Upload.Filter.Exiftool.ReadDescription]
+      if filters.strip do
+        [Pleroma.Upload.Filter.Exiftool]
      else
-        enabled_filters
-      end
-
-    enabled_filters =
-      if filters.strip_metadata do
-        enabled_filters ++ [Pleroma.Upload.Filter.Exiftool.StripMetadata]
-      else
-        enabled_filters
+        []
      end

    enabled_filters =

@@ -347,6 +319,13 @@ defp upload_filters(filters) when is_map(filters) do
        enabled_filters
      end

+    enabled_filters =
+      if filters.dedupe do
+        enabled_filters ++ [Pleroma.Upload.Filter.Dedupe]
+      else
+        enabled_filters
+      end
+
    enabled_filters
  end
end

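Non-interactive generation exercises these options directly; the flag names follow the parsed keys on each side (`--strip-uploads-metadata`, `--read-uploads-description` and `--media-url` exist only on develop), and every value below is an example:

```sh
mix pleroma.instance gen \
  --output config/generated_config.exs \
  --output-psql config/setup_db.psql \
  --domain example.tld \
  --media-url https://media.example.tld/media \
  --strip-uploads-metadata y \
  --read-uploads-description n
```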
@@ -11,6 +11,7 @@ defmodule Mix.Tasks.Pleroma.RefreshCounterCache do
  alias Pleroma.CounterCache
  alias Pleroma.Repo

+  require Logger
  import Ecto.Query

  def run([]) do

|
@ -30,12 +30,12 @@ def run(["index"]) do
|
||||||
meili_put(
|
meili_put(
|
||||||
"/indexes/objects/settings/ranking-rules",
|
"/indexes/objects/settings/ranking-rules",
|
||||||
[
|
[
|
||||||
|
"published:desc",
|
||||||
"words",
|
"words",
|
||||||
|
"exactness",
|
||||||
"proximity",
|
"proximity",
|
||||||
"typo",
|
"typo",
|
||||||
"exactness",
|
|
||||||
"attribute",
|
"attribute",
|
||||||
"published:desc",
|
|
||||||
"sort"
|
"sort"
|
||||||
]
|
]
|
||||||
)
|
)
|
||||||
|
@ -48,7 +48,7 @@ def run(["index"]) do
|
||||||
]
|
]
|
||||||
)
|
)
|
||||||
|
|
||||||
shell_info("Created indices. Starting to insert posts.")
|
IO.puts("Created indices. Starting to insert posts.")
|
||||||
|
|
||||||
chunk_size = Pleroma.Config.get([Pleroma.Search.Meilisearch, :initial_indexing_chunk_size])
|
chunk_size = Pleroma.Config.get([Pleroma.Search.Meilisearch, :initial_indexing_chunk_size])
|
||||||
|
|
||||||
|
@ -65,7 +65,7 @@ def run(["index"]) do
|
||||||
)
|
)
|
||||||
|
|
||||||
count = query |> Pleroma.Repo.aggregate(:count, :data)
|
count = query |> Pleroma.Repo.aggregate(:count, :data)
|
||||||
shell_info("Entries to index: #{count}")
|
IO.puts("Entries to index: #{count}")
|
||||||
|
|
||||||
Pleroma.Repo.stream(
|
Pleroma.Repo.stream(
|
||||||
query,
|
query,
|
||||||
|
@ -92,10 +92,10 @@ def run(["index"]) do
|
||||||
|
|
||||||
with {:ok, res} <- result do
|
with {:ok, res} <- result do
|
||||||
if not Map.has_key?(res, "indexUid") do
|
if not Map.has_key?(res, "indexUid") do
|
||||||
shell_info("\nFailed to index: #{inspect(result)}")
|
IO.puts("\nFailed to index: #{inspect(result)}")
|
||||||
end
|
end
|
||||||
else
|
else
|
||||||
e -> shell_error("\nFailed to index due to network error: #{inspect(e)}")
|
e -> IO.puts("\nFailed to index due to network error: #{inspect(e)}")
|
||||||
end
|
end
|
||||||
end)
|
end)
|
||||||
|> Stream.run()
|
|> Stream.run()
|
||||||
|
@ -126,15 +126,11 @@ def run(["show-keys", master_key]) do
|
||||||
decoded = Jason.decode!(result.body)
|
decoded = Jason.decode!(result.body)
|
||||||
|
|
||||||
if decoded["results"] do
|
if decoded["results"] do
|
||||||
Enum.each(decoded["results"], fn
|
Enum.each(decoded["results"], fn %{"description" => desc, "key" => key} ->
|
||||||
%{"name" => name, "key" => key} ->
|
IO.puts("#{desc}: #{key}")
|
||||||
shell_info("#{name}: #{key}")
|
|
||||||
|
|
||||||
%{"description" => desc, "key" => key} ->
|
|
||||||
shell_info("#{desc}: #{key}")
|
|
||||||
end)
|
end)
|
||||||
else
|
else
|
||||||
shell_error("Error fetching the keys, check the master key is correct: #{inspect(decoded)}")
|
IO.puts("Error fetching the keys, check the master key is correct: #{inspect(decoded)}")
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
||||||
|
@ -142,7 +138,7 @@ def run(["stats"]) do
|
||||||
start_pleroma()
|
start_pleroma()
|
||||||
|
|
||||||
{:ok, result} = meili_get("/indexes/objects/stats")
|
{:ok, result} = meili_get("/indexes/objects/stats")
|
||||||
shell_info("Number of entries: #{result["numberOfDocuments"]}")
|
IO.puts("Number of entries: #{result["numberOfDocuments"]}")
|
||||||
shell_info("Indexing? #{result["isIndexing"]}")
|
IO.puts("Indexing? #{result["isIndexing"]}")
|
||||||
end
|
end
|
||||||
end
|
end
|
||||||
|
|
|
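The subcommands shown in this hunk are driven from the shell; the master key value is a placeholder:

```sh
# Build the index, then check progress and entry counts
mix pleroma.search.meilisearch index
mix pleroma.search.meilisearch stats

# List API keys known to the Meilisearch instance
mix pleroma.search.meilisearch show-keys my-master-key
```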
@@ -1,330 +0,0 @@
-# Akkoma: Magically expressive social media
-# Copyright © 2024 Akkoma Authors <https://akkoma.dev/>
-# SPDX-License-Identifier: AGPL-3.0-only
-
-defmodule Mix.Tasks.Pleroma.Security do
-  use Mix.Task
-  import Ecto.Query
-  import Mix.Pleroma
-
-  alias Pleroma.Config
-
-  require Logger
-
-  @shortdoc """
-  Security-related tasks, like e.g. checking for signs past exploits were abused.
-  """
-
-  # Constants etc
-  defp local_id_prefix(), do: Pleroma.Web.Endpoint.url() <> "/"
-
-  defp local_id_pattern(), do: local_id_prefix() <> "%"
-
-  @activity_exts ["activity+json", "activity%2Bjson"]
-
-  defp activity_ext_url_patterns() do
-    for e <- @activity_exts do
-      for suf <- ["", "?%"] do
-        # Escape literal % for use in SQL patterns
-        ee = String.replace(e, "%", "\\%")
-        "%.#{ee}#{suf}"
-      end
-    end
-    |> List.flatten()
-  end
-
-  # Search for malicious uploads exploiting the lack of Content-Type sanitisation from before 2024-03
-  def run(["spoof-uploaded"]) do
-    Logger.put_process_level(self(), :notice)
-    start_pleroma()
-
-    shell_info("""
-    +------------------------+
-    |  SPOOF SEARCH UPLOADS  |
-    +------------------------+
-    Checking if any uploads are using privileged types.
-    NOTE if attachment deletion is enabled, payloads used
-         in the past may no longer exist.
-    """)
-
-    do_spoof_uploaded()
-  end
-
-  # Fuzzy search for potentially counterfeit activities in the database resulting from the same exploit
-  def run(["spoof-inserted"]) do
-    Logger.put_process_level(self(), :notice)
-    start_pleroma()
-
-    shell_info("""
-    +----------------------+
-    |  SPOOF SEARCH NOTES  |
-    +----------------------+
-    Starting fuzzy search for counterfeit activities.
-    NOTE this can not guarantee detecting all counterfeits
-         and may yield a small percentage of false positives.
-    """)
-
-    do_spoof_inserted()
-  end
-
-  # +-----------------------------+
-  # | S P O O F - U P L O A D E D |
-  # +-----------------------------+
-  defp do_spoof_uploaded() do
-    files =
-      case Config.get!([Pleroma.Upload, :uploader]) do
-        Pleroma.Uploaders.Local ->
-          uploads_search_spoofs_local_dir(Config.get!([Pleroma.Uploaders.Local, :uploads]))
-
-        _ ->
-          shell_info("""
-          NOTE:
-            Not using local uploader; thus not affected by this exploit.
-            It's impossible to check for files, but in case local uploader was used before
-            or to check if anyone futilely attempted a spoof, notes will still be scanned.
-          """)
-
-          []
-      end
-
-    emoji = uploads_search_spoofs_local_dir(Config.get!([:instance, :static_dir]))
-
-    post_attachs = uploads_search_spoofs_notes()
-
-    not_orphaned_urls =
-      post_attachs
-      |> Enum.map(fn {_u, _a, url} -> url end)
-      |> MapSet.new()
-
-    orphaned_attachs = upload_search_orphaned_attachments(not_orphaned_urls)
-
-    shell_info("\nSearch concluded; here are the results:")
-    pretty_print_list_with_title(emoji, "Emoji")
-    pretty_print_list_with_title(files, "Uploaded Files")
-    pretty_print_list_with_title(post_attachs, "(Not Deleted) Post Attachments")
-    pretty_print_list_with_title(orphaned_attachs, "Orphaned Uploads")
-
-    shell_info("""
-    In total found
-      #{length(emoji)} emoji
-      #{length(files)} uploads
-      #{length(post_attachs)} not deleted posts
-      #{length(orphaned_attachs)} orphaned attachments
-    """)
-  end
-
-  defp uploads_search_spoofs_local_dir(dir) do
-    local_dir = String.replace_suffix(dir, "/", "")
-
-    shell_info("Searching for suspicious files in #{local_dir}...")
-
-    glob_ext = "{" <> Enum.join(@activity_exts, ",") <> "}"
-
-    Path.wildcard(local_dir <> "/**/*." <> glob_ext, match_dot: true)
-    |> Enum.map(fn path ->
-      String.replace_prefix(path, local_dir <> "/", "")
-    end)
-    |> Enum.sort()
-  end
-
-  defp uploads_search_spoofs_notes() do
-    shell_info("Now querying DB for posts with spoofing attachments. This might take a while...")
-
-    patterns = [local_id_pattern() | activity_ext_url_patterns()]
-
-    # if jsonb_array_elemsts in FROM can be used with normal Ecto functions, idk how
-    """
-    SELECT DISTINCT a.data->>'actor', a.id, url->>'href'
-    FROM public.objects AS o JOIN public.activities AS a
-         ON o.data->>'id' = a.data->>'object',
-         jsonb_array_elements(o.data->'attachment') AS attachs,
-         jsonb_array_elements(attachs->'url') AS url
-    WHERE o.data->>'type' = 'Note' AND
-          o.data->>'id' LIKE $1::text AND (
-            url->>'href' LIKE $2::text OR
-            url->>'href' LIKE $3::text OR
-            url->>'href' LIKE $4::text OR
-            url->>'href' LIKE $5::text
-          )
-    ORDER BY a.data->>'actor', a.id, url->>'href';
-    """
-    |> Pleroma.Repo.query!(patterns, timeout: :infinity)
-    |> map_raw_id_apid_tuple()
-  end
-
-  defp upload_search_orphaned_attachments(not_orphaned_urls) do
-    shell_info("""
-    Now querying DB for orphaned spoofing attachment (i.e. their post was deleted,
-    but if :cleanup_attachments was not enabled traces remain in the database)
-    This might take a bit...
-    """)
-
-    patterns = activity_ext_url_patterns()
-
-    """
-    SELECT DISTINCT attach.id, url->>'href'
-    FROM public.objects AS attach,
-         jsonb_array_elements(attach.data->'url') AS url
-    WHERE (attach.data->>'type' = 'Image' OR
-           attach.data->>'type' = 'Document')
-          AND (
-            url->>'href' LIKE $1::text OR
-            url->>'href' LIKE $2::text OR
-            url->>'href' LIKE $3::text OR
-            url->>'href' LIKE $4::text
-          )
-    ORDER BY attach.id, url->>'href';
-    """
-    |> Pleroma.Repo.query!(patterns, timeout: :infinity)
-    |> then(fn res -> Enum.map(res.rows, fn [id, url] -> {id, url} end) end)
-    |> Enum.filter(fn {_, url} -> !(url in not_orphaned_urls) end)
-  end
-
-  # +-----------------------------+
-  # | S P O O F - I N S E R T E D |
-  # +-----------------------------+
-  defp do_spoof_inserted() do
-    shell_info("""
-    Searching for local posts whose Create activity has no ActivityPub id...
-      This is a pretty good indicator, but only for spoofs of local actors
-      and only if the spoofing happened after around late 2021.
-    """)
-
-    idless_create =
-      search_local_notes_without_create_id()
-      |> Enum.sort()
-
-    shell_info("Done.\n")
-
-    shell_info("""
-    Now trying to weed out other poorly hidden spoofs.
-      This can't detect all and may have some false positives.
-    """)
-
-    likely_spoofed_posts_set = MapSet.new(idless_create)
-
-    sus_pattern_posts =
-      search_sus_notes_by_id_patterns()
-      |> Enum.filter(fn r -> !(r in likely_spoofed_posts_set) end)
-
-    shell_info("Done.\n")
-
-    shell_info("""
-    Finally, searching for spoofed, local user accounts.
-      (It's impossible to detect spoofed remote users)
-    """)
-
-    spoofed_users = search_bogus_local_users()
-
-    pretty_print_list_with_title(sus_pattern_posts, "Maybe Spoofed Posts")
-    pretty_print_list_with_title(idless_create, "Likely Spoofed Posts")
-    pretty_print_list_with_title(spoofed_users, "Spoofed local user accounts")
-
-    shell_info("""
-    In total found:
-      #{length(spoofed_users)} bogus users
-      #{length(idless_create)} likely spoofed posts
-      #{length(sus_pattern_posts)} maybe spoofed posts
-    """)
-  end
-
-  defp search_local_notes_without_create_id() do
-    Pleroma.Object
-    |> where([o], fragment("?->>'id' LIKE ?", o.data, ^local_id_pattern()))
-    |> join(:inner, [o], a in Pleroma.Activity,
-      on: fragment("?->>'object' = ?->>'id'", a.data, o.data)
-    )
-    |> where([o, a], fragment("NOT (? \\? 'id') OR ?->>'id' IS NULL", a.data, a.data))
-    |> select([o, a], {a.id, fragment("?->>'id'", o.data)})
-    |> order_by([o, a], a.id)
-    |> Pleroma.Repo.all(timeout: :infinity)
-  end
-
-  defp search_sus_notes_by_id_patterns() do
-    [ep1, ep2, ep3, ep4] = activity_ext_url_patterns()
-
-    Pleroma.Object
-    |> where(
-      [o],
-      # for local objects we know exactly how a genuine id looks like
-      # (though a thorough attacker can emulate this)
-      # for remote posts, use some best-effort patterns
-      fragment(
-        """
-        (?->>'id' LIKE ? AND ?->>'id' NOT SIMILAR TO
-         ? || 'objects/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}')
-        """,
-        o.data,
-        ^local_id_pattern(),
-        o.data,
-        ^local_id_prefix()
-      ) or
-        fragment("?->>'id' LIKE ?", o.data, "%/emoji/%") or
-        fragment("?->>'id' LIKE ?", o.data, "%/media/%") or
-        fragment("?->>'id' LIKE ?", o.data, "%/proxy/%") or
-        fragment("?->>'id' LIKE ?", o.data, ^ep1) or
-        fragment("?->>'id' LIKE ?", o.data, ^ep2) or
-        fragment("?->>'id' LIKE ?", o.data, ^ep3) or
-        fragment("?->>'id' LIKE ?", o.data, ^ep4)
-    )
-    |> join(:inner, [o], a in Pleroma.Activity,
-      on: fragment("?->>'object' = ?->>'id'", a.data, o.data)
-    )
-    |> select([o, a], {a.id, fragment("?->>'id'", o.data)})
-    |> order_by([o, a], a.id)
-    |> Pleroma.Repo.all(timeout: :infinity)
-  end
-
-  defp search_bogus_local_users() do
-    Pleroma.User.Query.build(%{})
-    |> where([u], u.local == false and like(u.ap_id, ^local_id_pattern()))
-    |> order_by([u], u.ap_id)
-    |> select([u], u.ap_id)
-    |> Pleroma.Repo.all(timeout: :infinity)
-  end
-
-  # +-----------------------------------+
-  # | module-specific utility functions |
-  # +-----------------------------------+
-  defp pretty_print_list_with_title(list, title) do
-    title_len = String.length(title)
-    title_underline = String.duplicate("=", title_len)
-    shell_info(title)
-    shell_info(title_underline)
-    pretty_print_list(list)
-  end
-
-  defp pretty_print_list([]), do: shell_info("")
-
-  defp pretty_print_list([{a, o} | rest])
-       when (is_binary(a) or is_number(a)) and is_binary(o) do
-    shell_info("  {#{a}, #{o}}")
-    pretty_print_list(rest)
-  end
-
-  defp pretty_print_list([{u, a, o} | rest])
-       when is_binary(a) and is_binary(u) and is_binary(o) do
-    shell_info("  {#{u}, #{a}, #{o}}")
-    pretty_print_list(rest)
-  end
-
-  defp pretty_print_list([e | rest]) when is_binary(e) do
-    shell_info("  #{e}")
-    pretty_print_list(rest)
-  end
-
-  defp pretty_print_list([e | rest]), do: pretty_print_list([inspect(e) | rest])
-
-  defp map_raw_id_apid_tuple(res) do
-    user_prefix = local_id_prefix() <> "users/"
-
-    Enum.map(res.rows, fn
-      [uid, aid, oid] ->
-        {
-          String.replace_prefix(uid, user_prefix, ""),
-          FlakeId.to_string(aid),
-          oid
-        }
-    end)
-  end
-end
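For reference, the four SQL `LIKE` patterns that `activity_ext_url_patterns/0` in the deleted task expands to can be previewed with a small standalone sketch; the module name below is hypothetical, and the single multi-generator `for` is a condensed equivalent of the nested comprehension plus `List.flatten/1` above.

```elixir
defmodule PatternSketch do
  @activity_exts ["activity+json", "activity%2Bjson"]

  # Produces the same list as activity_ext_url_patterns/0 in the deleted task
  def patterns do
    for e <- @activity_exts, suf <- ["", "?%"] do
      # escape the literal "%" in "%2B" so SQL does not treat it as a wildcard
      "%.#{String.replace(e, "%", "\\%")}#{suf}"
    end
  end
end

PatternSketch.patterns()
# => ["%.activity+json", "%.activity+json?%",
#     "%.activity\\%2Bjson", "%.activity\\%2Bjson?%"]
```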
@@ -11,7 +11,6 @@ defmodule Mix.Tasks.Pleroma.User do
   alias Pleroma.UserInviteToken
   alias Pleroma.Web.ActivityPub.Builder
   alias Pleroma.Web.ActivityPub.Pipeline
-  use Pleroma.Web, :verified_routes
 
   @shortdoc "Manages Pleroma users"
   @moduledoc File.read!("docs/docs/administration/CLI_tasks/user.md")
@@ -114,7 +113,11 @@ def run(["reset_password", nickname]) do
          {:ok, token} <- Pleroma.PasswordResetToken.create_token(user) do
       shell_info("Generated password reset token for #{user.nickname}")
 
-      shell_info("URL: #{~p[/api/v1/pleroma/password_reset/#{token.token}]}")
+      IO.puts(
+        "URL: #{Pleroma.Web.Router.Helpers.reset_password_url(Pleroma.Web.Endpoint,
+          :reset,
+          token.token)}"
+      )
     else
       _ ->
         shell_error("No local user #{nickname}")
@@ -300,8 +303,14 @@ def run(["invite" | rest]) do
          {:ok, invite} <- UserInviteToken.create_invite(options) do
       shell_info("Generated user invite token " <> String.replace(invite.invite_type, "_", " "))
 
-      url = url(~p[/registration/#{invite.token}])
-      shell_info(url)
+      url =
+        Pleroma.Web.Router.Helpers.redirect_url(
+          Pleroma.Web.Endpoint,
+          :registration_page,
+          invite.token
+        )
+
+      IO.puts(url)
     else
       error ->
         shell_error("Could not create invite token: #{inspect(error)}")
@@ -373,7 +382,7 @@ def run(["show", nickname]) do
       nickname
       |> User.get_cached_by_nickname()
 
-    shell_info(user)
+    shell_info("#{inspect(user)}")
   end
 
   def run(["send_confirmation", nickname]) do
@@ -457,7 +466,7 @@ def run(["blocking", nickname]) do
 
     with %User{local: true} = user <- User.get_cached_by_nickname(nickname) do
       blocks = User.following_ap_ids(user)
-      shell_info(blocks)
+      IO.puts("#{inspect(blocks)}")
     end
   end
 
@@ -516,12 +525,12 @@ def run(["fix_follow_state", local_user, remote_user]) do
          {:follow_data, Pleroma.Web.ActivityPub.Utils.fetch_latest_follow(local, remote)} do
       calculated_state = User.following?(local, remote)
 
-      shell_info(
+      IO.puts(
        "Request state is #{request_state}, vs calculated state of following=#{calculated_state}"
       )
 
       if calculated_state == false && request_state == "accept" do
-        shell_info("Discrepancy found, fixing")
+        IO.puts("Discrepancy found, fixing")
         Pleroma.Web.CommonAPI.reject_follow_request(local, remote)
         shell_info("Relationship fixed")
       else
@@ -551,14 +560,14 @@ defp refetch_public_keys(query) do
     |> Stream.each(fn users ->
       users
       |> Enum.each(fn user ->
-        shell_info("Re-Resolving: #{user.ap_id}")
+        IO.puts("Re-Resolving: #{user.ap_id}")
 
         with {:ok, user} <- Pleroma.User.fetch_by_ap_id(user.ap_id),
              changeset <- Pleroma.User.update_changeset(user),
             {:ok, _user} <- Pleroma.User.update_and_set_cache(changeset) do
          :ok
        else
-          error -> shell_info("Could not resolve: #{user.ap_id}, #{inspect(error)}")
+          error -> IO.puts("Could not resolve: #{user.ap_id}, #{inspect(error)}")
        end
      end)
    end)
@@ -26,6 +26,7 @@ def init(%Plug.Conn{method: "GET"} = conn, {endpoint, handler, transport}) do
     conn
     |> fetch_query_params
     |> Transport.transport_log(opts[:transport_log])
+    |> Transport.force_ssl(handler, endpoint, opts)
     |> Transport.check_origin(handler, endpoint, opts)
 
     case conn do
@@ -258,27 +258,6 @@ def get_create_by_object_ap_id(ap_id) when is_binary(ap_id) do
 
   def get_create_by_object_ap_id(_), do: nil
 
-  @doc """
-  Accepts a list of `ap__id`.
-  Returns a query yielding Create activities for the given objects,
-  in the same order as they were specified in the input list.
-  """
-  @spec get_presorted_create_by_object_ap_id([String.t()]) :: Ecto.Queryable.t()
-  def get_presorted_create_by_object_ap_id(ap_ids) do
-    from(
-      a in Activity,
-      join:
-        ids in fragment(
-          "SELECT * FROM UNNEST(?::text[]) WITH ORDINALITY AS ids(ap_id, ord)",
-          ^ap_ids
-        ),
-      on:
-        ids.ap_id == fragment("?->>'object'", a.data) and
-          fragment("?->>'type'", a.data) == "Create",
-      order_by: [asc: ids.ord]
-    )
-  end
-
   @doc """
   Accepts `ap_id` or list of `ap_id`.
   Returns a query.
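The removed `get_presorted_create_by_object_ap_id/1` leans on PostgreSQL's `UNNEST ... WITH ORDINALITY` to return rows in input order. A hedged usage sketch follows; the ids and the `Repo.all/1` call are illustrative, while the helper itself is quoted verbatim in the hunk above.

```elixir
ap_ids = [
  "https://example.com/objects/b",
  "https://example.com/objects/a"
]

ap_ids
|> Pleroma.Activity.get_presorted_create_by_object_ap_id()
|> Pleroma.Repo.all()
# Create activities come back in the order of ap_ids (b before a),
# because the query orders by the ordinality column of the UNNEST join.
```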
@@ -28,7 +28,7 @@ defp get_cache_keys_for(activity_id) do
     end
   end
 
-  def add_cache_key_for(activity_id, additional_key) do
+  defp add_cache_key_for(activity_id, additional_key) do
     current = get_cache_keys_for(activity_id)
 
     unless additional_key in current do
@@ -26,15 +26,6 @@ def prune_undos do
     |> Repo.delete_all(timeout: :infinity)
   end
 
-  def prune_updates do
-    before_time = cutoff()
-
-    from(a in Activity,
-      where: fragment("?->>'type' = ?", a.data, "Update") and a.inserted_at < ^before_time
-    )
-    |> Repo.delete_all(timeout: :infinity)
-  end
-
   def prune_removes do
     before_time = cutoff()
 
@@ -95,17 +95,34 @@ def start(_type, _args) do
 
     opts = [strategy: :one_for_one, name: Pleroma.Supervisor, max_restarts: max_restarts]
 
-    case Supervisor.start_link(children, opts) do
-      {:ok, data} ->
+    with {:ok, data} <- Supervisor.start_link(children, opts) do
+      set_postgres_server_version()
       {:ok, data}
+    else
       e ->
-        Logger.critical("Failed to start!")
-        Logger.critical("#{inspect(e)}")
+        Logger.error("Failed to start!")
+        Logger.error("#{inspect(e)}")
         e
     end
   end
 
+  defp set_postgres_server_version do
+    version =
+      with %{rows: [[version]]} <- Ecto.Adapters.SQL.query!(Pleroma.Repo, "show server_version"),
+           {num, _} <- Float.parse(version) do
+        num
+      else
+        e ->
+          Logger.warning(
+            "Could not get the postgres version: #{inspect(e)}.\nSetting the default value of 9.6"
+          )
+
+          9.6
+      end
+
+    :persistent_term.put({Pleroma.Repo, :postgres_version}, version)
+  end
+
   def load_custom_modules do
     dir = Config.get([:modules, :runtime_dir])
 
@@ -162,9 +179,7 @@ defp cachex_children do
       build_cachex("translations", default_ttl: :timer.hours(24 * 30), limit: 2500),
       build_cachex("instances", default_ttl: :timer.hours(24), ttl_interval: 1000, limit: 2500),
       build_cachex("request_signatures", default_ttl: :timer.hours(24 * 30), limit: 3000),
-      build_cachex("rel_me", default_ttl: :timer.hours(24 * 30), limit: 300),
-      build_cachex("host_meta", default_ttl: :timer.minutes(120), limit: 5000),
-      build_cachex("http_backoff", default_ttl: :timer.hours(24 * 30), limit: 10000)
+      build_cachex("rel_me", default_ttl: :timer.hours(24 * 30), limit: 300)
     ]
   end
 
@@ -264,9 +279,7 @@ def limiters_setup do
   defp http_children do
     proxy_url = Config.get([:http, :proxy_url])
     proxy = Pleroma.HTTP.AdapterHelper.format_proxy(proxy_url)
-    pool_size = Config.get([:http, :pool_size], 10)
-    pool_timeout = Config.get([:http, :pool_timeout], 60_000)
-    connection_timeout = Config.get([:http, :conn_max_idle_time], 10_000)
+    pool_size = Config.get([:http, :pool_size])
 
     :public_key.cacerts_load()
 
@@ -275,9 +288,6 @@ defp http_children do
       |> Config.get([])
       |> Pleroma.HTTP.AdapterHelper.add_pool_size(pool_size)
       |> Pleroma.HTTP.AdapterHelper.maybe_add_proxy_pool(proxy)
-      |> Pleroma.HTTP.AdapterHelper.ensure_ipv6()
-      |> Pleroma.HTTP.AdapterHelper.add_default_conn_max_idle_time(connection_timeout)
-      |> Pleroma.HTTP.AdapterHelper.add_default_pool_max_idle_time(pool_timeout)
       |> Keyword.put(:name, MyFinch)
 
     [{Finch, config}]
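On the otp26 side, `set_postgres_server_version/0` stashes the parsed version in `:persistent_term`; a sketch of how that value would be read back elsewhere. The default in the `get/2` call and the version comparison are assumptions, mirroring the 9.6 fallback above.

```elixir
# Assumed read-back pattern; the 9.6 default mirrors the fallback above.
version = :persistent_term.get({Pleroma.Repo, :postgres_version}, 9.6)

if version >= 11.0 do
  # safe to rely on newer PostgreSQL features here
  :native_features
else
  :compat_path
end
```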
@@ -164,8 +164,7 @@ defp do_check_rum!(setting, migrate) do
 
   defp check_system_commands!(:ok) do
     filter_commands_statuses = [
-      check_filter(Pleroma.Upload.Filter.Exiftool.StripMetadata, "exiftool"),
-      check_filter(Pleroma.Upload.Filter.Exiftool.ReadDescription, "exiftool"),
+      check_filter(Pleroma.Upload.Filter.Exiftool, "exiftool"),
       check_filter(Pleroma.Upload.Filter.Mogrify, "mogrify"),
       check_filter(Pleroma.Upload.Filter.Mogrifun, "mogrify"),
       check_filter(Pleroma.Upload.Filter.AnalyzeMetadata, "mogrify"),
@@ -68,10 +68,7 @@ defp fetch_page_items(id, items \\ []) do
         items
       end
     else
-      {:error, :not_found} ->
-        items
-
-      {:error, :forbidden} ->
+      {:error, {"Object has been deleted", _, _}} ->
         items
 
       {:error, error} ->
@@ -22,43 +22,6 @@ defmodule Pleroma.Config.DeprecationWarnings do
      "\n* `config :pleroma, :instance, :quarantined_instances` is now covered by `:pleroma, :mrf_simple, :reject`"}
   ]
 
-  def check_exiftool_filter do
-    filters = Config.get([Pleroma.Upload]) |> Keyword.get(:filters, [])
-
-    if Pleroma.Upload.Filter.Exiftool in filters do
-      Logger.warning("""
-      !!!DEPRECATION WARNING!!!
-      Your config is using Exiftool as a filter instead of Exiftool.StripMetadata. This should work for now, but you are advised to change to the new configuration to prevent possible issues later:
-
-      ```
-      config :pleroma, Pleroma.Upload,
-        filters: [Pleroma.Upload.Filter.Exiftool]
-      ```
-
-      Is now
-
-
-      ```
-      config :pleroma, Pleroma.Upload,
-        filters: [Pleroma.Upload.Filter.Exiftool.StripMetadata]
-      ```
-      """)
-
-      new_config =
-        filters
-        |> Enum.map(fn
-          Pleroma.Upload.Filter.Exiftool -> Pleroma.Upload.Filter.Exiftool.StripMetadata
-          filter -> filter
-        end)
-
-      Config.put([Pleroma.Upload, :filters], new_config)
-
-      :error
-    else
-      :ok
-    end
-  end
-
   def check_simple_policy_tuples do
     has_strings =
       Config.get([:mrf_simple])
@@ -219,10 +182,7 @@ def warn do
       check_quarantined_instances_tuples(),
       check_transparency_exclusions_tuples(),
       check_simple_policy_tuples(),
-      check_http_adapter(),
-      check_uploader_base_url_set(),
-      check_uploader_base_url_is_not_base_domain(),
-      check_exiftool_filter()
+      check_http_adapter()
     ]
     |> Enum.reduce(:ok, fn
       :ok, :ok -> :ok
@@ -377,54 +337,4 @@ def check_uploders_s3_public_endpoint do
       :ok
     end
   end
 
-  def check_uploader_base_url_set() do
-    uses_local_uploader? = Config.get([Pleroma.Upload, :uploader]) == Pleroma.Uploaders.Local
-    base_url = Pleroma.Config.get([Pleroma.Upload, :base_url])
-
-    if base_url || !uses_local_uploader? do
-      :ok
-    else
-      Logger.error("""
-      !!!WARNING!!!
-      Your config does not specify a base_url for uploads!
-      Please make the following change:\n
-      \n* `config :pleroma, Pleroma.Upload, base_url: "https://example.com/media/`
-      \n
-      \nPlease note that it is HEAVILY recommended to use a subdomain to host user-uploaded media!
-      """)
-
-      # This is a hard exit - the uploader will not work without a base_url
-      raise ArgumentError, message: "No base_url set for uploads - please set one in your config!"
-    end
-  end
-
-  def check_uploader_base_url_is_not_base_domain() do
-    uses_local_uploader? = Config.get([Pleroma.Upload, :uploader]) == Pleroma.Uploaders.Local
-
-    uploader_host =
-      [Pleroma.Upload, :base_url]
-      |> Pleroma.Config.get()
-      |> URI.parse()
-      |> Map.get(:host)
-
-    akkoma_host =
-      [Pleroma.Web.Endpoint, :url]
-      |> Pleroma.Config.get()
-      |> Keyword.get(:host)
-
-    if uploader_host == akkoma_host && uses_local_uploader? do
-      Logger.error("""
-      !!!WARNING!!!
-      Your Akkoma Host and your Upload base_url's host are the same!
-      This can potentially be insecure!
-
-      It is HIGHLY recommended that you migrate your media uploads
-      to a subdomain at your earliest convenience
-      """)
-    end
-
-    # This isn't actually an error condition, just a warning
-    :ok
-  end
 end
@@ -23,7 +23,7 @@ def load(config, opts) do
     with_runtime_config =
       if File.exists?(config_path) do
         # <https://git.pleroma.social/pleroma/pleroma/-/issues/3135>
-        %File.Stat{mode: mode} = File.stat!(config_path)
+        %File.Stat{mode: mode} = File.lstat!(config_path)
 
         if Bitwise.band(mode, 0o007) > 0 do
           raise "Configuration at #{config_path} has world-permissions, execute the following: chmod o= #{config_path}"
@@ -24,6 +24,7 @@ defp reboot_time_keys,
   defp reboot_time_subkeys,
     do: [
      {:pleroma, Pleroma.Captcha, [:seconds_valid]},
+     {:pleroma, Pleroma.Upload, [:proxy_remote]},
      {:pleroma, :instance, [:upload_limit]},
      {:pleroma, :http, [:pool_size]},
      {:pleroma, :http, [:proxy_url]}
@@ -64,7 +64,4 @@ defmodule Pleroma.Constants do
       "Service"
     ]
   )
 
-  # Internally used as top-level types for media attachments and user images
-  const(attachment_types, do: ["Document", "Image"])
 end
@@ -6,13 +6,10 @@ defmodule Pleroma.Emails.AdminEmail do
   @moduledoc "Admin emails"
 
   import Swoosh.Email
-  use Pleroma.Web, :mailer
   alias Pleroma.Config
   alias Pleroma.HTML
+  alias Pleroma.Web.Router.Helpers
-  use Phoenix.VerifiedRoutes,
-    endpoint: Pleroma.Web.Endpoint,
-    router: Pleroma.Web.Router
 
   defp instance_config, do: Config.get(:instance)
   defp instance_name, do: instance_config()[:name]
@@ -48,7 +45,7 @@ def report(to, reporter, account, statuses, comment) do
     statuses
     |> Enum.map(fn
       %{id: id} ->
-        status_url = url(~p[/notice/#{id}])
+        status_url = Helpers.o_status_url(Pleroma.Web.Endpoint, :notice, id)
         "<li><a href=\"#{status_url}\">#{status_url}</li>"
 
       %{"id" => id} when is_binary(id) ->
@@ -55,61 +55,12 @@ def deliver!(email, config) do
 
   @doc false
   def validate_dependency do
-    parse_config([], defaults: false)
+    parse_config([])
     |> Keyword.get(:adapter)
     |> Swoosh.Mailer.validate_dependency()
   end
 
-  defp ensure_charlist(input) do
-    case input do
-      i when is_binary(i) -> String.to_charlist(input)
-      i when is_list(i) -> i
-    end
-  end
-
-  defp default_config(adapter, conf, opts)
-
-  defp default_config(_, _, defaults: false) do
-    []
-  end
-
-  defp default_config(Swoosh.Adapters.SMTP, conf, _) do
-    # gen_smtp and Erlang's tls defaults are very barebones, if nothing is set.
-    # Add sane defaults for our usecase to make config less painful for admins
-    relay = ensure_charlist(Keyword.get(conf, :relay))
-    ssl_disabled = Keyword.get(conf, :ssl) === false
-    os_cacerts = :public_key.cacerts_get()
-
-    common_tls_opts = [
-      cacerts: os_cacerts,
-      versions: [:"tlsv1.2", :"tlsv1.3"],
-      verify: :verify_peer,
-      # some versions have supposedly issues verifying wildcard certs without this
-      server_name_indication: relay,
-      # the default of 10 is too restrictive
-      depth: 32
-    ]
-
-    [
-      auth: :always,
-      no_mx_lookups: false,
-      # Direct SSL/TLS
-      # (if ssl was explicitly disabled, we must not pass TLS options to the socket)
-      ssl: true,
-      sockopts: if(ssl_disabled, do: [], else: common_tls_opts),
-      # STARTTLS upgrade (can't be set to :always when already using direct TLS)
-      tls: :if_available,
-      tls_options: common_tls_opts
-    ]
-  end
-
-  defp default_config(_, _, _), do: []
-
-  defp parse_config(config, opts \\ []) do
-    conf = Swoosh.Mailer.parse_config(@otp_app, __MODULE__, @mailer_config, config)
-    adapter = Keyword.get(conf, :adapter)
-
-    default_config(adapter, conf, opts)
-    |> Keyword.merge(conf)
+  defp parse_config(config) do
+    Swoosh.Mailer.parse_config(@otp_app, __MODULE__, @mailer_config, config)
   end
 end
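The develop-side `parse_config/2` above computes adapter defaults first and merges the admin's configuration on top, so anything set explicitly always wins. A condensed sketch of that `Keyword.merge/2` semantics with purely illustrative values:

```elixir
# Illustrative values only; defaults lose to anything set explicitly.
defaults = [auth: :always, tls: :if_available, ssl: true]
admin_conf = [ssl: false, relay: ~c"smtp.example.com"]

Keyword.merge(defaults, admin_conf)
# => [auth: :always, tls: :if_available, ssl: false, relay: ~c"smtp.example.com"]
```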
@@ -6,11 +6,12 @@ defmodule Pleroma.Emails.UserEmail do
   @moduledoc "User emails"
 
   require Pleroma.Web.Gettext
-  use Pleroma.Web, :mailer
 
   alias Pleroma.Config
   alias Pleroma.User
+  alias Pleroma.Web.Endpoint
   alias Pleroma.Web.Gettext
+  alias Pleroma.Web.Router
 
   import Swoosh.Email
   import Phoenix.Swoosh, except: [render_body: 3]
@@ -74,7 +75,7 @@ def welcome(user, opts \\ %{}) do
 
   def password_reset_email(user, token) when is_binary(token) do
     Gettext.with_locale_or_default user.language do
-      password_reset_url = url(~p[/api/v1/pleroma/password_reset/#{token}])
+      password_reset_url = Router.Helpers.reset_password_url(Endpoint, :reset, token)
 
       html_body =
         Gettext.dpgettext(
@@ -107,7 +108,12 @@ def user_invitation_email(
         to_name \\ nil
       ) do
     Gettext.with_locale_or_default user.language do
-      registration_url = url(~p[/registration/#{user_invite_token.token}])
+      registration_url =
+        Router.Helpers.redirect_url(
+          Endpoint,
+          :registration_page,
+          user_invite_token.token
+        )
 
       html_body =
         Gettext.dpgettext(
@@ -140,7 +146,13 @@ def user_invitation_email(
 
   def account_confirmation_email(user) do
     Gettext.with_locale_or_default user.language do
-      confirmation_url = url(~p[/api/account/confirm_email/#{user.id}/#{user.confirmation_token}])
+      confirmation_url =
+        Router.Helpers.confirm_email_url(
+          Endpoint,
+          :confirm_email,
+          user.id,
+          to_string(user.confirmation_token)
+        )
 
       html_body =
         Gettext.dpgettext(
@@ -330,7 +342,7 @@ def unsubscribe_url(user, notifications_type) do
     |> Pleroma.JWT.generate_and_sign!()
     |> Base.encode64()
 
-    url(~p[/mailer/unsubscribe/#{token}])
+    Router.Helpers.subscription_url(Endpoint, :unsubscribe, token)
   end
 
   def backup_is_ready_email(backup, admin_user_id \\ nil) do
@@ -26,37 +26,12 @@ defmodule Pleroma.Emoji.Pack do
   alias Pleroma.Emoji.Pack
   alias Pleroma.Utils
 
-  # Invalid/Malicious names are supposed to be filtered out before path joining,
-  # but there are many entrypoints to affected functions so as the code changes
-  # we might accidentally let an unsanitised name slip through.
-  # To make sure, use the below which crash the process otherwise.
-
-  # ALWAYS use this when constructing paths from external name!
-  # (name meaning it must be only a single path component)
-  defp path_join_name_safe(dir, name) do
-    if to_string(name) != Path.basename(name) or name in ["..", ".", ""] do
-      raise "Invalid or malicious pack name: #{name}"
-    else
-      Path.join(dir, name)
-    end
-  end
-
-  # ALWAYS use this to join external paths
-  # (which are allowed to have several components)
-  defp path_join_safe(dir, path) do
-    {:ok, safe_path} = Path.safe_relative(path)
-    Path.join(dir, safe_path)
-  end
-
   @spec create(String.t()) :: {:ok, t()} | {:error, File.posix()} | {:error, :empty_values}
   def create(name) do
     with :ok <- validate_not_empty([name]),
-         dir <- path_join_name_safe(emoji_path(), name),
+         dir <- Path.join(emoji_path(), name),
          :ok <- File.mkdir(dir) do
-      save_pack(%__MODULE__{
-        path: dir,
-        pack_file: Path.join(dir, "pack.json")
-      })
+      save_pack(%__MODULE__{pack_file: Path.join(dir, "pack.json")})
     end
   end
 
@@ -90,7 +65,7 @@ def show(opts) do
           {:ok, [binary()]} | {:error, File.posix(), binary()} | {:error, :empty_values}
   def delete(name) do
     with :ok <- validate_not_empty([name]),
-         pack_path <- path_join_name_safe(emoji_path(), name) do
+         pack_path <- Path.join(emoji_path(), name) do
       File.rm_rf(pack_path)
     end
   end
@@ -114,7 +89,7 @@ defp unpack_zip_emojies(zip_files) do
     end)
   end
 
-  @spec add_file(t(), String.t(), Path.t(), Plug.Upload.t() | binary()) ::
+  @spec add_file(t(), String.t(), Path.t(), Plug.Upload.t()) ::
           {:ok, t()}
          | {:error, File.posix() | atom()}
   def add_file(%Pack{} = pack, _, _, %Plug.Upload{content_type: "application/zip"} = file) do
@@ -125,14 +100,14 @@ def add_file(%Pack{} = pack, _, _, %Plug.Upload{content_type: "application/zip"}
     {:ok, _emoji_files} =
       :zip.unzip(
         to_charlist(file.path),
-        [{:file_list, Enum.map(emojies, & &1[:path])}, {:cwd, to_charlist(tmp_dir)}]
+        [{:file_list, Enum.map(emojies, & &1[:path])}, {:cwd, tmp_dir}]
       )
 
     {_, updated_pack} =
       Enum.map_reduce(emojies, pack, fn item, emoji_pack ->
         emoji_file = %Plug.Upload{
           filename: item[:filename],
-          path: path_join_safe(tmp_dir, item[:path])
+          path: Path.join(tmp_dir, item[:path])
         }
 
         {:ok, updated_pack} =
@@ -162,14 +137,6 @@ def add_file(%Pack{} = pack, _, _, %Plug.Upload{content_type: "application/zip"}
   end
 
   def add_file(%Pack{} = pack, shortcode, filename, %Plug.Upload{} = file) do
-    try_add_file(pack, shortcode, filename, file)
-  end
-
-  def add_file(%Pack{} = pack, shortcode, filename, filedata) when is_binary(filedata) do
-    try_add_file(pack, shortcode, filename, filedata)
-  end
-
-  defp try_add_file(%Pack{} = pack, shortcode, filename, file) do
     with :ok <- validate_not_empty([shortcode, filename]),
          :ok <- validate_emoji_not_exists(shortcode),
         {:ok, updated_pack} <- do_add_file(pack, shortcode, filename, file) do
@@ -222,7 +189,6 @@ def import_from_filesystem do
          {:ok, results} <- File.ls(emoji_path) do
       names =
         results
-        # items come from File.ls, thus safe
        |> Enum.map(&Path.join(emoji_path, &1))
        |> Enum.reject(fn path ->
          File.dir?(path) and File.exists?(Path.join(path, "pack.json"))
@@ -321,8 +287,8 @@ def update_metadata(name, data) do
 
   @spec load_pack(String.t()) :: {:ok, t()} | {:error, :file.posix()}
   def load_pack(name) do
-    pack_dir = path_join_name_safe(emoji_path(), name)
-    pack_file = Path.join(pack_dir, "pack.json")
+    name = Path.basename(name)
+    pack_file = Path.join([emoji_path(), name, "pack.json"])
 
     with {:ok, _} <- File.stat(pack_file),
          {:ok, pack_data} <- File.read(pack_file) do
@@ -446,13 +412,7 @@ defp downloadable?(pack) do
   end
 
   defp create_archive_and_cache(pack, hash) do
-    files = [
-      ~c"pack.json"
-      | Enum.map(pack.files, fn {_, file} ->
-          {:ok, file} = Path.safe_relative(file)
-          to_charlist(file)
-        end)
-    ]
+    files = [~c"pack.json" | Enum.map(pack.files, fn {_, file} -> to_charlist(file) end)]
 
     {:ok, {_, result}} =
       :zip.zip(~c"#{pack.name}.zip", files, [:memory, cwd: to_charlist(pack.path)])
@@ -514,7 +474,7 @@ defp validate_not_empty(list) do
   end
 
   defp save_file(%Plug.Upload{path: upload_path}, pack, filename) do
-    file_path = path_join_safe(pack.path, filename)
+    file_path = Path.join(pack.path, filename)
     create_subdirs(file_path)
 
     with {:ok, _} <- File.copy(upload_path, file_path) do
@@ -522,12 +482,6 @@ defp save_file(%Plug.Upload{path: upload_path}, pack, filename) do
     end
   end
 
-  defp save_file(file_data, pack, filename) when is_binary(file_data) do
-    file_path = path_join_safe(pack.path, filename)
-    create_subdirs(file_path)
-    File.write(file_path, file_data, [:binary])
-  end
-
   defp put_emoji(pack, shortcode, filename) do
     files = Map.put(pack.files, shortcode, filename)
     %{pack | files: files, files_count: length(Map.keys(files))}
@@ -539,8 +493,8 @@ defp delete_emoji(pack, shortcode) do
   end
 
   defp rename_file(pack, filename, new_filename) do
-    old_path = path_join_safe(pack.path, filename)
-    new_path = path_join_safe(pack.path, new_filename)
+    old_path = Path.join(pack.path, filename)
+    new_path = Path.join(pack.path, new_filename)
     create_subdirs(new_path)
 
     with :ok <- File.rename(old_path, new_path) do
@@ -558,7 +512,7 @@ defp create_subdirs(file_path) do
 
   defp remove_file(pack, shortcode) do
     with {:ok, filename} <- get_filename(pack, shortcode),
-         emoji <- path_join_safe(pack.path, filename),
+         emoji <- Path.join(pack.path, filename),
          :ok <- File.rm(emoji) do
       remove_dir_if_empty(emoji, filename)
     end
@@ -576,7 +530,7 @@ defp remove_dir_if_empty(emoji, filename) do
 
   defp get_filename(pack, shortcode) do
     with %{^shortcode => filename} when is_binary(filename) <- pack.files,
-         file_path <- path_join_safe(pack.path, filename),
+         file_path <- Path.join(pack.path, filename),
          {:ok, _} <- File.stat(file_path) do
       {:ok, filename}
     else
@@ -614,7 +568,7 @@ defp validate_downloadable(pack) do
   end
 
   defp copy_as(remote_pack, local_name) do
-    path = path_join_name_safe(emoji_path(), local_name)
+    path = Path.join(emoji_path(), local_name)
 
     %__MODULE__{
       name: local_name,
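The develop-side `path_join_name_safe/2` quoted above enforces that a pack name is a single clean path component. Shown here as if it were public, purely to illustrate the contract:

```elixir
path_join_name_safe("/var/lib/akkoma/emoji", "blobcat")
# => "/var/lib/akkoma/emoji/blobcat"

path_join_name_safe("/var/lib/akkoma/emoji", "../../etc")
# ** (RuntimeError) Invalid or malicious pack name: ../../etc
# raises because Path.basename("../../etc") != "../../etc"
```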
@@ -79,10 +79,6 @@ def unzip(zip, dest) do
 
       new_file_path = Path.join(dest, path)
 
-      new_file_path
-      |> Path.dirname()
-      |> File.rm()
-
       new_file_path
       |> Path.dirname()
       |> File.mkdir_p!()
@@ -6,6 +6,8 @@ defmodule Pleroma.HTML do
   # Scrubbers are compiled on boot so they can be configured in OTP releases
   # @on_load :compile_scrubbers
 
+  @cachex Pleroma.Config.get([:cachex, :provider], Cachex)
+
   def compile_scrubbers do
     dir = Path.join(:code.priv_dir(:pleroma), "scrubbers")
 
@@ -65,9 +67,22 @@ def ensure_scrubbed_html(
     end
   end
 
-  @spec extract_first_external_url_from_object(Pleroma.Object.t()) :: String.t() | nil
-  def extract_first_external_url_from_object(%{data: %{"content" => content}})
+  def extract_first_external_url_from_object(%{data: %{"content" => content}} = object)
       when is_binary(content) do
+    unless object.data["fake"] do
+      key = "URL|#{object.id}"
+
+      @cachex.fetch!(:scrubber_cache, key, fn _key ->
+        {:commit, {:ok, extract_first_external_url(content)}}
+      end)
+    else
+      {:ok, extract_first_external_url(content)}
+    end
+  end
+
+  def extract_first_external_url_from_object(_), do: {:error, :no_content}
+
+  def extract_first_external_url(content) do
     content
     |> Floki.parse_fragment!()
     |> Floki.find("a:not(.mention,.hashtag,.attachment,[rel~=\"tag\"])")
@@ -75,6 +90,4 @@ def extract_first_external_url_from_object(%{data: %{"content" => content}})
     |> Floki.attribute("href")
     |> Enum.at(0)
   end
 
-  def extract_first_external_url_from_object(_), do: nil
 end
@@ -74,12 +74,7 @@ def request(method, url, body, headers, options) when is_binary(url) do
     request = build_request(method, headers, options, url, body, params)
     client = Tesla.client([Tesla.Middleware.FollowRedirects, Tesla.Middleware.Telemetry])
 
-    Logger.debug("Outbound: #{method} #{url}")
     request(client, request)
-  rescue
-    e ->
-      Logger.error("Failed to fetch #{url}: #{inspect(e)}")
-      {:error, :fetch_error}
   end
 
   @spec request(Client.t(), keyword()) :: {:ok, Env.t()} | {:error, any()}
@@ -65,15 +65,6 @@ def add_pool_size(opts, pool_size) do
     |> put_in([:pools, :default, :size], pool_size)
   end
 
-  def ensure_ipv6(opts) do
-    # Default transport opts already enable IPv6, so just ensure they're loaded
-    opts
-    |> maybe_add_pools()
-    |> maybe_add_default_pool()
-    |> maybe_add_conn_opts()
-    |> maybe_add_transport_opts()
-  end
-
   defp maybe_add_pools(opts) do
     if Keyword.has_key?(opts, :pools) do
       opts
@@ -105,29 +96,11 @@ defp maybe_add_conn_opts(opts) do
   defp maybe_add_transport_opts(opts) do
     transport_opts = get_in(opts, [:pools, :default, :conn_opts, :transport_opts])
 
-    opts =
-      unless is_nil(transport_opts) do
-        opts
-      else
-        put_in(opts, [:pools, :default, :conn_opts, :transport_opts], [])
-      end
-
-    # IPv6 is disabled and IPv4 enabled by default; ensure we can use both
-    put_in(opts, [:pools, :default, :conn_opts, :transport_opts, :inet6], true)
-  end
-
-  def add_default_pool_max_idle_time(opts, pool_timeout) do
-    opts
-    |> maybe_add_pools()
-    |> maybe_add_default_pool()
-    |> put_in([:pools, :default, :pool_max_idle_time], pool_timeout)
-  end
-
-  def add_default_conn_max_idle_time(opts, connection_timeout) do
-    opts
-    |> maybe_add_pools()
-    |> maybe_add_default_pool()
-    |> put_in([:pools, :default, :conn_max_idle_time], connection_timeout)
+    unless is_nil(transport_opts) do
+      opts
+    else
+      put_in(opts, [:pools, :default, :conn_opts, :transport_opts], [])
+    end
   end
 
   @doc """
@ -1,121 +0,0 @@
|
||||||
defmodule Pleroma.HTTP.Backoff do
|
|
||||||
alias Pleroma.HTTP
|
|
||||||
require Logger
|
|
||||||
|
|
||||||
@cachex Pleroma.Config.get([:cachex, :provider], Cachex)
|
|
||||||
@backoff_cache :http_backoff_cache
|
|
||||||
|
|
||||||
# attempt to parse a timestamp from a header
|
|
||||||
# returns nil if it can't parse the timestamp
|
|
||||||
@spec timestamp_or_nil(binary) :: DateTime.t() | nil
|
|
||||||
defp timestamp_or_nil(header) do
|
|
||||||
case DateTime.from_iso8601(header) do
|
|
||||||
{:ok, stamp, _} ->
|
|
||||||
stamp
|
|
||||||
|
|
||||||
_ ->
|
|
||||||
nil
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
# attempt to parse the x-ratelimit-reset header from the headers
|
|
||||||
@spec x_ratelimit_reset(headers :: list) :: DateTime.t() | nil
|
|
||||||
defp x_ratelimit_reset(headers) do
|
|
||||||
with {_header, value} <- List.keyfind(headers, "x-ratelimit-reset", 0),
|
|
||||||
true <- is_binary(value) do
|
|
||||||
timestamp_or_nil(value)
|
|
||||||
else
|
|
||||||
_ ->
|
|
||||||
nil
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
# attempt to parse the Retry-After header from the headers
|
|
||||||
# this can be either a timestamp _or_ a number of seconds to wait!
|
|
||||||
# we'll return a datetime if we can parse it, or nil if we can't
|
|
||||||
@spec retry_after(headers :: list) :: DateTime.t() | nil
|
|
||||||
defp retry_after(headers) do
|
|
||||||
with {_header, value} <- List.keyfind(headers, "retry-after", 0),
|
|
||||||
true <- is_binary(value) do
|
|
||||||
# first, see if it's an integer
|
|
||||||
case Integer.parse(value) do
|
|
||||||
{seconds, ""} ->
|
|
||||||
Logger.debug("Parsed Retry-After header: #{seconds} seconds")
|
|
||||||
DateTime.utc_now() |> Timex.shift(seconds: seconds)
|
|
||||||
|
|
||||||
_ ->
|
|
||||||
# if it's not an integer, try to parse it as a timestamp
|
|
||||||
timestamp_or_nil(value)
|
|
||||||
end
|
|
||||||
else
|
|
||||||
_ ->
|
|
||||||
nil
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
# given a set of headers, will attempt to find the next backoff timestamp
|
|
||||||
# if it can't find one, it will default to 5 minutes from now
|
|
||||||
@spec next_backoff_timestamp(%{headers: list}) :: DateTime.t()
|
|
||||||
defp next_backoff_timestamp(%{headers: headers}) when is_list(headers) do
|
|
||||||
default_5_minute_backoff =
|
|
||||||
DateTime.utc_now()
|
|
||||||
|> Timex.shift(seconds: 5 * 60)
|
|
||||||
|
|
||||||
backoff =
|
|
||||||
[&x_ratelimit_reset/1, &retry_after/1]
|
|
||||||
|> Enum.map(& &1.(headers))
|
|
||||||
|> Enum.find(&(&1 != nil))
|
|
||||||
|
|
||||||
if is_nil(backoff) do
|
|
||||||
Logger.debug("No backoff headers found, defaulting to 5 minutes from now")
|
|
||||||
default_5_minute_backoff
|
|
||||||
else
|
|
||||||
Logger.debug("Found backoff header, will back off until: #{backoff}")
|
|
||||||
backoff
|
|
||||||
end
|
|
||||||
end
|
|
||||||
|
|
||||||
defp next_backoff_timestamp(_), do: DateTime.utc_now() |> Timex.shift(seconds: 5 * 60)
|
|
||||||
|
|
||||||

  # utility function to check the HTTP response for potential backoff headers
  # will check if we get a 429 or 503 response, and if we do, will back off for a bit
  @spec check_backoff({:ok | :error, HTTP.Env.t()}, binary()) ::
          {:ok | :error, HTTP.Env.t()} | {:error, :ratelimit}
  defp check_backoff({:ok, env}, host) do
    case env.status do
      status when status in [429, 503] ->
        Logger.error("Rate limited on #{host}! Backing off...")
        timestamp = next_backoff_timestamp(env)
        ttl = Timex.diff(timestamp, DateTime.utc_now(), :seconds)
        # cache the host until the backoff timestamp (5 minutes when no header was parsed)
        @cachex.put(@backoff_cache, host, true, ttl: ttl)
        {:error, :ratelimit}

      _ ->
        {:ok, env}
    end
  end

  defp check_backoff(env, _), do: env
@doc """
|
|
||||||
this acts as a single throughput for all GET requests
|
|
||||||
we will check if the host is in the cache, and if it is, we will automatically fail the request
|
|
||||||
this ensures that we don't hammer the server with requests, and instead wait for the backoff to expire
|
|
||||||
this is a very simple implementation, and can be improved upon!
|
|
||||||
"""
|
|
||||||
@spec get(binary, list, list) :: {:ok | :error, HTTP.Env.t()} | {:error, :ratelimit}
|
|
||||||
def get(url, headers \\ [], options \\ []) do
|
|
||||||
%{host: host} = URI.parse(url)
|
|
||||||
|
|
||||||
case @cachex.get(@backoff_cache, host) do
|
|
||||||
{:ok, nil} ->
|
|
||||||
url
|
|
||||||
|> HTTP.get(headers, options)
|
|
||||||
|> check_backoff(host)
|
|
||||||
|
|
||||||
_ ->
|
|
||||||
{:error, :ratelimit}
|
|
||||||
end
|
|
||||||
end
|
|
||||||
end
|
|
|
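
Putting it together, a minimal usage sketch (assuming this module is Pleroma.HTTP.Backoff and using a hypothetical URL):

case Pleroma.HTTP.Backoff.get("https://remote.example/objects/1") do
  {:ok, env} ->
    # host was not in the backoff cache and did not answer 429/503
    {:ok, env.body}

  {:error, :ratelimit} ->
    # this response was a 429/503, or the host is still cached from an
    # earlier one; the cache entry expires after the computed TTL
    :skip
end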
@@ -15,19 +15,8 @@ def start_link(_) do
 
   @impl true
   def init(state) do
-    :telemetry.attach(
-      "oban-monitor-failure",
-      [:oban, :job, :exception],
-      &Pleroma.JobQueueMonitor.handle_event/4,
-      nil
-    )
-
-    :telemetry.attach(
-      "oban-monitor-success",
-      [:oban, :job, :stop],
-      &Pleroma.JobQueueMonitor.handle_event/4,
-      nil
-    )
+    :telemetry.attach("oban-monitor-failure", [:oban, :job, :exception], &handle_event/4, nil)
+    :telemetry.attach("oban-monitor-success", [:oban, :job, :stop], &handle_event/4, nil)
 
     {:ok, state}
   end
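
For context on these attach calls: :telemetry.attach/4 takes a unique handler id, an event name, a 4-arity handler function, and a config term. A minimal self-contained sketch with hypothetical module and handler-id names:

defmodule MyApp.ObanLogger do
  require Logger

  # a handler receives event name, measurements, metadata, and config
  def handle_event([:oban, :job, :exception], measurements, meta, _config) do
    Logger.error("Oban job failed: #{inspect(meta)} (#{inspect(measurements)})")
  end
end

# attach once, e.g. during application start; the handler id must be unique
:telemetry.attach(
  "myapp-oban-failure",
  [:oban, :job, :exception],
  &MyApp.ObanLogger.handle_event/4,
  nil
)

Remote captures like &MyApp.ObanLogger.handle_event/4 are the recommended form; recent telemetry versions warn when an anonymous function or local capture is attached, as those are invoked less efficiently.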
46
lib/pleroma/keys.ex
Normal file

@@ -0,0 +1,46 @@
# Pleroma: A lightweight social networking server
# Copyright © 2017-2021 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only

defmodule Pleroma.Keys do
  # Native generation of RSA keys is only available since OTP 20+ and in default build conditions
  # We try at compile time to generate natively an RSA key otherwise we fallback on the old way.
  try do
    _ = :public_key.generate_key({:rsa, 2048, 65_537})

    def generate_rsa_pem do
      key = :public_key.generate_key({:rsa, 2048, 65_537})
      entry = :public_key.pem_entry_encode(:RSAPrivateKey, key)
      pem = :public_key.pem_encode([entry]) |> String.trim_trailing()
      {:ok, pem}
    end
  rescue
    _ ->
      def generate_rsa_pem do
        port = Port.open({:spawn, "openssl genrsa"}, [:binary])

        {:ok, pem} =
          receive do
            {^port, {:data, pem}} -> {:ok, pem}
          end

        Port.close(port)

        if Regex.match?(~r/RSA PRIVATE KEY/, pem) do
          {:ok, pem}
        else
          :error
        end
      end
  end

  def keys_from_pem(pem) do
    with [private_key_code] <- :public_key.pem_decode(pem),
         private_key <- :public_key.pem_entry_decode(private_key_code),
         {:RSAPrivateKey, _, modulus, exponent, _, _, _, _, _, _, _} <- private_key do
      {:ok, private_key, {:RSAPublicKey, modulus, exponent}}
    else
      error -> {:error, error}
    end
  end
end
@@ -178,10 +178,7 @@ def normalize(ap_id, options) when is_binary(ap_id) do
       ap_id
 
       Keyword.get(options, :fetch) ->
-        case Fetcher.fetch_object_from_id(ap_id, options) do
-          {:ok, object} -> object
-          _ -> nil
-        end
+        Fetcher.fetch_object_from_id!(ap_id, options)
 
       true ->
         get_cached_by_ap_id(ap_id)
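
The two sides differ only in fetch style: one unwraps the {:ok, object} tuple by hand, the other calls a bang variant. A generic sketch of the equivalence (hypothetical Fetcher API, following the usual Elixir convention that fun! returns the bare value on success):

# tuple-unwrapping style
object =
  case Fetcher.fetch_object_from_id(ap_id, options) do
    {:ok, object} -> object
    _ -> nil
  end

# bang style: the unwrapping lives inside fetch_object_from_id!/2
object = Fetcher.fetch_object_from_id!(ap_id, options)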
Some files were not shown because too many files have changed in this diff.