Compare commits

develop..bad-uploader-config-warning

No commits in common. "develop" and "bad-uploader-config-warning" have entirely different histories.

289 changed files with 22669 additions and 44623 deletions

.buildpacks (new file)

@@ -0,0 +1 @@
+https://github.com/hashnuke/heroku-buildpack-elixir

.gitignore

@@ -78,4 +78,3 @@ docs/venv
 # docker stuff
 docker-db
 *.iml
-docker-compose.override.yml

(unnamed file)

@@ -1,5 +1,4 @@
-labels:
-  platform: linux/amd64
+platform: linux/amd64
 depends_on:
   - test
@@ -35,7 +34,7 @@ variables:
 - &clean "(rm -rf release || true) && (rm -rf _build || true) && (rm -rf /root/.mix)"
 - &mix-clean "mix deps.clean --all && mix clean"
-steps:
+pipeline:
 # Canonical amd64
 debian-bookworm:
   image: hexpm/elixir:1.15.4-erlang-26.0.2-debian-bookworm-20230612

(unnamed file)

@@ -1,5 +1,4 @@
-labels:
-  platform: linux/aarch64
+platform: linux/arm64
 depends_on:
   - test
@@ -35,7 +34,7 @@ variables:
 - &clean "(rm -rf release || true) && (rm -rf _build || true) && (rm -rf /root/.mix)"
 - &mix-clean "mix deps.clean --all && mix clean"
-steps:
+pipeline:
 # Canonical arm64
 debian-bookworm:
   image: hexpm/elixir:1.15.4-erlang-26.0.2-debian-bookworm-20230612

(unnamed file)

@@ -1,5 +1,4 @@
-labels:
-  platform: linux/amd64
+platform: linux/amd64
 depends_on:
   - test
@@ -46,7 +45,7 @@ variables:
 - &clean "(rm -rf release || true) && (rm -rf _build || true) && (rm -rf /root/.mix)"
 - &mix-clean "mix deps.clean --all && mix clean"
-steps:
+pipeline:
 docs:
   <<: *on-point-release
   secrets:

(unnamed file)

@@ -1,5 +1,4 @@
-labels:
-  platform: linux/amd64
+platform: linux/amd64
 variables:
 - &scw-secrets
@@ -42,9 +41,9 @@ variables:
 - &clean "(rm -rf release || true) && (rm -rf _build || true) && (rm -rf /root/.mix)"
 - &mix-clean "mix deps.clean --all && mix clean"
-steps:
+pipeline:
 lint:
-  image: akkoma/ci-base:1.16-otp26
+  image: akkoma/ci-base:1.15-otp26
   <<: *on-pr-open
   environment:
     MIX_ENV: test

(unnamed file)

@@ -1,5 +1,4 @@
-labels:
-  platform: linux/amd64
+platform: linux/amd64
 depends_on:
   - lint
@@ -8,12 +7,15 @@ matrix:
 ELIXIR_VERSION:
   - 1.14
   - 1.15
-  - 1.16
 OTP_VERSION:
   - 25
   - 26
 include:
-  - ELIXIR_VERSION: 1.16
+  - ELIXIR_VERSION: 1.14
+    OTP_VERSION: 25
+  - ELIXIR_VERSION: 1.15
+    OTP_VERSION: 25
+  - ELIXIR_VERSION: 1.15
     OTP_VERSION: 26
 variables:
@@ -68,7 +70,7 @@ services:
   POSTGRES_USER: postgres
   POSTGRES_PASSWORD: postgres
-steps:
+pipeline:
 test:
   image: akkoma/ci-base:${ELIXIR_VERSION}-otp${OTP_VERSION}
   <<: *on-pr-open
@@ -87,5 +89,5 @@ steps:
 - mix ecto.create
 - mix ecto.migrate
 - mkdir -p test/tmp
-- mix test --preload-modules --exclude erratic --exclude federated --exclude mocked || mix test --failed
-- mix test --preload-modules --only mocked || mix test --failed
+- mix test --preload-modules --exclude erratic --exclude federated --exclude mocked
+- mix test --preload-modules --only mocked

(unnamed file)

@@ -4,73 +4,6 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
-## 3.13.3
-## BREAKING
-- Minimum PostgreSQL version is raised to 12
-- Swagger UI moved from `/akkoma/swaggerui/` to `/pleroma/swaggerui/`
-## Added
-- Implement [FEP-67ff](https://codeberg.org/fediverse/fep/src/branch/main/fep/67ff/fep-67ff.md) (federation documentation)
-- Meilisearch: it is now possible to use separate keys for search and admin actions
-- New standalone `prune_orphaned_activities` mix task with configurable batch limit
-- The `prune_objects` mix task now accepts a `--limit` parameter for initial object pruning
-## Fixed
-- Meilisearch: order of results returned from our REST API now actually matches how Meilisearch ranks results
-- Emoji are now federated as anonymous objects, fixing issues with
-  some strict servers e.g. rejecting e.g. remote emoji reactions
-- AP objects with additional JSON-LD profiles beyond ActivityStreams can now be fetched
-- Single-selection polls no longer expose the voter_count; MastoAPI demands it be null
-  and this confused some clients leading to vote distributions >100%
-## Changed
-- Refactored Rich Media to cache the content in the database. Fetching operations that could block status rendering have been eliminated.
-## 2024.04.1 (Security)
-## Fixed
-- Issue allowing non-owners to use media objects in posts
-- Issue allowing use of non-media objects as attachments and crashing timeline rendering
-- Issue allowing webfinger spoofing in certain situations
-## 2024.04
-## Added
-- Support for [FEP-fffd](https://codeberg.org/fediverse/fep/src/branch/main/fep/fffd/fep-fffd.md) (proxy objects)
-- Verified support for elixir 1.16
-- Uploadfilter `Pleroma.Upload.Filter.Exiftool.ReadDescription` returns description values to the FE so they can pre fill the image description field
-  NOTE: this filter MUST be placed before `Exiftool.StripMetadata` to work
-## Changed
-- Inbound pipeline error handing was modified somewhat, which should lead to less incomprehensible log spam. Hopefully.
-- Uploadfilter `Pleroma.Upload.Filter.Exiftool` was replaced by `Pleroma.Upload.Filter.Exiftool.StripMetadata`;
-  the latter strips all non-essential metadata by default but can be configured.
-  To regain the old behaviour of only stripping GPS data set `purge: ["gps:all"]`.
-- Uploadfilter `Pleroma.Upload.Filter.Exiftool` has been renamed to `Pleroma.Upload.Filter.Exiftool.StripMetadata`
-- MRF.InlineQuotePolicy now prefers to insert display URLs instead of ActivityPub IDs
-- Old accounts are no longer listed in WebFinger as aliases; this was breaking spec
-## Fixed
-- Issue preventing fetching anything from IPv6-only instances
-- Issue allowing post content to leak via opengraph tags despite :restrict\_unauthenticated being set
-- Move activities no longer operate on stale user data
-- Missing definitions in our JSON-LD context
-- Issue mangling newlines in code blocks for RSS/Atom feeds
-- static\_fe squeezing non-square avatars and emoji
-- Issue leading to properly JSON-LD compacted emoji reactions being rejected
-- We now use a standard-compliant Accept header when fetching ActivityPub objects
-- /api/pleroma/notification\_settings was rejecting body parameters;
-  this also broke changing this setting via akkoma-fe
-- Issue leading to Mastodon bot accounts being rejected
-- Scope misdetection of remote posts resulting from not recognising
-  JSON-LD-compacted forms of public scope; affected e.g. federation with bovine
-- Ratelimits encountered when fetching objects are now respected; 429 responses will cause a backoff when we get one.
-## Removed
-- ActivityPub Client-To-Server write API endpoints have been disabled;
-  read endpoints are planned to be removed next release unless a clear need is demonstrated
 ## 2024.03
 ## Added
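The `purge` option named in the Changed entry above takes ExifTool tag or group names. A minimal sketch of restoring the old GPS-only stripping behaviour, assuming `Pleroma.Upload.Filter.Exiftool.StripMetadata` is already listed in the upload `filters`:

```elixir
# Hedged example: keep only GPS stripping, mirroring the behaviour of the
# old Pleroma.Upload.Filter.Exiftool filter this one replaced.
config :pleroma, Pleroma.Upload.Filter.Exiftool.StripMetadata,
  purge: ["gps:all"]
```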

(unnamed file)

@@ -1,42 +0,0 @@
-# Federation
-## Supported federation protocols and standards
-- [ActivityPub](https://www.w3.org/TR/activitypub/) (Server-to-Server)
-- [WebFinger](https://webfinger.net/)
-- [Http Signatures](https://datatracker.ietf.org/doc/html/draft-cavage-http-signatures)
-- [NodeInfo](https://nodeinfo.diaspora.software/)
-## Supported FEPs
-- [FEP-67ff: FEDERATION](https://codeberg.org/fediverse/fep/src/branch/main/fep/67ff/fep-67ff.md)
-- [FEP-f1d5: NodeInfo in Fediverse Software](https://codeberg.org/fediverse/fep/src/branch/main/fep/f1d5/fep-f1d5.md)
-- [FEP-fffd: Proxy Objects](https://codeberg.org/fediverse/fep/src/branch/main/fep/fffd/fep-fffd.md)
-## ActivityPub
-Akkoma mostly follows the server-to-server parts of the ActivityPub standard,
-but implements quirks for Mastodon compatibility as well as Mastodon-specific
-and custom extensions.
-See our documentation and Mastodon's federation information
-linked further below for details on these quirks and extensions.
-Akkoma does not perform JSON-LD processing.
-### Required extensions
-#### HTTP Signatures
-All AP S2S POST requests to Akkoma instances MUST be signed.
-Depending on instance configuration the same may be true for GET requests.
-## Nodeinfo
-Akkoma provides many additional entries in its nodeinfo response,
-see the documentation linked below for details.
-## Additional documentation
-- [Akkoma's ActivityPub extensions](https://docs.akkoma.dev/develop/development/ap_extensions/)
-- [Akkoma's nodeinfo extensions](https://docs.akkoma.dev/develop/development/nodeinfo_extensions/)
-- [Mastodon's federation requirements](https://github.com/mastodon/mastodon/blob/main/FEDERATION.md)

Procfile (new file)

@@ -0,0 +1,2 @@
+web: mix phx.server
+release: mix ecto.migrate

(unnamed file)

@@ -63,6 +63,7 @@
   uploader: Pleroma.Uploaders.Local,
   filters: [],
   link_name: false,
+  proxy_remote: false,
   filename_display_max_length: 30,
   base_url: nil,
   allowed_mime_types: ["image", "audio", "video"]
@@ -188,10 +189,8 @@
   receive_timeout: :timer.seconds(15),
   proxy_url: nil,
   user_agent: :default,
-  pool_size: 10,
-  adapter: [],
-  # see: https://hexdocs.pm/finch/Finch.html#start_link/1
-  pool_max_idle_time: :timer.seconds(30)
+  pool_size: 50,
+  adapter: []
 config :pleroma, :instance,
   name: "Akkoma",
@@ -438,12 +437,8 @@
     Pleroma.Web.RichMedia.Parsers.TwitterCard,
     Pleroma.Web.RichMedia.Parsers.OEmbed
   ],
-  failure_backoff: 60_000,
-  ttl_setters: [
-    Pleroma.Web.RichMedia.Parser.TTL.AwsSignedUrl,
-    Pleroma.Web.RichMedia.Parser.TTL.Opengraph
-  ],
-  max_body: 5_000_000
+  failure_backoff: :timer.minutes(20),
+  ttl_setters: [Pleroma.Web.RichMedia.Parser.TTL.AwsSignedUrl]
 config :pleroma, :media_proxy,
   enabled: false,
@@ -581,9 +576,7 @@
     mute_expire: 5,
     search_indexing: 10,
     nodeinfo_fetcher: 1,
-    database_prune: 1,
-    rich_media_backfill: 2,
-    rich_media_expiration: 2
+    database_prune: 1
   ],
   plugins: [
     Oban.Plugins.Pruner,
@@ -599,8 +592,7 @@
   retries: [
     federator_incoming: 5,
     federator_outgoing: 5,
-    search_indexing: 2,
-    rich_media_backfill: 3
+    search_indexing: 2
   ],
   timeout: [
     activity_expiration: :timer.seconds(5),
@@ -622,8 +614,7 @@
     mute_expire: :timer.seconds(5),
     search_indexing: :timer.seconds(5),
     nodeinfo_fetcher: :timer.seconds(10),
-    database_prune: :timer.minutes(10),
-    rich_media_backfill: :timer.seconds(30)
+    database_prune: :timer.minutes(10)
   ]
 config :pleroma, Pleroma.Formatter,
@@ -822,10 +813,8 @@
 config :pleroma, configurable_from_database: false
 config :pleroma, Pleroma.Repo,
-  parameters: [
-    gin_fuzzy_search_limit: "500",
-    plan_cache_mode: "force_custom_plan"
-  ]
+  parameters: [gin_fuzzy_search_limit: "500"],
+  prepare: :unnamed
 config :pleroma, :majic_pool, size: 2

(unnamed file)

@@ -118,6 +118,14 @@
       "font"
     ]
   },
+  %{
+    key: :proxy_remote,
+    type: :boolean,
+    description: """
+    Proxy requests to the remote uploader.\n
+    Useful if media upload endpoint is not internet accessible.
+    """
+  },
   %{
     key: :filename_display_max_length,
     type: :integer,
@@ -214,26 +222,6 @@
       }
     ]
   },
-  %{
-    group: :pleroma,
-    key: Pleroma.Upload.Filter.Exiftool.StripMetadata,
-    type: :group,
-    description: "Strip specified metadata from image uploads",
-    children: [
-      %{
-        key: :purge,
-        description: "Metadata fields or groups to strip",
-        type: {:list, :string},
-        suggestions: ["all", "CommonIFD0"]
-      },
-      %{
-        key: :preserve,
-        description: "Metadata fields or groups to preserve (takes precedence over stripping)",
-        type: {:list, :string},
-        suggestions: ["ColorSpaces", "Orientation"]
-      }
-    ]
-  },
   %{
     group: :pleroma,
     key: Pleroma.Emails.Mailer,
@@ -2709,8 +2697,8 @@
   %{
     key: :pool_size,
     type: :integer,
-    description: "Number of concurrent outbound HTTP requests to allow PER HOST. Default 10.",
-    suggestions: [10]
+    description: "Number of concurrent outbound HTTP requests to allow. Default 50.",
+    suggestions: [50]
   },
   %{
     key: :adapter,
@@ -2733,13 +2721,6 @@
       ]
     }
   ]
-},
-%{
-  key: :pool_max_idle_time,
-  type: :integer,
-  description:
-    "Number of seconds to retain an HTTP pool; pool will remain if actively in use. Default 30 seconds (in ms).",
-  suggestions: [30_000]
 }
 ]
 },
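The two sides of the pool change above take different approaches: develop keeps a smaller per-host limit plus an idle timeout, while the branch uses one larger shared limit. A sketch of the develop-side values; that they live under the `:http` group is an assumption, since the excerpt does not name the group:

```elixir
# Assumed group name (:http); values are taken from the config.exs hunk above.
config :pleroma, :http,
  pool_size: 10,
  # see: https://hexdocs.pm/finch/Finch.html#start_link/1
  pool_max_idle_time: :timer.seconds(30)
```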

config/dokku.exs (new file)

@@ -0,0 +1,25 @@
+import Config
+config :pleroma, Pleroma.Web.Endpoint,
+  http: [
+    port: String.to_integer(System.get_env("PORT") || "4000"),
+    protocol_options: [max_request_line_length: 8192, max_header_value_length: 8192]
+  ],
+  protocol: "http",
+  secure_cookie_flag: false,
+  url: [host: System.get_env("APP_HOST"), scheme: "https", port: 443],
+  secret_key_base: "+S+ULgf7+N37c/lc9K66SMphnjQIRGklTu0BRr2vLm2ZzvK0Z6OH/PE77wlUNtvP"
+database_url =
+  System.get_env("DATABASE_URL") ||
+    raise """
+    environment variable DATABASE_URL is missing.
+    For example: ecto://USER:PASS@HOST/DATABASE
+    """
+config :pleroma, Pleroma.Repo,
+  # ssl: true,
+  url: database_url,
+  pool_size: String.to_integer(System.get_env("POOL_SIZE") || "10")
+config :pleroma, :instance, name: "#{System.get_env("APP_NAME")} CI Instance"

(unnamed file)

@@ -26,8 +26,6 @@
   filters: [],
   link_name: false
-config :pleroma, :media_proxy, base_url: "http://localhost:4001"
 config :pleroma, Pleroma.Uploaders.Local, uploads: "test/uploads"
 config :pleroma, Pleroma.Emails.Mailer, adapter: Swoosh.Adapters.Test, enabled: true
@@ -51,8 +49,7 @@
   hostname: System.get_env("DB_HOST") || "localhost",
   pool: Ecto.Adapters.SQL.Sandbox,
   pool_size: 50,
-  queue_target: 5000,
-  log: false
+  queue_target: 5000
 config :pleroma, :dangerzone, override_repo_pool_size: true
@@ -64,8 +61,7 @@
 config :pleroma, :rich_media,
   enabled: false,
   ignore_hosts: [],
-  ignore_tld: ["local", "localdomain", "lan"],
-  max_body: 2_000_000
+  ignore_tld: ["local", "localdomain", "lan"]
 config :pleroma, :instance,
   multi_factor_authentication: [
@@ -143,8 +139,6 @@
 config :pleroma, :instances_favicons, enabled: false
 config :pleroma, :instances_nodeinfo, enabled: false
-config :pleroma, Pleroma.Web.RichMedia.Backfill, provider: Pleroma.Web.RichMedia.Backfill
 if File.exists?("./config/test.secret.exs") do
   import_config "test.secret.exs"
 else

coveralls.json (new file)

@@ -0,0 +1,7 @@
+{
+  "skip_files": [
+    "test/support",
+    "lib/mix/tasks/pleroma/benchmark.ex",
+    "lib/credo/check/consistency/file_location.ex"
+  ]
+}

(unnamed file)

@@ -1,10 +0,0 @@
-if [ "$#" -ne 2 ]; then
-    echo "Usage: binary-leak-checker.sh <nodename> <erlang cookie>"
-    exit 1
-fi
-echo "The command you want to run is:
-:recon.bin_leak(10)
-"
-iex --sname debug --remsh $1 --erl "-setcookie $2"

(unnamed file)

@@ -46,7 +46,7 @@ services:
     volumes:
       - .:/opt/akkoma
-# Copy this into docker-compose.override.yml and uncomment there if you want to use a reverse proxy
+# Uncomment the following if you want to use a reverse proxy
 #proxy:
 #  image: caddy:2-alpine
 #  restart: unless-stopped

(unnamed file)

@@ -11,4 +11,4 @@ echo "-- Running migrations..."
 mix ecto.migrate
 echo "-- Starting!"
-elixir --erl "+sbwt none +sbwtdcpu none +sbwtdio none" -S mix phx.server
+mix phx.server

(unnamed file)

@@ -50,39 +50,9 @@ This will prune remote posts older than 90 days (configurable with [`config :ple
 - `--keep-threads` - Don't prune posts when they are part of a thread where at least one post has seen local interaction (e.g. one of the posts is a local post, or is favourited by a local user, or has been repeated by a local user...). It also wont delete posts when at least one of the posts in that thread is kept (e.g. because one of the posts has seen recent activity).
 - `--keep-non-public` - Keep non-public posts like DM's and followers-only, even if they are remote.
-- `--limit` - limits how many remote posts get pruned. This limit does **not** apply to any of the follow up jobs. If wanting to keep the database load in check it is thus advisable to run the standalone `prune_orphaned_activities` task with a limit afterwards instead of passing `--prune-orphaned-activities` to this task.
 - `--prune-orphaned-activities` - Also prune orphaned activities afterwards. Activities are things like Like, Create, Announce, Flag (aka reports)... They can significantly help reduce the database size.
 - `--vacuum` - Run `VACUUM FULL` after the objects are pruned. This should not be used on a regular basis, but is useful if your instance has been running for a long time before pruning.
-## Prune orphaned activities from the database
-This will prune activities which are no longer referenced by anything.
-Such activities might be the result of running `prune_objects` without `--prune-orphaned-activities`.
-The same notes and warnings apply as for `prune_objects`.
-The task will print out how many rows were freed in total in its last
-line of output in the form `Deleted 345 rows`.
-When running the job in limited batches this can be used to determine
-when all orphaned activities have been deleted.
-=== "OTP"
-    ```sh
-    ./bin/pleroma_ctl database prune_orphaned_activities [option ...]
-    ```
-=== "From Source"
-    ```sh
-    mix pleroma.database prune_orphaned_activities [option ...]
-    ```
-### Options
-- `--limit n` - Only delete up to `n` activities in each query making up this job, i.e. if this job runs two queries at most `2n` activities will be deleted. Running this task repeatedly in limited batches can help maintain the instance's responsiveness while still freeing up some space.
-- `--no-singles` - Do not delete activites referencing single objects
-- `--no-arrays` - Do not delete activites referencing an array of objects
 ## Create a conversation for all existing DMs
 Can be safely re-run

(unnamed file)

@@ -37,8 +37,7 @@ If any of the options are left unspecified, you will be prompted interactively.
 - `--static-dir <path>` - the directory custom public files should be read from (custom emojis, frontend bundle overrides, robots.txt, etc.)
 - `--listen-ip <ip>` - the ip the app should listen to, defaults to 127.0.0.1
 - `--listen-port <port>` - the port the app should listen to, defaults to 4000
-- `--strip-uploads-metadata <Y|N>` - use ExifTool to strip uploads of metadata when possible
-- `--read-uploads-description <Y|N>` - use ExifTool to read image descriptions from uploads
+- `--strip-uploads <Y|N>` - use ExifTool to strip uploads of sensitive location data
 - `--anonymize-uploads <Y|N>` - randomize uploaded filenames
 - `--dedupe-uploads <Y|N>` - store files based on their hash to reduce data storage requirements if duplicates are uploaded with different filenames
 - `--skip-release-env` - skip generation the release environment file

(unnamed file)

@@ -4,12 +4,12 @@
 1. Stop the Akkoma service.
 2. Go to the working directory of Akkoma (default is `/opt/akkoma`)
-3. Run `sudo -Hu postgres pg_dump -d akkoma --format=custom -f </path/to/backup_location/akkoma.pgdump>`[¹] (make sure the postgres user has write access to the destination file)
-4. Copy `akkoma.pgdump`, `config/config.exs`[²], `uploads` folder, and [static directory](../configuration/static_dir.md) to your backup destination. If you have other modifications, copy those changes too.
+3. Run[¹] `sudo -Hu postgres pg_dump -d akkoma --format=custom -f </path/to/backup_location/akkoma.pgdump>` (make sure the postgres user has write access to the destination file)
+4. Copy `akkoma.pgdump`, `config/prod.secret.exs`[²], `config/setup_db.psql` (if still available) and the `uploads` folder to your backup destination. If you have other modifications, copy those changes too.
 5. Restart the Akkoma service.
-[¹]: We assume the database name is "akkoma". If not, you can find the correct name in your configuration files.
-[²]: If you have a from source installation, you need `config/prod.secret.exs` instead of `config/config.exs`. The `config/config.exs` file also exists, but in case of from source installations, it only contains the default values and it is tracked by Git, so you don't need to back it up.
+[¹]: We assume the database name is "akkoma". If not, you can find the correct name in your config files.
+[²]: If you've installed using OTP, you need `config/config.exs` instead of `config/prod.secret.exs`.
 ## Restore/Move
@@ -17,16 +17,19 @@
 2. Stop the Akkoma service.
 3. Go to the working directory of Akkoma (default is `/opt/akkoma`)
 4. Copy the above mentioned files back to their original position.
-5. Drop the existing database and user[¹]. `sudo -Hu postgres psql -c 'DROP DATABASE akkoma;';` `sudo -Hu postgres psql -c 'DROP USER akkoma;'`
-6. Restore the database schema and akkoma role[¹] (replace the password with the one you find in the configuration file), `sudo -Hu postgres psql -c "CREATE USER akkoma WITH ENCRYPTED PASSWORD '<database-password-wich-you-can-find-in-your-configuration-file>';"` `sudo -Hu postgres psql -c "CREATE DATABASE akkoma OWNER akkoma;"`.
+5. Drop the existing database and user if restoring in-place[¹]. `sudo -Hu postgres psql -c 'DROP DATABASE akkoma;';` `sudo -Hu postgres psql -c 'DROP USER akkoma;'`
+6. Restore the database schema and akkoma role using either of the following options
+    * You can use the original `setup_db.psql` if you have it[²]: `sudo -Hu postgres psql -f config/setup_db.psql`.
+    * Or recreate the database and user yourself (replace the password with the one you find in the config file) `sudo -Hu postgres psql -c "CREATE USER akkoma WITH ENCRYPTED PASSWORD '<database-password-wich-you-can-find-in-your-config-file>'; CREATE DATABASE akkoma OWNER akkoma;"`.
 7. Now restore the Akkoma instance's data into the empty database schema[¹]: `sudo -Hu postgres pg_restore -d akkoma -v -1 </path/to/backup_location/akkoma.pgdump>`
-8. If you installed a newer Akkoma version, you should run the database migrations `./bin/pleroma_ctl migrate`[²].
+8. If you installed a newer Akkoma version, you should run `MIX_ENV=prod mix ecto.migrate`[³]. This task performs database migrations, if there were any.
 9. Restart the Akkoma service.
 10. Run `sudo -Hu postgres vacuumdb --all --analyze-in-stages`. This will quickly generate the statistics so that postgres can properly plan queries.
-11. If setting up on a new server, configure Nginx by using the `installation/nginx/akkoma.nginx` configuration sample or reference the Akkoma installation guide which contains the Nginx configuration instructions.
-[¹]: We assume the database name and user are both "akkoma". If not, you can find the correct name in your configuration files.
-[²]: If you have a from source installation, the command is `MIX_ENV=prod mix ecto.migrate`. Note that we prefix with `MIX_ENV=prod` to use the `config/prod.secret.exs` configuration file.
+11. If setting up on a new server configure Nginx by using the `installation/akkoma.nginx` config sample or reference the Akkoma installation guide for your OS which contains the Nginx configuration instructions.
+[¹]: We assume the database name and user are both "akkoma". If not, you can find the correct name in your config files.
+[²]: You can recreate the `config/setup_db.psql` by running the `mix pleroma.instance gen` task again. You can ignore most of the questions, but make the database user, name, and password the same as found in your backed up config file. This will also create a new `config/generated_config.exs` file which you may delete as it is not needed.
+[³]: Prefix with `MIX_ENV=prod` to run it using the production config file.
 ## Remove

(unnamed file)

@@ -1,15 +1,12 @@
 # Akkoma Clients
-This is a list of clients that are known to work with Akkoma.
-Apps listed here might not support all of Akkoma's features.
-!!! warning
-    **Clients listed here are not officially supported by the Akkoma project.**
-    Some Akkoma features may be unsupported by these clients.
+Note: Additional clients may work, but these are known to work with Akkoma.
 ## Multiplatform
 ### Kaiteki
 - Homepage: <https://kaiteki.app/>
 - Source Code: <https://github.com/Kaiteki-Fedi/Kaiteki>
-- Contact: [@kaiteki@social.kaiteki.app](https://social.kaiteki.app/@kaiteki)
+- Contact: [@kaiteki@fedi.software](https://fedi.software/@Kaiteki)
 - Platforms: Web, Windows, Linux, Android
 - Features: MastoAPI, Supports multiple backends
@@ -41,6 +38,12 @@ This is a list of clients that are known to work with Akkoma.
 - Platforms: Android
 - Features: MastoAPI, No Streaming, Emoji Reactions, Text Formatting, FE Stickers
+### Fedi
+- Homepage: <https://www.fediapp.com/>
+- Source Code: Proprietary, but gratis
+- Platforms: iOS, Android
+- Features: MastoAPI, Pleroma-specific features like Reactions
 ### Tusky
 - Homepage: <https://tuskyapp.github.io/>
 - Source Code: <https://github.com/tuskyapp/Tusky>
@@ -48,18 +51,12 @@ This is a list of clients that are known to work with Akkoma.
 - Platforms: Android
 - Features: MastoAPI, No Streaming
-### Subway Tooter
-- Source Code: <https://github.com/tateisu/SubwayTooter/>
-- Contact: [@SubwayTooter@mastodon.juggler.jp](https://mastodon.juggler.jp/@SubwayTooter)
-- Platforms: Android
-- Features: MastoAPI, Editing, Emoji Reactions (including custom emoji)
 ## Alternative Web Interfaces
-### Enafore
-- An actively developed fork of Pinafore with improved Akkoma support
-- Homepage: <https://enafore.social/>
-- Source Code: <https://github.com/enafore/enafore>
-- Contact: [@enfore@enafore.social](https://meta.enafore.social/@enafore)
+### Pinafore
+- Note: Pinafore is unmaintained (See [the author's original article](https://nolanlawson.com/2023/01/09/retiring-pinafore/) for details)
+- Homepage: <https://pinafore.social/>
+- Source Code: <https://github.com/nolanlawson/pinafore>
+- Contact: [@pinafore@mastodon.technology](https://mastodon.technology/users/pinafore)
 - Features: MastoAPI, No Streaming
 ### Sengi

(unnamed file)

@@ -63,8 +63,6 @@ To add configuration to your config file, you can copy it from the base config.
 * `local_bubble`: Array of domains representing instances closely related to yours. Used to populate the `bubble` timeline. e.g `["example.com"]`, (default: `[]`)
 * `languages`: List of Language Codes used by the instance. This is used to try and set a default language from the frontend. It will try and find the first match between the languages set here and the user's browser languages. It will default to the first language in this setting if there is no match.. (default `["en"]`)
 * `export_prometheus_metrics`: Enable prometheus metrics, served at `/api/v1/akkoma/metrics`, requiring the `admin:metrics` oauth scope.
-* `privileged_staff`: Set to `true` to give moderators access to a few higher responsibility actions.
-* `federated_timeline_available`: Set to `false` to remove access to the federated timeline for all users.
 ## :database
 * `improved_hashtag_timeline`: Setting to force toggle / force disable improved hashtags timeline. `:enabled` forces hashtags to be fetched from `hashtags` table for hashtags timeline. `:disabled` forces object-embedded hashtags to be used (slower). Keep it `:auto` for automatic behaviour (it is auto-set to `:enabled` [unless overridden] when HashtagsTableMigrator completes).
@@ -338,7 +336,7 @@ config :pleroma, :frontends,
 * `:primary` - The frontend that will be served at `/`
 * `:admin` - The frontend that will be served at `/pleroma/admin`
-* `:swagger` - Config for developers to act as an API reference to be served at `/pleroma/swaggerui/` (trailing slash _needed_). Disabled by default.
+* `:swagger` - Config for developers to act as an API reference to be served at `/akkoma/swaggerui/` (trailing slash _needed_). Disabled by default.
 * `:mastodon` - The mastodon-fe configuration. This shouldn't need to be changed. This is served at `/web` when installed.
 ### :static\_fe
@@ -605,6 +603,7 @@ the source code is here: [kocaptcha](https://github.com/koto-bank/kocaptcha). Th
 * `link_name`: When enabled Akkoma will add a `name` parameter to the url of the upload, for example `https://instance.tld/media/corndog.png?name=corndog.png`. This is needed to provide the correct filename in Content-Disposition headers
 * `base_url`: The base URL to access a user-uploaded file; MUST be configured explicitly.
   Using a (sub)domain distinct from the instance endpoint is **strongly** recommended. A good value might be `https://media.myakkoma.instance/media/`.
+* `proxy_remote`: If you're using a remote uploader, Akkoma will proxy media requests instead of redirecting to it.
 * `proxy_opts`: Proxy options, see `Pleroma.ReverseProxy` documentation.
 * `filename_display_max_length`: Set max length of a filename to display. 0 = no limit. Default: 30.
@@ -655,17 +654,9 @@ This filter replaces the declared filename (not the path) of an upload.
 * `text`: Text to replace filenames in links. If empty, `{random}.extension` will be used. You can get the original filename extension by using `{extension}`, for example `custom-file-name.{extension}`.
-#### Pleroma.Upload.Filter.Exiftool.StripMetadata
-This filter strips metadata with Exiftool leaving color profiles and orientation intact.
-* `purge`: List of Exiftool tag names or tag group names to purge
-* `preserve`: List of Exiftool tag names or tag group names to preserve even if they occur in the purge list
-#### Pleroma.Upload.Filter.Exiftool.ReadDescription
-This filter reads the ImageDescription and iptc:Caption-Abstract fields with Exiftool so clients can prefill the media description field.
+#### Pleroma.Upload.Filter.Exiftool
+This filter only strips the GPS and location metadata with Exiftool leaving color profiles and attributes intact.
 No specific configuration.
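Both options documented on the develop side take ExifTool tag or group names. A hypothetical combination, with values borrowed from the `suggestions` in the `description.exs` hunk earlier in this compare:

```elixir
# Hypothetical values; any ExifTool tag or group names can be used,
# and entries in :preserve win over entries in :purge.
config :pleroma, Pleroma.Upload.Filter.Exiftool.StripMetadata,
  purge: ["all", "CommonIFD0"],
  preserve: ["ColorSpaces", "Orientation"]
```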

(unnamed file)

@@ -60,4 +60,4 @@ config :pleroma, :frontends,
 Then run the [pleroma.frontend cli task](../../administration/CLI_tasks/frontend) with the name of `swagger-ui` to install the distribution files.
-You will now be able to view documentation at `/pleroma/swaggerui`
+You will now be able to view documentation at `/akkoma/swaggerui`

(unnamed file)

@@ -6,17 +6,37 @@ With the `mediaproxy` function you can use nginx to cache this content, so users
 ## Activate it
-* Edit your nginx config and add the following location to your main server block:
-```
-location /proxy {
-    return 404;
-}
-```
 * Set up a subdomain for the proxy with its nginx config on the same machine
-* Edit the nginx config for the upload/MediaProxy subdomain to point to the subdomain that has been set up *(the latter is not strictly required, but for simplicity we'll assume so)*
+* In this subdomain's server block add
+```
+location /proxy {
+    proxy_cache akkoma_media_cache;
+    proxy_cache_lock on;
+    proxy_pass http://localhost:4000;
+}
+```
+Also add the following on top of the configuration, outside of the `server` block:
+```
+proxy_cache_path /tmp/akkoma-media-cache levels=1:2 keys_zone=akkoma_media_cache:10m max_size=10g inactive=720m use_temp_path=off;
+```
+If you came here from one of the installation guides, take a look at the example configuration `/installation/nginx/akkoma.nginx`, where this part is already included.
 * Append the following to your `prod.secret.exs` or `dev.secret.exs` (depends on which mode your instance is running):
-```elixir
-# Replace media.example.td with the subdomain you set up earlier
+```
 config :pleroma, :media_proxy,
   enabled: true,
   proxy_opts: [
     redirect_on_failure: true
   ],
-  base_url: "https://media.example.tld"
+  base_url: "https://cache.akkoma.social"
 ```
 You **really** should use a subdomain to serve proxied files; while we will fix bugs resulting from this, serving arbitrary remote content on your main domain namespace is a significant attack surface.

(unnamed file)

@@ -130,26 +130,59 @@ config :pleroma, :http_security,
   enabled: false
 ```
-In the Nginx config, add the following into the `location /` block:
-```nginx
+Use this as the Nginx config:
+```
+proxy_cache_path /tmp/akkoma-media-cache levels=1:2 keys_zone=akkoma_media_cache:10m max_size=10g inactive=720m use_temp_path=off;
+# The above already exists in a clearnet instance's config.
+# If not, add it.
+server {
+    listen 127.0.0.1:14447;
+    server_name youri2paddress;
+    # Comment to enable logs
+    access_log /dev/null;
+    error_log /dev/null;
+    gzip_vary on;
+    gzip_proxied any;
+    gzip_comp_level 6;
+    gzip_buffers 16 8k;
+    gzip_http_version 1.1;
+    gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript application/activity+json application/atom+xml;
+    client_max_body_size 16m;
+    location / {
 add_header X-XSS-Protection "0";
 add_header X-Permitted-Cross-Domain-Policies none;
 add_header X-Frame-Options DENY;
 add_header X-Content-Type-Options nosniff;
 add_header Referrer-Policy same-origin;
-```
-Change the `listen` directive to the following:
-```nginx
-listen 127.0.0.1:14447;
-```
-Set `server_name` to your i2p address.
-Reload Nginx:
+        proxy_http_version 1.1;
+        proxy_set_header Upgrade $http_upgrade;
+        proxy_set_header Connection "upgrade";
+        proxy_set_header Host $http_host;
+        proxy_pass http://localhost:4000;
+        client_max_body_size 16m;
+    }
+    location /proxy {
+        proxy_cache akkoma_media_cache;
+        proxy_cache_lock on;
+        proxy_ignore_client_abort on;
+        proxy_pass http://localhost:4000;
+    }
+}
 ```
-systemctl restart i2pd.service --no-block
-systemctl reload nginx.service
+reload Nginx:
+```
+systemctl stop i2pd.service --no-block
+systemctl start i2pd.service
 ```
 *Notice:* The stop command initiates a graceful shutdown process, i2pd stops after finishing to route transit tunnels (maximum 10 minutes).

(unnamed file)

@@ -74,23 +74,56 @@ config :pleroma, :http_security,
   enabled: false
 ```
-In the Nginx config, add the following into the `location /` block:
-```nginx
+Use this as the Nginx config:
+```
+proxy_cache_path /tmp/akkoma-media-cache levels=1:2 keys_zone=akkoma_media_cache:10m max_size=10g inactive=720m use_temp_path=off;
+# The above already exists in a clearnet instance's config.
+# If not, add it.
+server {
+    listen 127.0.0.1:8099;
+    server_name youronionaddress;
+    # Comment to enable logs
+    access_log /dev/null;
+    error_log /dev/null;
+    gzip_vary on;
+    gzip_proxied any;
+    gzip_comp_level 6;
+    gzip_buffers 16 8k;
+    gzip_http_version 1.1;
+    gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript application/activity+json application/atom+xml;
+    client_max_body_size 16m;
+    location / {
 add_header X-XSS-Protection "0";
 add_header X-Permitted-Cross-Domain-Policies none;
 add_header X-Frame-Options DENY;
 add_header X-Content-Type-Options nosniff;
 add_header Referrer-Policy same-origin;
+        proxy_http_version 1.1;
+        proxy_set_header Upgrade $http_upgrade;
+        proxy_set_header Connection "upgrade";
+        proxy_set_header Host $http_host;
+        proxy_pass http://localhost:4000;
+        client_max_body_size 16m;
+    }
+    location /proxy {
+        proxy_cache akkoma_media_cache;
+        proxy_cache_lock on;
+        proxy_ignore_client_abort on;
+        proxy_pass http://localhost:4000;
+    }
+}
 ```
-Change the `listen` directive to the following:
-```nginx
-listen 127.0.0.1:8099;
-```
-Set the `server_name` to your onion address.
-Reload Nginx:
+reload Nginx:
 ```
 systemctl reload nginx
 ```

(unnamed file)

@@ -4,10 +4,47 @@ Akkoma performance is largely dependent on performance of the underlying database.
 ## PGTune
-[PgTune](https://pgtune.leopard.in.ua) can be used to get recommended settings. Make sure to set the DB type to "Online transaction processing system" for optimal performance. Also set the number of connections to between 25 and 30. This will allow each connection to have access to more resources while still leaving some room for running maintenance tasks while the instance is still running.
-It is also recommended to not use "Network Storage" option.
-If your server runs other services, you may want to take that into account. E.g. if you have 4G ram, but 1G of it is already used for other services, it may be better to tell PGTune you only have 3G.
-In the end, PGTune only provides recomended settings, you can always try to finetune further.
+[PgTune](https://pgtune.leopard.in.ua) can be used to get recommended settings. Be sure to set "Number of Connections" to 20, otherwise it might produce settings hurtful to database performance. It is also recommended to not use "Network Storage" option.
+If your server runs other services, you may want to take that into account. E.g. if you have 4G ram, but 1G of it is already used for other services, it may be better to tell PGTune you only have 3G. In the end, PGTune only provides recomended settings, you can always try to finetune further.
+### Example configurations
+Here are some configuration suggestions for PostgreSQL 10+.
+#### 1GB RAM, 1 CPU
+```
+shared_buffers = 256MB
+effective_cache_size = 768MB
+maintenance_work_mem = 64MB
+work_mem = 13107kB
+```
+#### 2GB RAM, 2 CPU
+```
+shared_buffers = 512MB
+effective_cache_size = 1536MB
+maintenance_work_mem = 128MB
+work_mem = 26214kB
+max_worker_processes = 2
+max_parallel_workers_per_gather = 1
+max_parallel_workers = 2
+```
+## Disable generic query plans
+When PostgreSQL receives a query, it decides on a strategy for searching the requested data, this is called a query plan. The query planner has two modes: generic and custom. Generic makes a plan for all queries of the same shape, ignoring the parameters, which is then cached and reused. Custom, on the contrary, generates a unique query plan based on query parameters.
+By default PostgreSQL has an algorithm to decide which mode is more efficient for particular query, however this algorithm has been observed to be wrong on some of the queries Akkoma sends, leading to serious performance loss. Therefore, it is recommended to disable generic mode.
+Akkoma already avoids generic query plans by default, however the method it uses is not the most efficient because it needs to be compatible with all supported PostgreSQL versions. For PostgreSQL 12 and higher additional performance can be gained by adding the following to Akkoma configuration:
+```elixir
+config :pleroma, Pleroma.Repo,
+  prepare: :named,
+  parameters: [
+    plan_cache_mode: "force_custom_plan"
+  ]
+```
+A more detailed explaination of the issue can be found at <https://blog.soykaf.com/post/postgresql-elixir-troubles/>.
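For comparison, the develop side of this compare makes the custom-plan behaviour a built-in default; a sketch of the equivalent setting as it appears in the `config.exs` hunk earlier in this diff:

```elixir
# Develop-side default from the config.exs hunk above: PostgreSQL is told
# to always use custom plans, avoiding the generic-plan pitfall described here.
config :pleroma, Pleroma.Repo,
  parameters: [
    gin_fuzzy_search_limit: "500",
    plan_cache_mode: "force_custom_plan"
  ]
```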

(unnamed file)

@@ -33,7 +33,6 @@ indexes faster when it can process many posts in a single batch.
 > config :pleroma, Pleroma.Search.Meilisearch,
 >   url: "http://127.0.0.1:7700/",
 >   private_key: "private key",
->   search_key: "search key",
 >   initial_indexing_chunk_size: 100_000
 Information about setting up meilisearch can be found in the
@@ -46,7 +45,7 @@ is hardly usable on a somewhat big instance.
 ### Private key authentication (optional)
 To set the private key, use the `MEILI_MASTER_KEY` environment variable when starting. After setting the _master key_,
-you have to get the _private key_ and possibly _search key_, which are actually used for authentication.
+you have to get the _private key_, which is actually used for authentication.
 === "OTP"
     ```sh
@@ -58,11 +57,7 @@ you have to get the _private key_ and possibly _search key_, which are actually
     mix pleroma.search.meilisearch show-keys <your master key here>
     ```
-You will see a "Default Admin API Key", this is the key you actually put into
-your configuration file as `private_key`. You should also see a
-"Default Search API key", put this into your config as `search_key`.
-If your version of Meilisearch only showed the former,
-just leave `search_key` completely unset in Akkoma's config.
+You will see a "Default Admin API Key", this is the key you actually put into your configuration file.
 ### Initial indexing
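Taken together, the develop-side instructions yield a search section roughly like this sketch; the key values are placeholders for whatever `show-keys` prints:

```elixir
# Placeholder keys; paste the "Default Admin API Key" and, if shown,
# the "Default Search API key" from `show-keys`. Leave search_key unset
# if your Meilisearch version does not print one.
config :pleroma, Pleroma.Search.Meilisearch,
  url: "http://127.0.0.1:7700/",
  private_key: "<Default Admin API Key>",
  search_key: "<Default Search API key>",
  initial_indexing_chunk_size: 100_000
```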

(unnamed file)

@@ -6,7 +6,7 @@ as soon as the post is received by your instance.
 ## Nginx
-The following are excerpts from the [suggested nginx config](https://akkoma.dev/AkkomaGang/akkoma/src/branch/develop/installation/nginx/akkoma.nginx) that demonstrates the necessary config for the media proxy to work.
+The following are excerpts from the [suggested nginx config](../../../installation/nginx/akkoma.nginx) that demonstrates the necessary config for the media proxy to work.
 A `proxy_cache_path` must be defined, for example:

(unnamed file)

@@ -1033,6 +1033,7 @@ Most of the settings will be applied in `runtime`, this means that you don't nee
 - `:pools`
 - partially settings inside these keys:
   - `:seconds_valid` in `Pleroma.Captcha`
+  - `:proxy_remote` in `Pleroma.Upload`
   - `:upload_limit` in `:instance`
 - Params:
@@ -1093,6 +1094,7 @@ List of settings which support only full update by subkey:
 {"tuple": [":uploader", "Pleroma.Uploaders.Local"]},
 {"tuple": [":filters", ["Pleroma.Upload.Filter.Dedupe"]]},
 {"tuple": [":link_name", true]},
+{"tuple": [":proxy_remote", false]},
 {"tuple": [":proxy_opts", [
   {"tuple": [":redirect_on_failure", false]},
   {"tuple": [":max_body_length", 1048576]},

(unnamed file)

@@ -4,6 +4,7 @@
 The following endpoints are additionally present into our actors.
 - `oauthRegistrationEndpoint` (`http://litepub.social/ns#oauthRegistrationEndpoint`)
+- `uploadMedia` (`https://www.w3.org/ns/activitystreams#uploadMedia`)
 ### oauthRegistrationEndpoint
@@ -11,279 +12,6 @@ Points to MastodonAPI `/api/v1/apps` for now.
 See <https://docs.joinmastodon.org/methods/apps/>
## Emoji reactions
Emoji reactions are implemented as a new activity type `EmojiReact`.
A single user is allowed to react multiple times with different emoji to the
same post. However, they may only react at most once with the same emoji.
Repeated reaction from the same user with the same emoji are to be ignored.
Emoji reactions are also distinct from `Like` activities and a user may both
`Like` and react to a post.
!!! note
Misskey also supports emoji reactions, but the implementations differs.
It equates likes and reactions and only allows a single reaction per post.
The emoji is placed in the `content` field of the activity
and the `object` property points to the note reacting to.
Emoji can either be any Unicode emoji sequence or a custom emoji.
The latter must place their shortcode, including enclosing colons,
into `content` and put the emoji object inside the `tag` property.
The `tag` property MAY be omitted for Unicode emoji.
An example reaction with a Unicode emoji:
```json
{
"@context": [
"https://www.w3.org/ns/activitystreams",
"https://example.org/schemas/litepub-0.1.jsonld",
{
"@language": "und"
}
],
"type": "EmojiReact",
"id": "https://example.org/activities/23143872a0346141",
"actor": "https://example.org/users/akko",
"nickname": "akko",
"to": ["https://remote.example/users/diana", "https://example.org/users/akko/followers"],
"cc": ["https://www.w3.org/ns/activitystreams#Public"],
"content": "🧡",
"object": "https://remote.example/objects/9f0e93499d8314a9"
}
```
An example reaction with a custom emoji:
```json
{
"@context": [
"https://www.w3.org/ns/activitystreams",
"https://example.org/schemas/litepub-0.1.jsonld",
{
"@language": "und"
}
],
"type": "EmojiReact",
"id": "https://example.org/activities/d75586dec0541650",
"actor": "https://example.org/users/akko",
"nickname": "akko",
"to": ["https://remote.example/users/diana", "https://example.org/users/akko/followers"],
"cc": ["https://www.w3.org/ns/activitystreams#Public"],
"content": ":mouse:",
"object": "https://remote.example/objects/9f0e93499d8314a9",
"tag": [{
"type": "Emoji",
"id": null,
"name": "mouse",
"icon": {
"type": "Image",
"url": "https://example.org/emoji/mouse/mouse.png"
}
}]
}
```
!!! note
Although an emoji reaction can only contain a single emoji,
for compatibility with older versions of Pleroma and Akkoma,
it is recommended to wrap the emoji object in a single-element array.
When reacting with a remote custom emoji do not include the remote domain in `content`s shortcode
*(unlike in our REST API which needs the domain)*:
```json
{
"@context": [
"https://www.w3.org/ns/activitystreams",
"https://example.org/schemas/litepub-0.1.jsonld",
{
"@language": "und"
}
],
"type": "EmojiReact",
"id": "https://example.org/activities/7993dcae98d8d5ec",
"actor": "https://example.org/users/akko",
"nickname": "akko",
"to": ["https://remote.example/users/diana", "https://example.org/users/akko/followers"],
"cc": ["https://www.w3.org/ns/activitystreams#Public"],
"content": ":hug:",
"object": "https://remote.example/objects/9f0e93499d8314a9",
"tag": [{
"type": "Emoji",
"id": "https://other.example/emojis/hug",
"name": "hug",
"icon": {
"type": "Image",
"url": "https://other.example/files/b71cea432b3fad67.webp"
}
}]
}
```
Emoji reactions can be retracted using a standard `Undo` activity:
```json
{
"@context": [
"https://www.w3.org/ns/activitystreams",
"http://example.org/schemas/litepub-0.1.jsonld",
{
"@language": "und"
}
],
"type": "Undo",
"id": "http://example.org/activities/4685792e-efb6-4309-b508-ae4f355dd695",
"actor": "https://example.org/users/akko",
"to": ["https://remote.example/users/diana", "https://example.org/users/akko/followers"],
"cc": ["https://www.w3.org/ns/activitystreams#Public"],
"object": "https://example.org/activities/23143872a0346141"
}
```
## User profile backgrounds
Akkoma federates user profile backgrounds the same way as Sharkey.
An actors ActivityPub representation contains an additional
`backgroundUrl` property containing an `Image` object. This property
belongs to the `"sharkey": "https://joinsharkey.org/ns#"` namespace.
## Quote Posts
Akkoma allows referencing a single other note as a quote,
which will be prominently displayed in the interface.
The quoted post is referenced by its ActivityPub id in the `quoteUri` property.
!!! note
Old Misskey only understood and modern Misskey still prefers
the `_misskey_quote` property for this. Similar some other older
software used `quoteUrl` or `quoteURL`.
All current implementations with quote support understand `quoteUri`.
Example:
```json
{
"@context": [
"https://www.w3.org/ns/activitystreams",
"https://example.org/schemas/litepub-0.1.jsonld",
{
"@language": "und"
}
],
"type": "Note",
"id": "https://example.org/activities/85717e587f95d5c0",
"actor": "https://example.org/users/akko",
"to": ["https://remote.example/users/diana", "https://example.org/users/akko/followers"],
"cc": ["https://www.w3.org/ns/activitystreams#Public"],
"context": "https://example.org/contexts/1",
"content": "Look at that!",
"quoteUri": "http://remote.example/status/85717e587f95d5c0",
"contentMap": {
"en": "Look at that!"
},
"source": {
"content": "Look at that!",
"mediaType": "text/plain"
},
"published": "2024-04-06T23:40:28Z",
"updated": "2024-04-06T23:40:28Z",
"attachemnt": [],
"tag": []
}
```
## Threads
Akkoma assigns all posts of the same thread the same `context`. This is a
standard ActivityPub property but its meaning is left vague. Akkoma will
always treat posts with identical `context` as part of the same thread.
`context` must not be assumed to hold any meaning or be dereferencable.
Incoming posts without `context` will be assigned a new context.
!!! note
Mastodon uses the non-standard `conversation` property for the same purpose
*(named after an older OStatus property)*. For incoming posts without
`context` but with `converstions` Akkoma will use the value from
`conversations` to fill in `context`.
For outgoing posts Akkoma will duplicate the context into `conversation`.
## Post Source
Unlike Mastodon, Akkoma supports drafting posts in multiple source formats
besides plaintext, like Markdown or MFM. The original input is preserved
in the standard ActivityPub `source` property *(not supported by Mastodon)*.
Still, `content` will always be present and contain the prerendered HTML form.
Supported `mediaType` include:
- `text/plain`
- `text/markdown`
- `text/bbcode`
- `text/x.misskeymarkdown`
## Post Language
!!! note
This is also supported in and compatible with Mastodon, but since
joinmastodon.org doesn't document it yet, it is included here.
[GoToSocial](https://docs.gotosocial.org/en/latest/federation/federating_with_gotosocial/#content-contentmap-and-language)
has a more refined version of this which can correctly deal with multiple language entries.
A post can indicate its language by including a `contentMap` object
which contains a sub-key named after the language's ISO 639-1 code,
with its value identical to the post's `content` field.
Currently Akkoma, just like Mastodon, only properly supports a single language entry;
if multiple entries are present, a random one will be picked.
Furthermore, Akkoma currently only reads the `content` field
and never the value from `contentMap`.
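A minimal sketch of a post marked as English:
```json
{
  "type": "Note",
  "content": "Look at that!",
  "contentMap": {
    "en": "Look at that!"
  }
}
```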
## Local post scope
Posts using this scope will never federate to other servers,
but for the sake of completeness it is listed here.
In addition to the usual scopes *(public, unlisted, followers-only, direct)*
Akkoma supports a “local” post scope. Such posts will not federate to
other instances and are only shown to logged-in users on the same instance.
They are included in the local timeline.
This may be useful to discuss or announce instance-specific policies and topics.
A post is addressed to the local scope by including `<base url of instance>/#Public`
in its `to` field. E.g. if the instance is on `https://example.org` it would use
`https://example.org/#Public`.
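A local-only post on this instance would thus be addressed along these lines (a sketch; other fields omitted):
```json
{
  "type": "Note",
  "id": "https://example.org/objects/2",
  "to": ["https://example.org/#Public"]
}
```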
An implementation creating a new post MUST NOT address both the local and
general public scope `as:Public` at the same time. A post addressing the local
scope MUST NOT be sent to other instances or be fetchable by other
instances, regardless of potential other listed addressees.
When receiving a remote post addressing both the public scope and what appears
to be a local-scope identifier, the post SHOULD be treated without assigning any
special meaning to the potential local-scope identifier.
!!! note
Misskey-derivatives have a similar concept of non-federated posts,
however those are also shown publicly on the local web interface
and are thus visible to non-members.
## List post scope
Messages originally addressed to a custom list will contain
a `listMessage` field with an unresolvable pseudo ActivityPub id.
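Sketch of the relevant fragment (the pseudo id shown is illustrative and, as stated, not resolvable):
```json
{
  "type": "Note",
  "listMessage": "https://example.org/contexts/d9095f7d-a784-4de5-b246-a42750986e20"
}
```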
# Deprecated and Removed Extensions
The following extensions were used in the past but have been dropped.
Documentation is retained here as a reference and since old objects might
still contain related fields.
## Actor endpoints
The following endpoints used to be present:
- `uploadMedia` (`https://www.w3.org/ns/activitystreams#uploadMedia`)
### uploadMedia
Inspired by <https://www.w3.org/wiki/SocialCG/ActivityPub/MediaUpload>, it is part of the ActivityStreams namespace because it used to be part of the ActivityPub specification and got removed from it.
@ -292,8 +20,9 @@ Content-Type: multipart/form-data
Parameters:
- (required) `file`: The file being uploaded
- (optional) `description`: A plain-text description of the media, for accessibility purposes.
Response: HTTP 201 Created with the object in the body; no `Location` header is provided as it doesn't have an `id`
The object given in the response should then be inserted into an Object's `attachment` field.
View file
@ -1,141 +0,0 @@
# Nodeinfo Extensions
Akkoma currently implements versions 2.0 and 2.1 of the nodeinfo spec,
but provides the following additional fields.
## metadata
The spec leaves the content of `metadata` up to implementations
and indeed Akkoma adds many fields here apart from the commonly
found `nodeName` and `nodeDescription` fields.
### accountActivationRequired
Whether or not users need to confirm their email before completing registration.
*(boolean)*
!!! note
Not to be confused with account approval, where each registration needs to
be manually approved by an admin. Account approval has no nodeinfo entry.
### features
Array of strings denoting supported server features. E.g. a server supporting
quote posts should include a `"quote_posting"` entry here.
A non-exhaustive list of possible features:
- `polls`
- `quote_posting`
- `editing`
- `bubble_timeline`
- `pleroma_emoji_reactions` *(Unicode emoji)*
- `custom_emoji_reactions`
- `akkoma_api`
- `akkoma:machine_translation`
- `mastodon_api`
- `pleroma_api`
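A nodeinfo response might then contain an excerpt like this (a sketch with an arbitrary selection of features):
```json
{
  "metadata": {
    "features": [
      "polls",
      "quote_posting",
      "pleroma_emoji_reactions"
    ]
  }
}
```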
### federatedTimelineAvailable
Whether or not the “federated timeline”, i.e. a timeline containing posts from
the entire known network, is made available.
*(boolean)*
### federation
This section is optional and can contain various custom keys describing federation policies.
The following is required to be present:
- `enabled` *(boolean)* whether the server federates at all
A non-exhaustive list of optional keys:
- `exclusions` *(boolean)* whether some federation policies are withheld
- `mrf_simple` *(object)* describes how the Simple MRF policy is configured
### fieldsLimits
A JSON object documenting restrictions for user account info fields.
All properties are integers.
- `maxFields` maximum number of account info fields local users can create
- `maxRemoteFields` maximum number of account info fields remote users can have
before the user gets rejected or fields truncated
- `nameLength` maximum length of a field's name
- `valueLength` maximum length of a field's value
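For example (the numbers are illustrative, not normative):
```json
{
  "fieldsLimits": {
    "maxFields": 10,
    "maxRemoteFields": 20,
    "nameLength": 512,
    "valueLength": 2048
  }
}
```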
### invitesEnabled
Whether or not signing up via invite codes is possible.
*(boolean)*
### localBubbleInstances
Array of domains (as strings) of other instances chosen
by the admin which are shown in the bubble timeline.
### mailerEnabled
Whether or not the instance can send out emails.
*(boolean)*
### nodeDescription
Human-friendly description of this instance
*(string)*
### nodeName
Human-friendly name of this instance
*(string)*
### pollLimits
JSON object containing limits for polls created by local users.
All values are integers.
- `max_options` maximum number of poll options
- `max_option_chars` maximum characters per poll option
- `min_expiration` minimum time in seconds a poll must be open for
- `max_expiration` maximum time a poll is allowed to be open for
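For example (values illustrative):
```json
{
  "pollLimits": {
    "max_options": 20,
    "max_option_chars": 200,
    "min_expiration": 0,
    "max_expiration": 31536000
  }
}
```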
### postFormats
Array of strings containing media types for supported post source formats.
A non-exhaustive list of possible values:
- `text/plain`
- `text/markdown`
- `text/bbcode`
- `text/x.misskeymarkdown`
### private
Whether or not unauthenticated API access is permitted.
*(boolean)*
### privilegedStaff
Whether or not moderators are trusted to perform some
additional tasks, e.g. issuing password reset emails.
### publicTimelineVisibility
JSON object containing boolean-valued keys reporting
if a given timeline can be viewed without login.
- `local`
- `federated`
- `bubble`
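Example (values illustrative):
```json
{
  "publicTimelineVisibility": {
    "local": true,
    "federated": false,
    "bubble": true
  }
}
```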
### restrictedNicknames
Array of strings listing nicknames forbidden to be used during signup.
### skipThreadContainment
Whether broken threads are filtered out
*(boolean)*
### staffAccounts
Array containing ActivityPub IDs of local accounts
with some form of elevated privilege on the instance.
### suggestions
JSON object containing info on whether the interaction-based
Mastodon `/api/v1/suggestions` feature is enabled and optionally
additional implementation-defined fields with more details
on e.g. how suggested users are selected.
!!! note
This has no relation to the newer `/api/v2/suggestions` API
which also (or exclusively) contains staff-curated entries.
- `enabled` *(boolean)* whether or not user recommendations are enabled
### uploadLimits
JSON object documenting various upload-related size limits.
All values are integers and in bytes.
- `avatar` maximum size of uploaded user avatars
- `banner` maximum size of uploaded user profile banners
- `background` maximum size of uploaded user profile backgrounds
- `general` maximum size for all other kinds of uploads
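For example, an instance with a 16 MB general limit might report (values illustrative):
```json
{
  "uploadLimits": {
    "avatar": 2000000,
    "banner": 4000000,
    "background": 4000000,
    "general": 16000000
  }
}
```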
View file
@ -145,13 +145,47 @@ If you want to open your newly installed instance to the world, you should run n
doas apk add nginx doas apk add nginx
``` ```
* Setup your SSL cert, using your method of choice or certbot. If using certbot, first install it:
```shell
doas apk add certbot
```
and then set it up:
```shell
doas mkdir -p /var/lib/letsencrypt/
doas certbot certonly --email <your@emailaddress> -d <yourdomain> --standalone
```
If that doesn't work, make sure that nginx is not already running. If it still doesn't work, try setting up nginx first (change ssl “on” to “off” and try again).
* Copy the example nginx configuration to the nginx folder * Copy the example nginx configuration to the nginx folder
```shell ```shell
doas cp /opt/akkoma/installation/nginx/akkoma.nginx /etc/nginx/conf.d/akkoma.conf doas cp /opt/akkoma/installation/nginx/akkoma.nginx /etc/nginx/conf.d/akkoma.conf
``` ```
* Before starting nginx edit the configuration and change it to your needs. You must change `server_name`. You can use `nano` (install with `apk add nano` if missing). * Before starting nginx edit the configuration and change it to your needs. You must change `server_name` and the paths to the certificates. You can use `nano` (install with `apk add nano` if missing).
```
server {
server_name your.domain;
listen 80;
...
}
server {
server_name your.domain;
listen 443 ssl http2;
...
ssl_trusted_certificate /etc/letsencrypt/live/your.domain/chain.pem;
ssl_certificate /etc/letsencrypt/live/your.domain/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/your.domain/privkey.pem;
...
}
```
* Enable and start nginx: * Enable and start nginx:
```shell ```shell
@ -159,37 +193,10 @@ doas rc-update add nginx
doas rc-service nginx start doas rc-service nginx start
``` ```
* Setup your SSL cert, using your method of choice or certbot. If using certbot, first install it: If you need to renew the certificate in the future, uncomment the relevant location block in the nginx config and run:
```shell ```shell
doas apk add certbot certbot-nginx doas certbot certonly --email <your@emailaddress> -d <yourdomain> --webroot -w /var/lib/letsencrypt/
```
and then set it up:
```shell
doas mkdir -p /var/lib/letsencrypt/
doas certbot --email <your@emailaddress> -d <yourdomain> -d <media_domain> --nginx
```
If that doesn't work the first time, add `--dry-run` to further attempts to avoid being ratelimited as you identify the issue, and do not remove it until the dry run succeeds. A common source of problems are nginx config syntax errors; this can be checked for by running `nginx -t`.
To automatically renew, set up a cron job like so:
```shell
# Enable the crond service
doas rc-update add crond
doas rc-service crond start
# Test that renewals work
doas certbot renew --cert-name yourinstance.tld --nginx --dry-run
# Add the renewal task to cron
echo '#!/bin/sh
certbot renew --cert-name yourinstance.tld --nginx
' | doas tee /etc/periodic/daily/renew-akkoma-cert
doas chmod +x /etc/periodic/daily/renew-akkoma-cert
``` ```
#### OpenRC service #### OpenRC service
View file
@ -136,17 +136,16 @@ If you want to open your newly installed instance to the world, you should run n
sudo pacman -S nginx sudo pacman -S nginx
``` ```
* Copy the example nginx configuration: * Create directories for available and enabled sites:
```shell ```shell
sudo cp /opt/akkoma/installation/nginx/akkoma.nginx /etc/nginx/conf.d/akkoma.conf sudo mkdir -p /etc/nginx/sites-{available,enabled}
``` ```
* Before starting nginx edit the configuration and change it to your needs (e.g. change servername, change cert paths) * Append the following line at the end of the `http` block in `/etc/nginx/nginx.conf`:
* Enable and start nginx:
```shell ```Nginx
sudo systemctl enable --now nginx.service include sites-enabled/*;
``` ```
* Setup your SSL cert, using your method of choice or certbot. If using certbot, first install it: * Setup your SSL cert, using your method of choice or certbot. If using certbot, first install it:
@ -159,18 +158,32 @@ and then set it up:
```shell ```shell
sudo mkdir -p /var/lib/letsencrypt/ sudo mkdir -p /var/lib/letsencrypt/
sudo certbot --email <your@emailaddress> -d <yourdomain> -d <media_domain> --nginx sudo certbot certonly --email <your@emailaddress> -d <yourdomain> --standalone
``` ```
If that doesn't work the first time, add `--dry-run` to further attempts to avoid being ratelimited as you identify the issue, and do not remove it until the dry run succeeds. A common source of problems are nginx config syntax errors; this can be checked for by running `nginx -t`. If that doesnt work, make sure, that nginx is not already running. If it still doesnt work, try setting up nginx first (change ssl “on” to “off” and try again).
To make sure renewals work, enable the appropriate systemd timer: ---
* Copy the example nginx configuration and activate it:
```shell ```shell
sudo systemctl enable --now certbot-renew.timer sudo cp /opt/akkoma/installation/nginx/akkoma.nginx /etc/nginx/sites-available/akkoma.nginx
sudo ln -s /etc/nginx/sites-available/akkoma.nginx /etc/nginx/sites-enabled/akkoma.nginx
``` ```
Certificate renewal should be handled automatically by Certbot from now on. * Before starting nginx edit the configuration and change it to your needs (e.g. change servername, change cert paths)
* Enable and start nginx:
```shell
sudo systemctl enable --now nginx.service
```
If you need to renew the certificate in the future, uncomment the relevant location block in the nginx config and run:
```shell
sudo certbot certonly --email <your@emailaddress> -d <yourdomain> --webroot -w /var/lib/letsencrypt/
```
#### Other webserver/proxies #### Other webserver/proxies
View file
@ -155,6 +155,23 @@ If you want to open your newly installed instance to the world, you should run n
sudo apt install nginx sudo apt install nginx
``` ```
* Setup your SSL cert, using your method of choice or certbot. If using certbot, first install it:
```shell
sudo apt install certbot
```
and then set it up:
```shell
sudo mkdir -p /var/lib/letsencrypt/
sudo certbot certonly --email <your@emailaddress> -d <yourdomain> --standalone
```
If that doesn't work, make sure that nginx is not already running. If it still doesn't work, try setting up nginx first (change ssl “on” to “off” and try again).
---
* Copy the example nginx configuration and activate it: * Copy the example nginx configuration and activate it:
```shell ```shell
@ -169,23 +186,12 @@ sudo ln -s /etc/nginx/sites-available/akkoma.nginx /etc/nginx/sites-enabled/akko
sudo systemctl enable --now nginx.service sudo systemctl enable --now nginx.service
``` ```
* Setup your SSL cert, using your method of choice or certbot. If using certbot, first install it: If you need to renew the certificate in the future, uncomment the relevant location block in the nginx config and run:
```shell ```shell
sudo apt install certbot python3-certbot-nginx sudo certbot certonly --email <your@emailaddress> -d <yourdomain> --webroot -w /var/lib/letsencrypt/
``` ```
and then set it up:
```shell
sudo mkdir -p /var/lib/letsencrypt/
sudo certbot --email <your@emailaddress> -d <yourdomain> -d <media_domain> --nginx
```
If that doesn't work the first time, add `--dry-run` to further attempts to avoid being ratelimited as you identify the issue, and do not remove it until the dry run succeeds. A common source of problems are nginx config syntax errors; this can be checked for by running `nginx -t`.
Certificate renewal should be handled automatically by Certbot from now on.
#### Other webserver/proxies #### Other webserver/proxies
You can find example configurations for them in `/opt/akkoma/installation/`. You can find example configurations for them in `/opt/akkoma/installation/`.
View file
@ -125,26 +125,7 @@ cp docker-resources/Caddyfile.example docker-resources/Caddyfile
Then edit the TLD in your caddyfile to the domain you're serving on. Then edit the TLD in your caddyfile to the domain you're serving on.
Copy the commented out `caddy` section in `docker-compose.yml` into a new file called `docker-compose.override.yml` like so: Uncomment the `caddy` section in the docker compose file,
```yaml
version: "3.7"
services:
proxy:
image: caddy:2-alpine
restart: unless-stopped
links:
- akkoma
ports: [
"443:443",
"80:80"
]
volumes:
- ./docker-resources/Caddyfile:/etc/caddy/Caddyfile
- ./caddy-data:/data
- ./caddy-config:/config
```
then run `docker compose up -d` again. then run `docker compose up -d` again.
#### Running a reverse proxy on the host #### Running a reverse proxy on the host
@ -174,12 +155,6 @@ git pull
docker compose restart akkoma db docker compose restart akkoma db
``` ```
### Modifying the Docker services
If you want to modify the services defined in the docker compose file, you can
create a new file called `docker-compose.override.yml`. There you can add any
overrides or additional services without worrying about git conflicts when a
new release comes out.
#### Further reading #### Further reading
{! installation/further_reading.include !} {! installation/further_reading.include !}
View file
@ -135,6 +135,23 @@ If you want to open your newly installed instance to the world, you should run n
sudo dnf install nginx sudo dnf install nginx
``` ```
* Setup your SSL cert, using your method of choice or certbot. If using certbot, first install it:
```shell
sudo dnf install certbot
```
and then set it up:
```shell
sudo mkdir -p /var/lib/letsencrypt/
sudo certbot certonly --email <your@emailaddress> -d <yourdomain> --standalone
```
If that doesn't work, make sure that nginx is not already running. If it still doesn't work, try setting up nginx first (change ssl “on” to “off” and try again).
---
* Copy the example nginx configuration and activate it: * Copy the example nginx configuration and activate it:
```shell ```shell
@ -148,23 +165,12 @@ sudo cp /opt/akkoma/installation/nginx/akkoma.nginx /etc/nginx/conf.d/akkoma.con
sudo systemctl enable --now nginx.service sudo systemctl enable --now nginx.service
``` ```
* Setup your SSL cert, using your method of choice or certbot. If using certbot, first install it: If you need to renew the certificate in the future, uncomment the relevant location block in the nginx config and run:
```shell ```shell
sudo dnf install certbot python3-certbot-nginx sudo certbot certonly --email <your@emailaddress> -d <yourdomain> --webroot -w /var/lib/letsencrypt/
``` ```
and then set it up:
```shell
sudo certbot --email <your@emailaddress> -d <yourdomain> -d <media_domain> --nginx
```
If that doesn't work the first time, add `--dry-run` to further attempts to avoid being ratelimited as you identify the issue, and do not remove it until the dry run succeeds. A common source of problems are nginx config syntax errors; this can be checked for by running `nginx -t`.
Certificate renewal should be handled automatically by Certbot from now on.
#### Other webserver/proxies #### Other webserver/proxies
You can find example configurations for them in `/opt/akkoma/installation/`. You can find example configurations for them in `/opt/akkoma/installation/`.
View file
@ -6,9 +6,7 @@ probably install frontends.
These are no longer bundled with the distribution and need an extra These are no longer bundled with the distribution and need an extra
command to install. command to install.
You **must** run frontend management tasks as the akkoma user, For most installations, the following will suffice:
the same way you downloaded the build or cloned the git repo before.
But otherwise, for most installations, the following will suffice:
=== "OTP" === "OTP"
```sh ```sh
@ -30,3 +28,4 @@ But otherwise, for most installations, the following will suffice:
``` ```
For more customised installations, refer to [Frontend Management](../../configuration/frontend_management) For more customised installations, refer to [Frontend Management](../../configuration/frontend_management)
View file
@ -1,8 +1,8 @@
## Required dependencies ## Required dependencies
* PostgreSQL 12+ * PostgreSQL 9.6+
* Elixir 1.14+ (currently tested up to 1.16) * Elixir 1.14+
* Erlang OTP 25+ (currently tested up to OTP26) * Erlang OTP 25+
* git * git
* file / libmagic * file / libmagic
* gcc (clang might also work) * gcc (clang might also work)
View file
@ -201,6 +201,25 @@ Assuming you want to open your newly installed federated social network to, well
include sites-enabled/*; include sites-enabled/*;
``` ```
* Setup your SSL cert, using your method of choice or certbot. If using certbot, install it if you haven't already:
```shell
# emerge --ask app-crypt/certbot app-crypt/certbot-nginx
```
and then set it up:
```shell
# mkdir -p /var/lib/letsencrypt/
# certbot certonly --email <your@emailaddress> -d <yourdomain> --standalone
```
If that doesn't work the first time, add `--dry-run` to further attempts to avoid being ratelimited as you identify the issue, and do not remove it until the dry run succeeds. If that doesnt work, make sure, that nginx is not already running. If it still doesnt work, try setting up nginx first (change ssl “on” to “off” and try again). Often the answer to issues with certbot is to use the `--nginx` flag once you have nginx up and running.
If you are using any additional subdomains, such as for a media proxy, you can re-run the same command with the subdomain in question. When it comes time to renew later, you will not need to run multiple times for each domain, one renew will handle it.
---
* Copy the example nginx configuration and activate it: * Copy the example nginx configuration and activate it:
```shell ```shell
@ -218,24 +237,9 @@ Pay special attention to the line that begins with `ssl_ecdh_curve`. It is stong
```shell ```shell
# rc-update add nginx default # rc-update add nginx default
# rc-service nginx start # /etc/init.d/nginx start
``` ```
* Setup your SSL cert, using your method of choice or certbot. If using certbot, install it if you haven't already:
```shell
# emerge --ask app-crypt/certbot app-crypt/certbot-nginx
```
and then set it up:
```shell
# mkdir -p /var/lib/letsencrypt/
# certbot --email <your@emailaddress> -d <yourdomain> -d <media_domain> --nginx
```
If that doesn't work the first time, add `--dry-run` to further attempts to avoid being ratelimited as you identify the issue, and do not remove it until the dry run succeeds. A common source of problems are nginx config syntax errors; this can be checked for by running `nginx -t`.
If you are using certbot, it is HIGHLY recommend you set up a cron job that renews your certificate, and that you install the suggested `certbot-nginx` plugin. If you don't do these things, you only have yourself to blame when your instance breaks suddenly because you forgot about it. If you are using certbot, it is HIGHLY recommend you set up a cron job that renews your certificate, and that you install the suggested `certbot-nginx` plugin. If you don't do these things, you only have yourself to blame when your instance breaks suddenly because you forgot about it.
First, ensure that the command you will be installing into your crontab works. First, ensure that the command you will be installing into your crontab works.
View file
@ -14,7 +14,7 @@ Note: the packages are not required with the current default settings of Akkoma.
`ImageMagick` is a set of tools to create, edit, compose, or convert bitmap images. `ImageMagick` is a set of tools to create, edit, compose, or convert bitmap images.
It is required for the following Akkoma features: It is required for the following Akkoma features:
* `Pleroma.Upload.Filters.Mogrify`, `Pleroma.Upload.Filters.Mogrifun` upload filters (related config: `Pleroma.Upload/filters` in `config/config.exs`) * `Pleroma.Upload.Filters.Mogrify`, `Pleroma.Upload.Filters.Mogrifun` upload filters (related config: `Plaroma.Upload/filters` in `config/config.exs`)
* Media preview proxy for still images (related config: `media_preview_proxy/enabled` in `config/config.exs`) * Media preview proxy for still images (related config: `media_preview_proxy/enabled` in `config/config.exs`)
## `ffmpeg` ## `ffmpeg`
@ -29,5 +29,4 @@ It is required for the following Akkoma features:
`exiftool` is media files metadata reader/writer. `exiftool` is media files metadata reader/writer.
It is required for the following Akkoma features: It is required for the following Akkoma features:
* `Pleroma.Upload.Filters.Exiftool.StripMetadata` upload filter (related config: `Pleroma.Upload/filters` in `config/config.exs`) * `Pleroma.Upload.Filters.Exiftool` upload filter (related config: `Plaroma.Upload/filters` in `config/config.exs`)
* `Pleroma.Upload.Filters.Exiftool.ReadDescription` upload filter (related config: `Pleroma.Upload/filters` in `config/config.exs`)
View file
@ -9,7 +9,7 @@ This guide covers a installation using an OTP release. To install Akkoma from so
* For installing OTP releases on RedHat-based distros like Fedora and Centos Stream, please follow [this guide](./otp_redhat_en.md) instead. * For installing OTP releases on RedHat-based distros like Fedora and Centos Stream, please follow [this guide](./otp_redhat_en.md) instead.
* A (sub)domain pointed to the machine * A (sub)domain pointed to the machine
You will be running commands as root. If you aren't root already, please elevate your privileges by executing `sudo -i`/`su`. You will be running commands as root. If you aren't root already, please elevate your privileges by executing `sudo su`/`su`.
While in theory OTP releases are possible to install on any compatible machine, for the sake of simplicity this guide focuses only on Debian/Ubuntu and Alpine.
@ -176,6 +176,11 @@ su akkoma -s $SHELL -lc "./bin/pleroma stop"
### Setting up nginx and getting Let's Encrypt SSL certificates ### Setting up nginx and getting Let's Encrypt SSL certificates
#### Get a Let's Encrypt certificate
```sh
certbot certonly --standalone --preferred-challenges http -d yourinstance.tld
```
#### Copy Akkoma nginx configuration to the nginx folder #### Copy Akkoma nginx configuration to the nginx folder
The location of nginx configs is dependent on the distro The location of nginx configs is dependent on the distro
@ -204,14 +209,6 @@ $EDITOR path-to-nginx-config
# Verify that the config is valid # Verify that the config is valid
nginx -t nginx -t
``` ```
#### Get a Let's Encrypt certificate
```sh
certbot --nginx -d yourinstance.tld -d media.yourinstance.tld
```
If that doesn't work the first time, add `--dry-run` to further attempts to avoid being ratelimited as you identify the issue, and do not remove it until the dry run succeeds. A common source of problems are nginx config syntax errors; this can be checked for by running `nginx -t`.
#### Start nginx #### Start nginx
=== "Alpine" === "Alpine"
@ -255,19 +252,32 @@ If everything worked, you should see Akkoma-FE when visiting your domain. If tha
## Post installation ## Post installation
### Setting up auto-renew of the Let's Encrypt certificate ### Setting up auto-renew of the Let's Encrypt certificate
```sh
# Create the directory for webroot challenges
mkdir -p /var/lib/letsencrypt
# Uncomment the webroot method
$EDITOR path-to-nginx-config
# Verify that the config is valid
nginx -t
```
=== "Alpine" === "Alpine"
``` ```
# Restart nginx
rc-service nginx restart
# Start the cron daemon and make it start on boot # Start the cron daemon and make it start on boot
rc-service crond start rc-service crond start
rc-update add crond rc-update add crond
# Ensure the webroot method and post hook is working
certbot renew --cert-name yourinstance.tld --nginx --dry-run certbot renew --cert-name yourinstance.tld --webroot -w /var/lib/letsencrypt/ --dry-run --post-hook 'rc-service nginx reload'
# Add it to the daily cron # Add it to the daily cron
echo '#!/bin/sh echo '#!/bin/sh
certbot renew --cert-name yourinstance.tld --nginx certbot renew --cert-name yourinstance.tld --webroot -w /var/lib/letsencrypt/ --post-hook "rc-service nginx reload"
' > /etc/periodic/daily/renew-akkoma-cert ' > /etc/periodic/daily/renew-akkoma-cert
chmod +x /etc/periodic/daily/renew-akkoma-cert chmod +x /etc/periodic/daily/renew-akkoma-cert
@ -276,7 +286,22 @@ If everything worked, you should see Akkoma-FE when visiting your domain. If tha
``` ```
=== "Debian/Ubuntu" === "Debian/Ubuntu"
This should be automatically enabled with the `certbot-renew.timer` systemd unit. ```
# Restart nginx
systemctl restart nginx
# Ensure the webroot method and post hook is working
certbot renew --cert-name yourinstance.tld --webroot -w /var/lib/letsencrypt/ --dry-run --post-hook 'systemctl reload nginx'
# Add it to the daily cron
echo '#!/bin/sh
certbot renew --cert-name yourinstance.tld --webroot -w /var/lib/letsencrypt/ --post-hook "systemctl reload nginx"
' > /etc/cron.daily/renew-akkoma-cert
chmod +x /etc/cron.daily/renew-akkoma-cert
# If everything worked the output should contain /etc/cron.daily/renew-akkoma-cert
run-parts --test /etc/cron.daily
```
## Create your first user and set as admin ## Create your first user and set as admin
```sh ```sh
View file
@ -82,7 +82,6 @@ Other than things bundled in the OTP release Akkoma depends on:
* PostgreSQL (also utilizes extensions in postgresql-contrib) * PostgreSQL (also utilizes extensions in postgresql-contrib)
* nginx (could be swapped with another reverse proxy but this guide covers only it) * nginx (could be swapped with another reverse proxy but this guide covers only it)
* certbot (for Let's Encrypt certificates, could be swapped with another ACME client, but this guide covers only it) * certbot (for Let's Encrypt certificates, could be swapped with another ACME client, but this guide covers only it)
* If you are using certbot, also install the `python3-certbot-nginx` package for the nginx plugin
* libmagic/file * libmagic/file
First, update your system, if not already done: First, update your system, if not already done:
@ -170,6 +169,12 @@ sudo -Hu akkoma ./bin/pleroma stop
### Setting up nginx and getting Let's Encrypt SSL certificates ### Setting up nginx and getting Let's Encrypt SSL certificates
#### Get a Let's Encrypt certificate
```shell
certbot certonly --standalone --preferred-challenges http -d yourinstance.tld
```
#### Copy Akkoma nginx configuration to the nginx folder #### Copy Akkoma nginx configuration to the nginx folder
```shell ```shell
@ -190,15 +195,8 @@ sudo nginx -t
sudo systemctl start nginx sudo systemctl start nginx
``` ```
#### Get a Let's Encrypt certificate At this point if you open your (sub)domain in a browser you should see a 502 error, that's because Akkoma is not started yet.
```shell
sudo certbot --email <your@emailaddress> -d <yourdomain> -d <media_domain> --nginx
```
If that doesn't work the first time, add `--dry-run` to further attempts to avoid being ratelimited as you identify the issue, and do not remove it until the dry run succeeds. A common source of problems are nginx config syntax errors; this can be checked for by running `nginx -t`.
If you're successful with obtaining the certificates, opening your (sub)domain in a browser will result in a 502 error, since Akkoma hasn't been started yet.
### Setting up a system service ### Setting up a system service
@ -241,11 +239,19 @@ sudo nginx -t
# Restart nginx # Restart nginx
sudo systemctl restart nginx sudo systemctl restart nginx
# Test that renewals work properly # Ensure the webroot method and post hook is working
sudo certbot renew --cert-name yourinstance.tld --nginx --dry-run sudo certbot renew --cert-name yourinstance.tld --webroot -w /var/lib/letsencrypt/ --dry-run --post-hook 'systemctl reload nginx'
# Add it to the daily cron
echo '#!/bin/sh
certbot renew --cert-name yourinstance.tld --webroot -w /var/lib/letsencrypt/ --post-hook "systemctl reload nginx"
' > /etc/cron.daily/renew-akkoma-cert
sudo chmod +x /etc/cron.daily/renew-akkoma-cert
# If everything worked the output should contain /etc/cron.daily/renew-akkoma-cert
sudo run-parts --test /etc/cron.daily
``` ```
Assuming the commands were run successfully, certbot should be able to renew your certificates automatically via the `certbot-renew.timer` systemd unit.
## Create your first user and set as admin ## Create your first user and set as admin
```shell ```shell
2
elixir_buildpack.config Normal file
View file
@ -0,0 +1,2 @@
elixir_version=1.14.3
erlang_version=25.3
View file
@ -19,9 +19,6 @@ Environment="MIX_ENV=prod"
; Don't listen epmd on 0.0.0.0 ; Don't listen epmd on 0.0.0.0
Environment="ERL_EPMD_ADDRESS=127.0.0.1" Environment="ERL_EPMD_ADDRESS=127.0.0.1"
; Don't busy wait
Environment="ERL_AFLAGS=+sbwt none +sbwtdcpu none +sbwtdio none"
; Make sure that all paths fit your installation. ; Make sure that all paths fit your installation.
; Path to the home directory of the user running the Akkoma service. ; Path to the home directory of the user running the Akkoma service.
Environment="HOME=/var/lib/akkoma" Environment="HOME=/var/lib/akkoma"
View file
@ -60,7 +60,7 @@ ServerTokens Prod
Include /etc/letsencrypt/options-ssl-apache.conf Include /etc/letsencrypt/options-ssl-apache.conf
# Uncomment the following to enable MediaProxy caching on disk # Uncomment the following to enable MediaProxy caching on disk
#CacheRoot /var/tmp/akkoma-media-cache/ #CacheRoot /tmp/akkoma-media-cache/
#CacheDirLevels 1 #CacheDirLevels 1
#CacheDirLength 2 #CacheDirLength 2
#CacheEnable disk /proxy #CacheEnable disk /proxy
View file
@ -16,7 +16,7 @@
SCRIPTNAME=${0##*/} SCRIPTNAME=${0##*/}
# mod_disk_cache directory # mod_disk_cache directory
CACHE_DIRECTORY="/var/tmp/akkoma-media-cache" CACHE_DIRECTORY="/tmp/akkoma-media-cache"
## Removes an item via the htcacheclean utility ## Removes an item via the htcacheclean utility
## $1 - the filename, can be a pattern . ## $1 - the filename, can be a pattern .
View file
@ -12,22 +12,26 @@ example.tld {
output file /var/log/caddy/akkoma.log output file /var/log/caddy/akkoma.log
} }
encode gzip
# this is explicitly IPv4 since Pleroma.Web.Endpoint binds on IPv4 only # this is explicitly IPv4 since Pleroma.Web.Endpoint binds on IPv4 only
# and `localhost.` resolves to [::0] on some systems: see issue #930 # and `localhost.` resolves to [::0] on some systems: see issue #930
reverse_proxy 127.0.0.1:4000 reverse_proxy 127.0.0.1:4000
@mediaproxy path /media/* /proxy/* # Uncomment if using a separate media subdomain
handle @mediaproxy { #@mediaproxy path /media/* /proxy/*
redir https://media.example.tld{uri} permanent #handle @mediaproxy {
} # redir https://media.example.tld{uri} permanent
#}
} }
media.example.tld { # Uncomment if using a separate media subdomain
@mediaproxy path /media/* /proxy/* #media.example.tld {
reverse_proxy @mediaproxy 127.0.0.1:4000 { # @mediaproxy path /media/* /proxy/*
transport http { # reverse_proxy @mediaproxy 127.0.0.1:4000 {
response_header_timeout 10s # transport http {
read_timeout 15s # response_header_timeout 10s
} # read_timeout 15s
} # }
} # }
#}
View file
@ -1,43 +1,23 @@
#!/sbin/openrc-run #!/sbin/openrc-run
supervisor=supervise-daemon supervisor=supervise-daemon
no_new_privs="yes" command_user=akkoma:akkoma
command_background=1
# Ask process to terminate within 30 seconds, otherwise kill it
retry="SIGTERM/30/SIGKILL/5"
pidfile="/var/run/akkoma.pid" pidfile="/var/run/akkoma.pid"
directory=/opt/akkoma
healthcheck_delay=60
healthcheck_timer=30
no_new_privs="yes"
# Ask process first to terminate itself within 60s, otherwise kill it : ${akkoma_port:-4000}
retry="SIGTERM/60/SIGKILL/5"
# if you really want to use start-stop-daemon instead, # Needs OpenRC >= 0.42
# also put the following in the config: #respawn_max=0
# command_background=1 #respawn_delay=5
# Adjust defaults as needed in /etc/conf.d/akkoma;
# no need to directly edit the service file
command_user="${command_user:-akkoma:akkoma}"
directory="${directory:-/var/lib/akkoma/akkoma}"
akkoma_port="${akkoma_port:-4000}"
# whether to allow connecting a remote exlixir shell to the running Akkoma instance
akkoma_console=${akkoma_console:-NO}
output_log="${output_log:-/var/log/akkoma}"
error_log="${error_log:-/var/log/akkoma}"
# 0 means unlimited restarts
respawn_max="${respawn_max:-0}"
respawn_delay="${respawn_delay:-5}"
# define respawn period to only count crashes within a
# sliding time window towards respawn_max, e.g.:
# respawn_period=2850
healthcheck_delay="${healthcheck_delay:-60}"
healthcheck_timer="${healthcheck_timer:-30}"
MIX_ENV=prod
ERL_EPMD_ADDRESS="${ERL_EPMD_ADDRESS:-127.0.0.1}"
ERL_AFLAGS="${ERL_AFLAGS:-+sbwt none +sbwtdcpu none +sbwtdio none}"
supervise_daemon_args="${supervise_daemon_args} --env MIX_ENV=${MIX_ENV}"
supervise_daemon_args="${supervise_daemon_args} --env ERL_EPMD_ADDRESS=${ERL_EPMD_ADDRESS}"
supervise_daemon_args="${supervise_daemon_args} --env ERL_AFLAGS='${ERL_AFLAGS}'"
# put akkoma_console=YES in /etc/conf.d/akkoma if you want to be able to
# connect to akkoma via an elixir console
if yesno "${akkoma_console}"; then if yesno "${akkoma_console}"; then
command=elixir command=elixir
command_args="--name akkoma@127.0.0.1 --erl '-kernel inet_dist_listen_min 9001 inet_dist_listen_max 9001 inet_dist_use_interface {127,0,0,1}' -S mix phx.server" command_args="--name akkoma@127.0.0.1 --erl '-kernel inet_dist_listen_min 9001 inet_dist_listen_max 9001 inet_dist_use_interface {127,0,0,1}' -S mix phx.server"
@ -51,24 +31,13 @@ else
command_args="phx.server" command_args="phx.server"
fi fi
export MIX_ENV=prod
export ERL_EPMD_ADDRESS=127.0.0.1
depend() { depend() {
need nginx postgresql need nginx postgresql
} }
start_pre() {
# Ensure logfile ownership and perms are alright
checkpath --file --owner "$command_user" "$output_log" "$error_log" \
|| eerror "Logfile(s) not owned by $command_user, or not a file!"
checkpath --writable "$output_log" "$error_log" \
|| eerror "Logfile(s) not writable!"
# If a recompile is needed perform it with lowest prio
# (delaying the actual start) to avoid hogging too much
# CPU from other services
cd "$directory"
doas -u "${command_user%%:*}" env MIX_ENV="$MIX_ENV" nice -n 19 "$command" compile
}
healthcheck() { healthcheck() {
# put akkoma_health=YES in /etc/conf.d/akkoma if you want healthchecking # put akkoma_health=YES in /etc/conf.d/akkoma if you want healthchecking
# and make sure you have curl installed # and make sure you have curl installed
View file
@ -1,9 +1,12 @@
# default nginx site config for Akkoma # default nginx site config for Akkoma
# #
# See the documentation at docs.akkoma.dev for your particular distro/OS for # Simple installation instructions:
# installation instructions. # 1. Install your TLS certificate, possibly using Let's Encrypt.
# 2. Replace 'example.tld' with your instance's domain wherever it appears.
# 3. Copy this file to /etc/nginx/sites-available/ and then add a symlink to it
# in /etc/nginx/sites-enabled/ and run 'nginx -s reload' or restart nginx.
proxy_cache_path /var/tmp/akkoma-media-cache levels=1:2 keys_zone=akkoma_media_cache:10m max_size=1g proxy_cache_path /tmp/akkoma-media-cache levels=1:2 keys_zone=akkoma_media_cache:10m max_size=10g
inactive=720m use_temp_path=off; inactive=720m use_temp_path=off;
# this is explicitly IPv4 since Pleroma.Web.Endpoint binds on IPv4 only # this is explicitly IPv4 since Pleroma.Web.Endpoint binds on IPv4 only
@ -12,19 +15,25 @@ upstream phoenix {
server 127.0.0.1:4000 max_fails=5 fail_timeout=60s; server 127.0.0.1:4000 max_fails=5 fail_timeout=60s;
} }
# If you are setting up TLS certificates without certbot, uncomment the server {
# following to enable HTTP -> HTTPS redirects. Certbot users don't need to do server_name example.tld;
# this as it will automatically do this for you.
# server { listen 80;
# server_name example.tld media.example.tld; listen [::]:80;
#
# listen 80; # Uncomment this if you need to use the 'webroot' method with certbot. Make sure
# listen [::]:80; # that the directory exists and that it is accessible by the webserver. If you followed
# # the guide, you already ran 'mkdir -p /var/lib/letsencrypt' to create the folder.
# location / { # You may need to load this file with the ssl server block commented out, run certbot
# return 301 https://$server_name$request_uri; # to get the certificate, and then uncomment it.
# } #
# } # location ~ /\.well-known/acme-challenge {
# root /var/lib/letsencrypt/;
# }
location / {
return 301 https://$server_name$request_uri;
}
}
# Enable SSL session caching for improved performance # Enable SSL session caching for improved performance
ssl_session_cache shared:ssl_session_cache:10m; ssl_session_cache shared:ssl_session_cache:10m;
@ -32,29 +41,22 @@ ssl_session_cache shared:ssl_session_cache:10m;
server { server {
server_name example.tld; server_name example.tld;
# Once certbot is set up, this will automatically be updated to listen to listen 443 ssl http2;
# port 443 with TLS alongside a redirect from plaintext HTTP. listen [::]:443 ssl http2;
listen 80; ssl_session_timeout 1d;
listen [::]:80; ssl_session_cache shared:MozSSL:10m; # about 40000 sessions
ssl_session_tickets off;
# If you are not using Certbot, comment out the above and uncomment/edit the following ssl_trusted_certificate /etc/letsencrypt/live/example.tld/chain.pem;
# listen 443 ssl http2; ssl_certificate /etc/letsencrypt/live/example.tld/fullchain.pem;
# listen [::]:443 ssl http2; ssl_certificate_key /etc/letsencrypt/live/example.tld/privkey.pem;
# ssl_session_timeout 1d;
# ssl_session_cache shared:MozSSL:10m; # about 40000 sessions
# ssl_session_tickets off;
#
# ssl_trusted_certificate /etc/letsencrypt/live/example.tld/chain.pem;
# ssl_certificate /etc/letsencrypt/live/example.tld/fullchain.pem;
# ssl_certificate_key /etc/letsencrypt/live/example.tld/privkey.pem;
#
# ssl_protocols TLSv1.2 TLSv1.3;
# ssl_ciphers "ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:!aNULL:!eNULL:!EXPORT:!DES:!MD5:!PSK:!RC4";
# ssl_prefer_server_ciphers off;
# ssl_ecdh_curve X25519:prime256v1:secp384r1:secp521r1;
# ssl_stapling on;
# ssl_stapling_verify on;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_ciphers "ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:!aNULL:!eNULL:!EXPORT:!DES:!MD5:!PSK:!RC4";
ssl_prefer_server_ciphers off;
ssl_ecdh_curve X25519:prime256v1:secp384r1:secp521r1;
ssl_stapling on;
ssl_stapling_verify on;
gzip_vary on; gzip_vary on;
gzip_proxied any; gzip_proxied any;
@ -84,22 +86,27 @@ server {
# Upload and MediaProxy Subdomain # Upload and MediaProxy Subdomain
# (see main domain setup for more details) # (see main domain setup for more details)
server {
server_name media.example.tld;
listen 80;
listen [::]:80;
location / {
return 301 https://$server_name$request_uri;
}
}
server { server {
server_name media.example.tld; server_name media.example.tld;
# Same as above, will be updated to HTTPS once certbot is set up. listen 443 ssl http2;
listen 80; listen [::]:443 ssl http2;
listen [::]:80;
# If you are not using certbot, comment the above and copy all the ssl ssl_trusted_certificate /etc/letsencrypt/live/media.example.tld/chain.pem;
# stuff from above into here. ssl_certificate /etc/letsencrypt/live/media.example.tld/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/media.example.tld/privkey.pem;
gzip_vary on; # .. copy all other the ssl_* and gzip_* stuff from main domain
gzip_proxied any;
gzip_comp_level 6;
gzip_buffers 16 8k;
gzip_http_version 1.1;
gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript application/activity+json application/atom+xml;
# the nginx default is 1m, not enough for large media uploads # the nginx default is 1m, not enough for large media uploads
client_max_body_size 16m; client_max_body_size 16m;

View file
SCRIPTNAME=${0##*/} SCRIPTNAME=${0##*/}
# NGINX cache directory # NGINX cache directory
CACHE_DIRECTORY="/var/tmp/akkoma-media-cache" CACHE_DIRECTORY="/tmp/akkoma-media-cache"
## Return the files where the items are cached. ## Return the files where the items are cached.
## $1 - the filename, can be a pattern . ## $1 - the filename, can be a pattern .
View file
@ -16,7 +16,7 @@ defmodule Mix.Pleroma do
:fast_html, :fast_html,
:oban :oban
] ]
@cachex_children ["object", "user", "scrubber", "web_resp", "http_backoff"] @cachex_children ["object", "user", "scrubber", "web_resp"]
@doc "Common functions to be reused in mix tasks" @doc "Common functions to be reused in mix tasks"
def start_pleroma do def start_pleroma do
Pleroma.Config.Holder.save_default() Pleroma.Config.Holder.save_default()
@ -112,26 +112,18 @@ def shell_prompt(prompt, defval \\ nil, defname \\ nil) do
end end
end end
def shell_info(message) when is_binary(message) or is_list(message) do def shell_info(message) do
if mix_shell?(), if mix_shell?(),
do: Mix.shell().info(message), do: Mix.shell().info(message),
else: IO.puts(message) else: IO.puts(message)
end end
def shell_info(message) do def shell_error(message) do
shell_info("#{inspect(message)}")
end
def shell_error(message) when is_binary(message) or is_list(message) do
if mix_shell?(), if mix_shell?(),
do: Mix.shell().error(message), do: Mix.shell().error(message),
else: IO.puts(:stderr, message) else: IO.puts(:stderr, message)
end end
def shell_error(message) do
shell_error("#{inspect(message)}")
end
@doc "Performs a safe check whether `Mix.shell/0` is available (does not raise if Mix is not loaded)" @doc "Performs a safe check whether `Mix.shell/0` is available (does not raise if Mix is not loaded)"
def mix_shell?, do: :erlang.function_exported(Mix, :shell, 0) def mix_shell?, do: :erlang.function_exported(Mix, :shell, 0)
View file
@ -8,6 +8,7 @@ defmodule Mix.Tasks.Pleroma.Activity do
alias Pleroma.User alias Pleroma.User
alias Pleroma.Web.CommonAPI alias Pleroma.Web.CommonAPI
alias Pleroma.Pagination alias Pleroma.Pagination
require Logger
import Mix.Pleroma import Mix.Pleroma
import Ecto.Query import Ecto.Query
@ -16,7 +17,7 @@ def run(["get", id | _rest]) do
id id
|> Activity.get_by_id() |> Activity.get_by_id()
|> shell_info() |> IO.inspect()
end end
def run(["delete_by_keyword", user, keyword | _rest]) do def run(["delete_by_keyword", user, keyword | _rest]) do
@ -34,7 +35,7 @@ def run(["delete_by_keyword", user, keyword | _rest]) do
) )
|> Enum.map(fn x -> CommonAPI.delete(x.id, u) end) |> Enum.map(fn x -> CommonAPI.delete(x.id, u) end)
|> Enum.count() |> Enum.count()
|> shell_info() |> IO.puts()
end end
defp query_with(q, search_query) do defp query_with(q, search_query) do
View file
@ -20,102 +20,6 @@ defmodule Mix.Tasks.Pleroma.Database do
@shortdoc "A collection of database related tasks" @shortdoc "A collection of database related tasks"
@moduledoc File.read!("docs/docs/administration/CLI_tasks/database.md") @moduledoc File.read!("docs/docs/administration/CLI_tasks/database.md")
defp maybe_limit(query, limit_cnt) do
if is_number(limit_cnt) and limit_cnt > 0 do
limit(query, [], ^limit_cnt)
else
query
end
end
defp limit_statement(limit) when is_number(limit) do
if limit > 0 do
"LIMIT #{limit}"
else
""
end
end
defp prune_orphaned_activities_singles(limit) do
%{:num_rows => del_single} =
"""
delete from public.activities
where id in (
select a.id from public.activities a
left join public.objects o on a.data ->> 'object' = o.data ->> 'id'
left join public.activities a2 on a.data ->> 'object' = a2.data ->> 'id'
left join public.users u on a.data ->> 'object' = u.ap_id
where not a.local
and jsonb_typeof(a."data" -> 'object') = 'string'
and o.id is null
and a2.id is null
and u.id is null
#{limit_statement(limit)}
)
"""
|> Repo.query!([], timeout: :infinity)
Logger.info("Prune activity singles: deleted #{del_single} rows...")
del_single
end
defp prune_orphaned_activities_array(limit) do
%{:num_rows => del_array} =
"""
delete from public.activities
where id in (
select a.id from public.activities a
join json_array_elements_text((a."data" -> 'object')::json) as j
on a.data->>'type' = 'Flag'
left join public.objects o on j.value = o.data ->> 'id'
left join public.activities a2 on j.value = a2.data ->> 'id'
left join public.users u on j.value = u.ap_id
group by a.id
having max(o.data ->> 'id') is null
and max(a2.data ->> 'id') is null
and max(u.ap_id) is null
#{limit_statement(limit)}
)
"""
|> Repo.query!([], timeout: :infinity)
Logger.info("Prune activity arrays: deleted #{del_array} rows...")
del_array
end
def prune_orphaned_activities(limit \\ 0, opts \\ []) when is_number(limit) do
# Activities can either refer to a single object id, an array of object ids
# or contain an inlined object (at least after going through our normalisation)
#
# Flag is the only type we support with an array (and always has arrays).
# Update is the only one with inlined objects.
#
# We already regularly purge old Delete, Undo, Update and Remove and, if
# rejected, Follow requests anyway; no need to explicitly deal with those here.
#
# Since there's an index on types and there are typically only few Flag
# activities, it's _much_ faster to utilise the index. To avoid accidentally
# deleting useful activities should more types be added, keep typeof for singles.
# Prune activities who link to an array of objects
del_array =
if Keyword.get(opts, :arrays, true) do
prune_orphaned_activities_array(limit)
else
0
end
# Prune activities who link to a single object
del_single =
if Keyword.get(opts, :singles, true) do
prune_orphaned_activities_singles(limit)
else
0
end
del_single + del_array
end
def run(["remove_embedded_objects" | args]) do def run(["remove_embedded_objects" | args]) do
{options, [], []} = {options, [], []} =
OptionParser.parse( OptionParser.parse(
@ -158,37 +62,6 @@ def run(["update_users_following_followers_counts"]) do
) )
end end
def run(["prune_orphaned_activities" | args]) do
{options, [], []} =
OptionParser.parse(
args,
strict: [
limit: :integer,
singles: :boolean,
arrays: :boolean
]
)
start_pleroma()
{limit, options} = Keyword.pop(options, :limit, 0)
log_message = "Pruning orphaned activities"
log_message =
if limit > 0 do
log_message <> ", limiting deletion to #{limit} rows"
else
log_message
end
Logger.info(log_message)
deleted = prune_orphaned_activities(limit, options)
Logger.info("Deleted #{deleted} rows")
end
def run(["prune_objects" | args]) do def run(["prune_objects" | args]) do
{options, [], []} = {options, [], []} =
OptionParser.parse( OptionParser.parse(
@ -197,8 +70,7 @@ def run(["prune_objects" | args]) do
vacuum: :boolean, vacuum: :boolean,
keep_threads: :boolean, keep_threads: :boolean,
keep_non_public: :boolean, keep_non_public: :boolean,
prune_orphaned_activities: :boolean, prune_orphaned_activities: :boolean
limit: :integer
] ]
) )
@ -207,8 +79,6 @@ def run(["prune_objects" | args]) do
deadline = Pleroma.Config.get([:instance, :remote_post_retention_days]) deadline = Pleroma.Config.get([:instance, :remote_post_retention_days])
time_deadline = NaiveDateTime.utc_now() |> NaiveDateTime.add(-(deadline * 86_400)) time_deadline = NaiveDateTime.utc_now() |> NaiveDateTime.add(-(deadline * 86_400))
limit_cnt = Keyword.get(options, :limit, 0)
log_message = "Pruning objects older than #{deadline} days" log_message = "Pruning objects older than #{deadline} days"
log_message = log_message =
@ -240,124 +110,129 @@ def run(["prune_objects" | args]) do
log_message log_message
end end
log_message =
if limit_cnt > 0 do
log_message <> ", limiting to #{limit_cnt} rows"
else
log_message
end
Logger.info(log_message) Logger.info(log_message)
{del_obj, _} = if Keyword.get(options, :keep_threads) do
if Keyword.get(options, :keep_threads) do # We want to delete objects from threads where
# We want to delete objects from threads where # 1. the newest post is still old
# 1. the newest post is still old # 2. none of the activities is local
# 2. none of the activities is local # 3. none of the activities is bookmarked
# 3. none of the activities is bookmarked # 4. optionally none of the posts is non-public
# 4. optionally none of the posts is non-public deletable_context =
deletable_context = if Keyword.get(options, :keep_non_public) do
if Keyword.get(options, :keep_non_public) do Pleroma.Activity
Pleroma.Activity |> join(:left, [a], b in Pleroma.Bookmark, on: a.id == b.activity_id)
|> join(:left, [a], b in Pleroma.Bookmark, on: a.id == b.activity_id) |> group_by([a], fragment("? ->> 'context'::text", a.data))
|> group_by([a], fragment("? ->> 'context'::text", a.data)) |> having(
|> having( [a],
[a], not fragment(
not fragment( # Posts (checked on Create Activity) is non-public
# Posts (checked on Create Activity) is non-public "bool_or((not(?->'to' \\? ? OR ?->'cc' \\? ?)) and ? ->> 'type' = 'Create')",
"bool_or((not(?->'to' \\? ? OR ?->'cc' \\? ?)) and ? ->> 'type' = 'Create')", a.data,
a.data, ^Pleroma.Constants.as_public(),
^Pleroma.Constants.as_public(), a.data,
a.data, ^Pleroma.Constants.as_public(),
^Pleroma.Constants.as_public(), a.data
a.data
)
) )
else
Pleroma.Activity
|> join(:left, [a], b in Pleroma.Bookmark, on: a.id == b.activity_id)
|> group_by([a], fragment("? ->> 'context'::text", a.data))
end
|> having([a], max(a.updated_at) < ^time_deadline)
|> having([a], not fragment("bool_or(?)", a.local))
|> having([_, b], fragment("max(?::text) is null", b.id))
|> maybe_limit(limit_cnt)
|> select([a], fragment("? ->> 'context'::text", a.data))
Pleroma.Object
|> where([o], fragment("? ->> 'context'::text", o.data) in subquery(deletable_context))
else
deletable =
if Keyword.get(options, :keep_non_public) do
Pleroma.Object
|> where(
[o],
fragment(
"?->'to' \\? ? OR ?->'cc' \\? ?",
o.data,
^Pleroma.Constants.as_public(),
o.data,
^Pleroma.Constants.as_public()
)
)
else
Pleroma.Object
end
|> where([o], o.updated_at < ^time_deadline)
|> where(
[o],
fragment("split_part(?->>'actor', '/', 3) != ?", o.data, ^Pleroma.Web.Endpoint.host())
) )
|> maybe_limit(limit_cnt) else
|> select([o], o.id) Pleroma.Activity
|> join(:left, [a], b in Pleroma.Bookmark, on: a.id == b.activity_id)
|> group_by([a], fragment("? ->> 'context'::text", a.data))
end
|> having([a], max(a.updated_at) < ^time_deadline)
|> having([a], not fragment("bool_or(?)", a.local))
|> having([_, b], fragment("max(?::text) is null", b.id))
|> select([a], fragment("? ->> 'context'::text", a.data))
Pleroma.Object
|> where([o], fragment("? ->> 'context'::text", o.data) in subquery(deletable_context))
else
if Keyword.get(options, :keep_non_public) do
Pleroma.Object
|> where(
[o],
fragment(
"?->'to' \\? ? OR ?->'cc' \\? ?",
o.data,
^Pleroma.Constants.as_public(),
o.data,
^Pleroma.Constants.as_public()
)
)
else
Pleroma.Object Pleroma.Object
|> where([o], o.id in subquery(deletable))
end end
|> Repo.delete_all(timeout: :infinity) |> where([o], o.updated_at < ^time_deadline)
|> where(
Logger.info("Deleted #{del_obj} objects...") [o],
fragment("split_part(?->>'actor', '/', 3) != ?", o.data, ^Pleroma.Web.Endpoint.host())
)
end
|> Repo.delete_all(timeout: :infinity)
if !Keyword.get(options, :keep_threads) do if !Keyword.get(options, :keep_threads) do
# Without the --keep-threads option, it's possible that bookmarked # Without the --keep-threads option, it's possible that bookmarked
# objects have been deleted. We remove the corresponding bookmarks. # objects have been deleted. We remove the corresponding bookmarks.
%{:num_rows => del_bookmarks} = """
""" delete from public.bookmarks
delete from public.bookmarks where id in (
where id in ( select b.id from public.bookmarks b
select b.id from public.bookmarks b left join public.activities a on b.activity_id = a.id
left join public.activities a on b.activity_id = a.id left join public.objects o on a."data" ->> 'object' = o.data ->> 'id'
left join public.objects o on a."data" ->> 'object' = o.data ->> 'id' where o.id is null
where o.id is null )
) """
""" |> Repo.query([], timeout: :infinity)
|> Repo.query!([], timeout: :infinity)
Logger.info("Deleted #{del_bookmarks} orphaned bookmarks...")
end end
   if Keyword.get(options, :prune_orphaned_activities) do
-    del_activities = prune_orphaned_activities()
-    Logger.info("Deleted #{del_activities} orphaned activities...")
+    # Prune activities who link to a single object
+    """
+    delete from public.activities
+    where id in (
+      select a.id from public.activities a
+      left join public.objects o on a.data ->> 'object' = o.data ->> 'id'
+      left join public.activities a2 on a.data ->> 'object' = a2.data ->> 'id'
+      left join public.users u on a.data ->> 'object' = u.ap_id
+      where not a.local
+      and jsonb_typeof(a."data" -> 'object') = 'string'
+      and o.id is null
+      and a2.id is null
+      and u.id is null
+    )
+    """
+    |> Repo.query([], timeout: :infinity)
+
+    # Prune activities who link to an array of objects
+    """
+    delete from public.activities
+    where id in (
+      select a.id from public.activities a
+      join json_array_elements_text((a."data" -> 'object')::json) as j on jsonb_typeof(a."data" -> 'object') = 'array'
+      left join public.objects o on j.value = o.data ->> 'id'
+      left join public.activities a2 on j.value = a2.data ->> 'id'
+      left join public.users u on j.value = u.ap_id
+      group by a.id
+      having max(o.data ->> 'id') is null
+      and max(a2.data ->> 'id') is null
+      and max(u.ap_id) is null
+    )
+    """
+    |> Repo.query([], timeout: :infinity)
   end
-  %{:num_rows => del_hashtags} =
-    """
-    DELETE FROM hashtags AS ht
-    WHERE NOT EXISTS (
-      SELECT 1 FROM hashtags_objects hto
-      WHERE ht.id = hto.hashtag_id)
-    """
-    |> Repo.query!()
-
-  Logger.info("Deleted #{del_hashtags} no longer used hashtags...")
+  """
+  DELETE FROM hashtags AS ht
+  WHERE NOT EXISTS (
+    SELECT 1 FROM hashtags_objects hto
+    WHERE ht.id = hto.hashtag_id)
+  """
+  |> Repo.query()

   if Keyword.get(options, :vacuum) do
-    Logger.info("Starting vacuum...")
     Maintenance.vacuum("full")
   end
-
-  Logger.info("All done!")
 end

 def run(["prune_task"]) do
@@ -3,6 +3,7 @@ defmodule Mix.Tasks.Pleroma.Diagnostics do
   alias Pleroma.Repo
   alias Pleroma.User
+  require Logger
   require Pleroma.Constants

   import Mix.Pleroma

@@ -13,20 +14,13 @@ def run(["http", url]) do
     start_pleroma()

     Pleroma.HTTP.get(url)
-    |> shell_info()
-  end
-
-  def run(["fetch_object", url]) do
-    start_pleroma()
-
-    Pleroma.Object.Fetcher.fetch_object_from_id(url)
     |> IO.inspect()
   end

   def run(["home_timeline", nickname]) do
     start_pleroma()
     user = Repo.get_by!(User, nickname: nickname)
-    shell_info("Home timeline query #{user.nickname}")
+    Logger.info("Home timeline query #{user.nickname}")

     followed_hashtags =
       user

@@ -55,14 +49,14 @@ def run(["home_timeline", nickname]) do
     |> limit(20)

     Ecto.Adapters.SQL.explain(Repo, :all, query, analyze: true, timeout: :infinity)
-    |> shell_info()
+    |> IO.puts()
   end

   def run(["user_timeline", nickname, reading_nickname]) do
     start_pleroma()
     user = Repo.get_by!(User, nickname: nickname)
     reading_user = Repo.get_by!(User, nickname: reading_nickname)
-    shell_info("User timeline query #{user.nickname}")
+    Logger.info("User timeline query #{user.nickname}")

     params =
       %{limit: 20}

@@ -86,7 +80,7 @@ def run(["user_timeline", nickname, reading_nickname]) do
     |> limit(20)

     Ecto.Adapters.SQL.explain(Repo, :all, query, analyze: true, timeout: :infinity)
-    |> shell_info()
+    |> IO.puts()
   end

   def run(["notifications", nickname]) do

@@ -102,7 +96,7 @@ def run(["notifications", nickname]) do
     |> limit(20)

     Ecto.Adapters.SQL.explain(Repo, :all, query, analyze: true, timeout: :infinity)
-    |> shell_info()
+    |> IO.puts()
   end

   def run(["known_network", nickname]) do

@@ -128,6 +122,6 @@ def run(["known_network", nickname]) do
     |> limit(20)

     Ecto.Adapters.SQL.explain(Repo, :all, query, analyze: true, timeout: :infinity)
-    |> shell_info()
+    |> IO.puts()
   end
 end
@@ -27,11 +27,11 @@ def run(["ls-packs" | args]) do
       ]

       for {param, value} <- to_print do
-        shell_info(IO.ANSI.format([:bright, param, :normal, ": ", value]))
+        IO.puts(IO.ANSI.format([:bright, param, :normal, ": ", value]))
       end

       # A newline
-      shell_info("")
+      IO.puts("")
     end)
   end

@@ -49,7 +49,7 @@ def run(["get-packs" | args]) do
         pack = manifest[pack_name]
         src = pack["src"]

-        shell_info(
+        IO.puts(
           IO.ANSI.format([
             "Downloading ",
             :bright,

@@ -67,9 +67,9 @@ def run(["get-packs" | args]) do
         sha_status_text = ["SHA256 of ", :bright, pack_name, :normal, " source file is ", :bright]

         if archive_sha == String.upcase(pack["src_sha256"]) do
-          shell_info(IO.ANSI.format(sha_status_text ++ [:green, "OK"]))
+          IO.puts(IO.ANSI.format(sha_status_text ++ [:green, "OK"]))
         else
-          shell_info(IO.ANSI.format(sha_status_text ++ [:red, "BAD"]))
+          IO.puts(IO.ANSI.format(sha_status_text ++ [:red, "BAD"]))
           raise "Bad SHA256 for #{pack_name}"
         end

@@ -80,7 +80,7 @@ def run(["get-packs" | args]) do
           |> Path.dirname()
           |> Path.join(pack["files"])

-        shell_info(
+        IO.puts(
           IO.ANSI.format([
             "Fetching the file list for ",
             :bright,

@@ -94,7 +94,7 @@ def run(["get-packs" | args]) do
         files = fetch_and_decode!(files_loc)

-        shell_info(IO.ANSI.format(["Unpacking ", :bright, pack_name]))
+        IO.puts(IO.ANSI.format(["Unpacking ", :bright, pack_name]))

         pack_path =
           Path.join([

@@ -111,11 +111,11 @@ def run(["get-packs" | args]) do
         {:ok, _} =
           :zip.unzip(binary_archive,
-            cwd: to_charlist(pack_path),
+            cwd: pack_path,
             file_list: files_to_unzip
           )

-        shell_info(IO.ANSI.format(["Writing pack.json for ", :bright, pack_name]))
+        IO.puts(IO.ANSI.format(["Writing pack.json for ", :bright, pack_name]))

         pack_json = %{
           pack: %{

@@ -132,7 +132,7 @@ def run(["get-packs" | args]) do
         File.write!(Path.join(pack_path, "pack.json"), Jason.encode!(pack_json, pretty: true))
         Pleroma.Emoji.reload()
       else
-        shell_info(IO.ANSI.format([:bright, :red, "No pack named \"#{pack_name}\" found"]))
+        IO.puts(IO.ANSI.format([:bright, :red, "No pack named \"#{pack_name}\" found"]))
       end
     end
   end

@@ -180,14 +180,14 @@ def run(["gen-pack" | args]) do
         custom_exts
       end

-    shell_info("Using #{Enum.join(exts, " ")} extensions")
-    shell_info("Downloading the pack and generating SHA256")
+    IO.puts("Using #{Enum.join(exts, " ")} extensions")
+    IO.puts("Downloading the pack and generating SHA256")

     {:ok, %{body: binary_archive}} = Pleroma.HTTP.get(src)
     archive_sha = :crypto.hash(:sha256, binary_archive) |> Base.encode16()

-    shell_info("SHA256 is #{archive_sha}")
+    IO.puts("SHA256 is #{archive_sha}")

     pack_json = %{
       name => %{

@@ -208,7 +208,7 @@ def run(["gen-pack" | args]) do

     File.write!(files_name, Jason.encode!(emoji_map, pretty: true))

-    shell_info("""
+    IO.puts("""
     #{files_name} has been created and contains the list of all found emojis in the pack.
     Please review the files in the pack and remove those not needed.

@@ -230,11 +230,11 @@ def run(["gen-pack" | args]) do
         )
       )

-      shell_info("#{pack_file} has been updated with the #{name} pack")
+      IO.puts("#{pack_file} has been updated with the #{name} pack")
     else
       File.write!(pack_file, Jason.encode!(pack_json, pretty: true))

-      shell_info("#{pack_file} has been created with the #{name} pack")
+      IO.puts("#{pack_file} has been created with the #{name} pack")
     end

     Pleroma.Emoji.reload()

@@ -243,7 +243,7 @@ def run(["gen-pack" | args]) do
   def run(["reload"]) do
     start_pleroma()
     Pleroma.Emoji.reload()
-    shell_info("Emoji packs have been reloaded.")
+    IO.puts("Emoji packs have been reloaded.")
   end

   defp fetch_and_decode!(from) do
@@ -35,8 +35,7 @@ def run(["gen" | rest]) do
           static_dir: :string,
           listen_ip: :string,
           listen_port: :string,
-          strip_uploads_metadata: :string,
-          read_uploads_description: :string,
+          strip_uploads: :string,
           anonymize_uploads: :string
         ],
         aliases: [

@@ -170,38 +169,21 @@ def run(["gen" | rest]) do
       )
       |> Path.expand()

-    {strip_uploads_metadata_message, strip_uploads_metadata_default} =
+    {strip_uploads_message, strip_uploads_default} =
       if Pleroma.Utils.command_available?("exiftool") do
-        {"Do you want to strip metadata from uploaded images? This requires exiftool, it was detected as installed. (y/n)",
+        {"Do you want to strip location (GPS) data from uploaded images? This requires exiftool, it was detected as installed. (y/n)",
          "y"}
       else
-        {"Do you want to strip metadata from uploaded images? This requires exiftool, it was detected as not installed, please install it if you answer yes. (y/n)",
+        {"Do you want to strip location (GPS) data from uploaded images? This requires exiftool, it was detected as not installed, please install it if you answer yes. (y/n)",
          "n"}
       end

-    strip_uploads_metadata =
+    strip_uploads =
       get_option(
         options,
-        :strip_uploads_metadata,
-        strip_uploads_metadata_message,
-        strip_uploads_metadata_default
-      ) === "y"
-
-    {read_uploads_description_message, read_uploads_description_default} =
-      if Pleroma.Utils.command_available?("exiftool") do
-        {"Do you want to read data from uploaded files so clients can use it to prefill fields like image description? This requires exiftool, it was detected as installed. (y/n)",
-         "y"}
-      else
-        {"Do you want to read data from uploaded files so clients can use it to prefill fields like image description? This requires exiftool, it was detected as not installed, please install it if you answer yes. (y/n)",
-         "n"}
-      end
-
-    read_uploads_description =
-      get_option(
-        options,
-        :read_uploads_description,
-        read_uploads_description_message,
-        read_uploads_description_default
+        :strip_uploads,
+        strip_uploads_message,
+        strip_uploads_default
       ) === "y"

     anonymize_uploads =

@@ -248,8 +230,7 @@ def run(["gen" | rest]) do
         listen_port: listen_port,
         upload_filters:
           upload_filters(%{
-            strip_metadata: strip_uploads_metadata,
-            read_description: read_uploads_description,
+            strip: strip_uploads,
             anonymize: anonymize_uploads
           })
       )

@@ -324,20 +305,11 @@ defp write_robots_txt(static_dir, indexable, template_dir) do
   end

   defp upload_filters(filters) when is_map(filters) do
-    enabled_filters = []
-
     enabled_filters =
-      if filters.read_description do
-        enabled_filters ++ [Pleroma.Upload.Filter.Exiftool.ReadDescription]
+      if filters.strip do
+        [Pleroma.Upload.Filter.Exiftool]
       else
-        enabled_filters
-      end
-
-    enabled_filters =
-      if filters.strip_metadata do
-        enabled_filters ++ [Pleroma.Upload.Filter.Exiftool.StripMetadata]
-      else
-        enabled_filters
+        []
       end

     enabled_filters =
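The develop column replaces the single strip/no-strip branch with an accumulator so each prompt appends its own filter. A condensed sketch of that accumulation, where the two booleans stand in for the answers gathered by get_option above:

# Sketch of the develop-side accumulation (module names as in the hunk).
read_uploads_description = true
strip_uploads_metadata = true

enabled_filters = []

enabled_filters =
  if read_uploads_description,
    do: enabled_filters ++ [Pleroma.Upload.Filter.Exiftool.ReadDescription],
    else: enabled_filters

enabled_filters =
  if strip_uploads_metadata,
    do: enabled_filters ++ [Pleroma.Upload.Filter.Exiftool.StripMetadata],
    else: enabled_filters

# enabled_filters now lists both exiftool filters, in prompt order.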
@@ -11,6 +11,7 @@ defmodule Mix.Tasks.Pleroma.RefreshCounterCache do
   alias Pleroma.CounterCache
   alias Pleroma.Repo
+  require Logger
   import Ecto.Query

   def run([]) do
@@ -48,7 +48,7 @@ def run(["index"]) do
       ]
     )

-    shell_info("Created indices. Starting to insert posts.")
+    IO.puts("Created indices. Starting to insert posts.")

     chunk_size = Pleroma.Config.get([Pleroma.Search.Meilisearch, :initial_indexing_chunk_size])

@@ -65,7 +65,7 @@ def run(["index"]) do
       )

     count = query |> Pleroma.Repo.aggregate(:count, :data)
-    shell_info("Entries to index: #{count}")
+    IO.puts("Entries to index: #{count}")

     Pleroma.Repo.stream(
       query,

@@ -92,10 +92,10 @@ def run(["index"]) do
       with {:ok, res} <- result do
         if not Map.has_key?(res, "indexUid") do
-          shell_info("\nFailed to index: #{inspect(result)}")
+          IO.puts("\nFailed to index: #{inspect(result)}")
         end
       else
-        e -> shell_error("\nFailed to index due to network error: #{inspect(e)}")
+        e -> IO.puts("\nFailed to index due to network error: #{inspect(e)}")
       end
     end)
     |> Stream.run()

@@ -126,15 +126,11 @@ def run(["show-keys", master_key]) do
     decoded = Jason.decode!(result.body)

     if decoded["results"] do
-      Enum.each(decoded["results"], fn
-        %{"name" => name, "key" => key} ->
-          shell_info("#{name}: #{key}")
-
-        %{"description" => desc, "key" => key} ->
-          shell_info("#{desc}: #{key}")
+      Enum.each(decoded["results"], fn %{"description" => desc, "key" => key} ->
+        IO.puts("#{desc}: #{key}")
       end)
     else
-      shell_error("Error fetching the keys, check the master key is correct: #{inspect(decoded)}")
+      IO.puts("Error fetching the keys, check the master key is correct: #{inspect(decoded)}")
     end
   end

@@ -142,7 +138,7 @@ def run(["stats"]) do
     start_pleroma()
     {:ok, result} = meili_get("/indexes/objects/stats")

-    shell_info("Number of entries: #{result["numberOfDocuments"]}")
-    shell_info("Indexing? #{result["isIndexing"]}")
+    IO.puts("Number of entries: #{result["numberOfDocuments"]}")
+    IO.puts("Indexing? #{result["isIndexing"]}")
   end
 end
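The two clauses the develop side matches in show-keys are not redundant: depending on the Meilisearch version, a key object may presumably expose a "name", only a "description", or both, so develop dispatches on each shape. Sketch with invented example maps:

# Sketch: the two key shapes develop's show-keys handles.
results = [
  %{"name" => "Default Admin API Key", "key" => "abc123"},
  %{"description" => "Old-style key", "key" => "def456"}
]

Enum.each(results, fn
  %{"name" => name, "key" => key} -> IO.puts("#{name}: #{key}")
  %{"description" => desc, "key" => key} -> IO.puts("#{desc}: #{key}")
end)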
@@ -38,7 +38,7 @@ def run(["spoof-uploaded"]) do
     Logger.put_process_level(self(), :notice)
     start_pleroma()

-    shell_info("""
+    IO.puts("""
    +------------------------+
    |  SPOOF SEARCH UPLOADS  |
    +------------------------+

@@ -55,7 +55,7 @@ def run(["spoof-inserted"]) do
     Logger.put_process_level(self(), :notice)
     start_pleroma()

-    shell_info("""
+    IO.puts("""
    +----------------------+
    |  SPOOF SEARCH NOTES  |
    +----------------------+

@@ -77,7 +77,7 @@ defp do_spoof_uploaded() do
           uploads_search_spoofs_local_dir(Config.get!([Pleroma.Uploaders.Local, :uploads]))

         _ ->
-          shell_info("""
+          IO.puts("""
           NOTE:
             Not using local uploader; thus not affected by this exploit.
             It's impossible to check for files, but in case local uploader was used before

@@ -98,13 +98,13 @@ defp do_spoof_uploaded() do
     orphaned_attachs = upload_search_orphaned_attachments(not_orphaned_urls)

-    shell_info("\nSearch concluded; here are the results:")
+    IO.puts("\nSearch concluded; here are the results:")
     pretty_print_list_with_title(emoji, "Emoji")
     pretty_print_list_with_title(files, "Uploaded Files")
     pretty_print_list_with_title(post_attachs, "(Not Deleted) Post Attachments")
     pretty_print_list_with_title(orphaned_attachs, "Orphaned Uploads")

-    shell_info("""
+    IO.puts("""
     In total found
       #{length(emoji)} emoji
       #{length(files)} uploads

@@ -116,7 +116,7 @@ defp do_spoof_uploaded() do
   defp uploads_search_spoofs_local_dir(dir) do
     local_dir = String.replace_suffix(dir, "/", "")

-    shell_info("Searching for suspicious files in #{local_dir}...")
+    IO.puts("Searching for suspicious files in #{local_dir}...")

     glob_ext = "{" <> Enum.join(@activity_exts, ",") <> "}"

@@ -128,7 +128,7 @@ defp uploads_search_spoofs_local_dir(dir) do
   end

   defp uploads_search_spoofs_notes() do
-    shell_info("Now querying DB for posts with spoofing attachments. This might take a while...")
+    IO.puts("Now querying DB for posts with spoofing attachments. This might take a while...")

     patterns = [local_id_pattern() | activity_ext_url_patterns()]

@@ -153,7 +153,7 @@ defp uploads_search_spoofs_notes() do
   end

   defp upload_search_orphaned_attachments(not_orphaned_urls) do
-    shell_info("""
+    IO.puts("""
     Now querying DB for orphaned spoofing attachment (i.e. their post was deleted,
     but if :cleanup_attachments was not enabled traces remain in the database)
     This might take a bit...

@@ -184,7 +184,7 @@ defp upload_search_orphaned_attachments(not_orphaned_urls) do
   # |  S P O O F - I N S E R T E D  |
   # +-----------------------------+
   defp do_spoof_inserted() do
-    shell_info("""
+    IO.puts("""
     Searching for local posts whose Create activity has no ActivityPub id...
     This is a pretty good indicator, but only for spoofs of local actors
     and only if the spoofing happened after around late 2021.

@@ -194,9 +194,9 @@ defp do_spoof_inserted() do
       search_local_notes_without_create_id()
       |> Enum.sort()

-    shell_info("Done.\n")
+    IO.puts("Done.\n")

-    shell_info("""
+    IO.puts("""
     Now trying to weed out other poorly hidden spoofs.
     This can't detect all and may have some false positives.
     """)

@@ -207,9 +207,9 @@ defp do_spoof_inserted() do
       search_sus_notes_by_id_patterns()
       |> Enum.filter(fn r -> !(r in likely_spoofed_posts_set) end)

-    shell_info("Done.\n")
+    IO.puts("Done.\n")

-    shell_info("""
+    IO.puts("""
     Finally, searching for spoofed, local user accounts.
     (It's impossible to detect spoofed remote users)
     """)

@@ -220,7 +220,7 @@ defp do_spoof_inserted() do
     pretty_print_list_with_title(idless_create, "Likely Spoofed Posts")
     pretty_print_list_with_title(spoofed_users, "Spoofed local user accounts")

-    shell_info("""
+    IO.puts("""
     In total found:
       #{length(spoofed_users)} bogus users
       #{length(idless_create)} likely spoofed posts

@@ -289,27 +289,27 @@ defp search_bogus_local_users() do
   defp pretty_print_list_with_title(list, title) do
     title_len = String.length(title)
     title_underline = String.duplicate("=", title_len)

-    shell_info(title)
-    shell_info(title_underline)
+    IO.puts(title)
+    IO.puts(title_underline)
     pretty_print_list(list)
   end

-  defp pretty_print_list([]), do: shell_info("")
+  defp pretty_print_list([]), do: IO.puts("")

   defp pretty_print_list([{a, o} | rest])
        when (is_binary(a) or is_number(a)) and is_binary(o) do
-    shell_info("  {#{a}, #{o}}")
+    IO.puts("  {#{a}, #{o}}")
     pretty_print_list(rest)
   end

   defp pretty_print_list([{u, a, o} | rest])
        when is_binary(a) and is_binary(u) and is_binary(o) do
-    shell_info("  {#{u}, #{a}, #{o}}")
+    IO.puts("  {#{u}, #{a}, #{o}}")
     pretty_print_list(rest)
   end

   defp pretty_print_list([e | rest]) when is_binary(e) do
-    shell_info("  #{e}")
+    IO.puts("  #{e}")
     pretty_print_list(rest)
   end
@@ -114,7 +114,7 @@ def run(["reset_password", nickname]) do
          {:ok, token} <- Pleroma.PasswordResetToken.create_token(user) do
       shell_info("Generated password reset token for #{user.nickname}")

-      shell_info("URL: #{~p[/api/v1/pleroma/password_reset/#{token.token}]}")
+      IO.puts("URL: #{~p[/api/v1/pleroma/password_reset/#{token.token}]}")
     else
       _ ->
         shell_error("No local user #{nickname}")

@@ -301,7 +301,7 @@ def run(["invite" | rest]) do
       shell_info("Generated user invite token " <> String.replace(invite.invite_type, "_", " "))

       url = url(~p[/registration/#{invite.token}])
-      shell_info(url)
+      IO.puts(url)
     else
       error ->
         shell_error("Could not create invite token: #{inspect(error)}")

@@ -373,7 +373,7 @@ def run(["show", nickname]) do
       nickname
       |> User.get_cached_by_nickname()

-    shell_info(user)
+    shell_info("#{inspect(user)}")
   end

   def run(["send_confirmation", nickname]) do

@@ -457,7 +457,7 @@ def run(["blocking", nickname]) do
     with %User{local: true} = user <- User.get_cached_by_nickname(nickname) do
       blocks = User.following_ap_ids(user)
-      shell_info(blocks)
+      IO.puts("#{inspect(blocks)}")
     end
   end

@@ -516,12 +516,12 @@ def run(["fix_follow_state", local_user, remote_user]) do
          {:follow_data, Pleroma.Web.ActivityPub.Utils.fetch_latest_follow(local, remote)} do
       calculated_state = User.following?(local, remote)

-      shell_info(
+      IO.puts(
         "Request state is #{request_state}, vs calculated state of following=#{calculated_state}"
       )

       if calculated_state == false && request_state == "accept" do
-        shell_info("Discrepancy found, fixing")
+        IO.puts("Discrepancy found, fixing")
         Pleroma.Web.CommonAPI.reject_follow_request(local, remote)
         shell_info("Relationship fixed")
       else

@@ -551,14 +551,14 @@ defp refetch_public_keys(query) do
     |> Stream.each(fn users ->
       users
       |> Enum.each(fn user ->
-        shell_info("Re-Resolving: #{user.ap_id}")
+        IO.puts("Re-Resolving: #{user.ap_id}")

         with {:ok, user} <- Pleroma.User.fetch_by_ap_id(user.ap_id),
              changeset <- Pleroma.User.update_changeset(user),
              {:ok, _user} <- Pleroma.User.update_and_set_cache(changeset) do
           :ok
         else
-          error -> shell_info("Could not resolve: #{user.ap_id}, #{inspect(error)}")
+          error -> IO.puts("Could not resolve: #{user.ap_id}, #{inspect(error)}")
         end
       end)
     end)
@@ -258,27 +258,6 @@ def get_create_by_object_ap_id(ap_id) when is_binary(ap_id) do

   def get_create_by_object_ap_id(_), do: nil

-  @doc """
-  Accepts a list of `ap_id`.
-  Returns a query yielding Create activities for the given objects,
-  in the same order as they were specified in the input list.
-  """
-  @spec get_presorted_create_by_object_ap_id([String.t()]) :: Ecto.Queryable.t()
-  def get_presorted_create_by_object_ap_id(ap_ids) do
-    from(
-      a in Activity,
-      join:
-        ids in fragment(
-          "SELECT * FROM UNNEST(?::text[]) WITH ORDINALITY AS ids(ap_id, ord)",
-          ^ap_ids
-        ),
-      on:
-        ids.ap_id == fragment("?->>'object'", a.data) and
-          fragment("?->>'type'", a.data) == "Create",
-      order_by: [asc: ids.ord]
-    )
-  end
-
   @doc """
   Accepts `ap_id` or list of `ap_id`.
   Returns a query.
@@ -28,7 +28,7 @@ defp get_cache_keys_for(activity_id) do
     end
   end

-  def add_cache_key_for(activity_id, additional_key) do
+  defp add_cache_key_for(activity_id, additional_key) do
     current = get_cache_keys_for(activity_id)

     unless additional_key in current do
@@ -95,17 +95,34 @@ def start(_type, _args) do
     opts = [strategy: :one_for_one, name: Pleroma.Supervisor, max_restarts: max_restarts]

-    case Supervisor.start_link(children, opts) do
-      {:ok, data} ->
-        {:ok, data}
+    with {:ok, data} <- Supervisor.start_link(children, opts) do
+      set_postgres_server_version()
+      {:ok, data}
+    else
       e ->
-        Logger.critical("Failed to start!")
-        Logger.critical("#{inspect(e)}")
+        Logger.error("Failed to start!")
+        Logger.error("#{inspect(e)}")
         e
     end
   end

+  defp set_postgres_server_version do
+    version =
+      with %{rows: [[version]]} <- Ecto.Adapters.SQL.query!(Pleroma.Repo, "show server_version"),
+           {num, _} <- Float.parse(version) do
+        num
+      else
+        e ->
+          Logger.warning(
+            "Could not get the postgres version: #{inspect(e)}.\nSetting the default value of 9.6"
+          )
+
+          9.6
+      end
+
+    :persistent_term.put({Pleroma.Repo, :postgres_version}, version)
+  end
+
   def load_custom_modules do
     dir = Config.get([:modules, :runtime_dir])

@@ -162,9 +179,7 @@ defp cachex_children do
       build_cachex("translations", default_ttl: :timer.hours(24 * 30), limit: 2500),
       build_cachex("instances", default_ttl: :timer.hours(24), ttl_interval: 1000, limit: 2500),
       build_cachex("request_signatures", default_ttl: :timer.hours(24 * 30), limit: 3000),
-      build_cachex("rel_me", default_ttl: :timer.hours(24 * 30), limit: 300),
-      build_cachex("host_meta", default_ttl: :timer.minutes(120), limit: 5000),
-      build_cachex("http_backoff", default_ttl: :timer.hours(24 * 30), limit: 10000)
+      build_cachex("rel_me", default_ttl: :timer.hours(24 * 30), limit: 300)
     ]
   end

@@ -264,9 +279,7 @@ def limiters_setup do
   defp http_children do
     proxy_url = Config.get([:http, :proxy_url])
     proxy = Pleroma.HTTP.AdapterHelper.format_proxy(proxy_url)
-    pool_size = Config.get([:http, :pool_size], 10)
-    pool_timeout = Config.get([:http, :pool_timeout], 60_000)
-    connection_timeout = Config.get([:http, :conn_max_idle_time], 10_000)
+    pool_size = Config.get([:http, :pool_size])

     :public_key.cacerts_load()

@@ -275,9 +288,6 @@ defp http_children do
     |> Config.get([])
     |> Pleroma.HTTP.AdapterHelper.add_pool_size(pool_size)
     |> Pleroma.HTTP.AdapterHelper.maybe_add_proxy_pool(proxy)
-    |> Pleroma.HTTP.AdapterHelper.ensure_ipv6()
-    |> Pleroma.HTTP.AdapterHelper.add_default_conn_max_idle_time(connection_timeout)
-    |> Pleroma.HTTP.AdapterHelper.add_default_pool_max_idle_time(pool_timeout)
     |> Keyword.put(:name, MyFinch)

     [{Finch, config}]
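The set_postgres_server_version/0 added on the right stores the parsed version in :persistent_term, so later hot paths can branch on it without issuing a query. A sketch of the read side, reusing the key and 9.6 fallback from the hunk (the gating variable is illustrative):

# Sketch: reading the cached value back elsewhere in the codebase.
version = :persistent_term.get({Pleroma.Repo, :postgres_version}, 9.6)
new_enough_for_feature_x? = version >= 11.0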
@@ -164,8 +164,7 @@ defp do_check_rum!(setting, migrate) do
   defp check_system_commands!(:ok) do
     filter_commands_statuses = [
-      check_filter(Pleroma.Upload.Filter.Exiftool.StripMetadata, "exiftool"),
-      check_filter(Pleroma.Upload.Filter.Exiftool.ReadDescription, "exiftool"),
+      check_filter(Pleroma.Upload.Filter.Exiftool, "exiftool"),
       check_filter(Pleroma.Upload.Filter.Mogrify, "mogrify"),
       check_filter(Pleroma.Upload.Filter.Mogrifun, "mogrify"),
       check_filter(Pleroma.Upload.Filter.AnalyzeMetadata, "mogrify"),
@@ -68,10 +68,7 @@ defp fetch_page_items(id, items \\ []) do
         items
       end
     else
-      {:error, :not_found} ->
-        items
-
-      {:error, :forbidden} ->
+      {:error, {"Object has been deleted", _, _}} ->
         items

       {:error, error} ->
@@ -22,43 +22,6 @@ defmodule Pleroma.Config.DeprecationWarnings do
      "\n* `config :pleroma, :instance, :quarantined_instances` is now covered by `:pleroma, :mrf_simple, :reject`"}
   ]

-  def check_exiftool_filter do
-    filters = Config.get([Pleroma.Upload]) |> Keyword.get(:filters, [])
-
-    if Pleroma.Upload.Filter.Exiftool in filters do
-      Logger.warning("""
-      !!!DEPRECATION WARNING!!!
-      Your config is using Exiftool as a filter instead of Exiftool.StripMetadata. This should work for now, but you are advised to change to the new configuration to prevent possible issues later:
-
-      ```
-      config :pleroma, Pleroma.Upload,
-        filters: [Pleroma.Upload.Filter.Exiftool]
-      ```
-
-      Is now
-
-      ```
-      config :pleroma, Pleroma.Upload,
-        filters: [Pleroma.Upload.Filter.Exiftool.StripMetadata]
-      ```
-      """)
-
-      new_config =
-        filters
-        |> Enum.map(fn
-          Pleroma.Upload.Filter.Exiftool -> Pleroma.Upload.Filter.Exiftool.StripMetadata
-          filter -> filter
-        end)
-
-      Config.put([Pleroma.Upload, :filters], new_config)
-
-      :error
-    else
-      :ok
-    end
-  end
-
   def check_simple_policy_tuples do
     has_strings =
       Config.get([:mrf_simple])

@@ -221,8 +184,7 @@ def warn do
       check_simple_policy_tuples(),
       check_http_adapter(),
       check_uploader_base_url_set(),
-      check_uploader_base_url_is_not_base_domain(),
-      check_exiftool_filter()
+      check_uploader_base_url_is_not_base_domain()
     ]
     |> Enum.reduce(:ok, fn
       :ok, :ok -> :ok
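The reduce at the bottom of warn/0 folds every check result together so a single failing check flips the overall result to :error while all checks still run. A standalone sketch of that fold, with throwaway check functions:

# Sketch of the fold: any :error wins, every check still executes.
checks = [fn -> :ok end, fn -> :error end, fn -> :ok end]

result =
  checks
  |> Enum.map(& &1.())
  |> Enum.reduce(:ok, fn
    :ok, :ok -> :ok
    _, _ -> :error
  end)

# result == :error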
@@ -24,6 +24,7 @@ defp reboot_time_keys,
   defp reboot_time_subkeys,
     do: [
       {:pleroma, Pleroma.Captcha, [:seconds_valid]},
+      {:pleroma, Pleroma.Upload, [:proxy_remote]},
      {:pleroma, :instance, [:upload_limit]},
      {:pleroma, :http, [:pool_size]},
      {:pleroma, :http, [:proxy_url]}
@@ -64,7 +64,4 @@ defmodule Pleroma.Constants do
       "Service"
     ]
   )
-
-  # Internally used as top-level types for media attachments and user images
-  const(attachment_types, do: ["Document", "Image"])
 end
@@ -125,7 +125,7 @@ def add_file(%Pack{} = pack, _, _, %Plug.Upload{content_type: "application/zip"}
     {:ok, _emoji_files} =
       :zip.unzip(
         to_charlist(file.path),
-        [{:file_list, Enum.map(emojies, & &1[:path])}, {:cwd, to_charlist(tmp_dir)}]
+        [{:file_list, Enum.map(emojies, & &1[:path])}, {:cwd, tmp_dir}]
       )

     {_, updated_pack} =
@@ -79,10 +79,6 @@ def unzip(zip, dest) do
       new_file_path = Path.join(dest, path)

-      new_file_path
-      |> Path.dirname()
-      |> File.rm()
-
       new_file_path
       |> Path.dirname()
       |> File.mkdir_p!()
@@ -6,6 +6,8 @@ defmodule Pleroma.HTML do
   # Scrubbers are compiled on boot so they can be configured in OTP releases
   # @on_load :compile_scrubbers

+  @cachex Pleroma.Config.get([:cachex, :provider], Cachex)
+
   def compile_scrubbers do
     dir = Path.join(:code.priv_dir(:pleroma), "scrubbers")

@@ -65,9 +67,22 @@ def ensure_scrubbed_html(
     end
   end

-  @spec extract_first_external_url_from_object(Pleroma.Object.t()) :: String.t() | nil
-  def extract_first_external_url_from_object(%{data: %{"content" => content}})
+  def extract_first_external_url_from_object(%{data: %{"content" => content}} = object)
       when is_binary(content) do
+    unless object.data["fake"] do
+      key = "URL|#{object.id}"
+
+      @cachex.fetch!(:scrubber_cache, key, fn _key ->
+        {:commit, {:ok, extract_first_external_url(content)}}
+      end)
+    else
+      {:ok, extract_first_external_url(content)}
+    end
+  end
+
+  def extract_first_external_url_from_object(_), do: {:error, :no_content}
+
+  def extract_first_external_url(content) do
     content
     |> Floki.parse_fragment!()
     |> Floki.find("a:not(.mention,.hashtag,.attachment,[rel~=\"tag\"])")

@@ -75,6 +90,4 @@ def extract_first_external_url_from_object(%{data: %{"content" => content}})
     |> Floki.attribute("href")
     |> Enum.at(0)
   end
-
-  def extract_first_external_url_from_object(_), do: nil
 end
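On the right-hand side, extract_first_external_url_from_object/1 memoises per object through Cachex: fetch!/3 runs the fallback only on a cache miss, and a {:commit, value} return stores the computed value. A minimal sketch of that call, assuming the :scrubber_cache cache is running and with a placeholder key and URL:

# Sketch of the fetch-or-compute pattern used above.
Cachex.fetch!(:scrubber_cache, "URL|<object-id>", fn _key ->
  {:commit, {:ok, "https://example.com/some-link"}}
end)
# => {:ok, "https://example.com/some-link"}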
@@ -74,12 +74,7 @@ def request(method, url, body, headers, options) when is_binary(url) do
     request = build_request(method, headers, options, url, body, params)
     client = Tesla.client([Tesla.Middleware.FollowRedirects, Tesla.Middleware.Telemetry])

-    Logger.debug("Outbound: #{method} #{url}")
     request(client, request)
-  rescue
-    e ->
-      Logger.error("Failed to fetch #{url}: #{inspect(e)}")
-      {:error, :fetch_error}
   end

   @spec request(Client.t(), keyword()) :: {:ok, Env.t()} | {:error, any()}
@@ -65,15 +65,6 @@ def add_pool_size(opts, pool_size) do
     |> put_in([:pools, :default, :size], pool_size)
   end

-  def ensure_ipv6(opts) do
-    # Default transport opts already enable IPv6, so just ensure they're loaded
-    opts
-    |> maybe_add_pools()
-    |> maybe_add_default_pool()
-    |> maybe_add_conn_opts()
-    |> maybe_add_transport_opts()
-  end
-
   defp maybe_add_pools(opts) do
     if Keyword.has_key?(opts, :pools) do
       opts

@@ -105,29 +96,11 @@ defp maybe_add_conn_opts(opts) do
   defp maybe_add_transport_opts(opts) do
     transport_opts = get_in(opts, [:pools, :default, :conn_opts, :transport_opts])

-    opts =
-      unless is_nil(transport_opts) do
-        opts
-      else
-        put_in(opts, [:pools, :default, :conn_opts, :transport_opts], [])
-      end
-
-    # IPv6 is disabled and IPv4 enabled by default; ensure we can use both
-    put_in(opts, [:pools, :default, :conn_opts, :transport_opts, :inet6], true)
-  end
-
-  def add_default_pool_max_idle_time(opts, pool_timeout) do
-    opts
-    |> maybe_add_pools()
-    |> maybe_add_default_pool()
-    |> put_in([:pools, :default, :pool_max_idle_time], pool_timeout)
-  end
-
-  def add_default_conn_max_idle_time(opts, connection_timeout) do
-    opts
-    |> maybe_add_pools()
-    |> maybe_add_default_pool()
-    |> put_in([:pools, :default, :conn_max_idle_time], connection_timeout)
+    unless is_nil(transport_opts) do
+      opts
+    else
+      put_in(opts, [:pools, :default, :conn_opts, :transport_opts], [])
+    end
   end

   @doc """
@@ -1,121 +0,0 @@
-defmodule Pleroma.HTTP.Backoff do
-  alias Pleroma.HTTP
-  require Logger
-
-  @cachex Pleroma.Config.get([:cachex, :provider], Cachex)
-  @backoff_cache :http_backoff_cache
-
-  # attempt to parse a timestamp from a header
-  # returns nil if it can't parse the timestamp
-  @spec timestamp_or_nil(binary) :: DateTime.t() | nil
-  defp timestamp_or_nil(header) do
-    case DateTime.from_iso8601(header) do
-      {:ok, stamp, _} ->
-        stamp
-
-      _ ->
-        nil
-    end
-  end
-
-  # attempt to parse the x-ratelimit-reset header from the headers
-  @spec x_ratelimit_reset(headers :: list) :: DateTime.t() | nil
-  defp x_ratelimit_reset(headers) do
-    with {_header, value} <- List.keyfind(headers, "x-ratelimit-reset", 0),
-         true <- is_binary(value) do
-      timestamp_or_nil(value)
-    else
-      _ ->
-        nil
-    end
-  end
-
-  # attempt to parse the Retry-After header from the headers
-  # this can be either a timestamp _or_ a number of seconds to wait!
-  # we'll return a datetime if we can parse it, or nil if we can't
-  @spec retry_after(headers :: list) :: DateTime.t() | nil
-  defp retry_after(headers) do
-    with {_header, value} <- List.keyfind(headers, "retry-after", 0),
-         true <- is_binary(value) do
-      # first, see if it's an integer
-      case Integer.parse(value) do
-        {seconds, ""} ->
-          Logger.debug("Parsed Retry-After header: #{seconds} seconds")
-          DateTime.utc_now() |> Timex.shift(seconds: seconds)
-
-        _ ->
-          # if it's not an integer, try to parse it as a timestamp
-          timestamp_or_nil(value)
-      end
-    else
-      _ ->
-        nil
-    end
-  end
-
-  # given a set of headers, will attempt to find the next backoff timestamp
-  # if it can't find one, it will default to 5 minutes from now
-  @spec next_backoff_timestamp(%{headers: list}) :: DateTime.t()
-  defp next_backoff_timestamp(%{headers: headers}) when is_list(headers) do
-    default_5_minute_backoff =
-      DateTime.utc_now()
-      |> Timex.shift(seconds: 5 * 60)
-
-    backoff =
-      [&x_ratelimit_reset/1, &retry_after/1]
-      |> Enum.map(& &1.(headers))
-      |> Enum.find(&(&1 != nil))
-
-    if is_nil(backoff) do
-      Logger.debug("No backoff headers found, defaulting to 5 minutes from now")
-      default_5_minute_backoff
-    else
-      Logger.debug("Found backoff header, will back off until: #{backoff}")
-      backoff
-    end
-  end
-
-  defp next_backoff_timestamp(_), do: DateTime.utc_now() |> Timex.shift(seconds: 5 * 60)
-
-  # utility function to check the HTTP response for potential backoff headers
-  # will check if we get a 429 or 503 response, and if we do, will back off for a bit
-  @spec check_backoff({:ok | :error, HTTP.Env.t()}, binary()) ::
-          {:ok | :error, HTTP.Env.t()} | {:error, :ratelimit}
-  defp check_backoff({:ok, env}, host) do
-    case env.status do
-      status when status in [429, 503] ->
-        Logger.error("Rate limited on #{host}! Backing off...")
-        timestamp = next_backoff_timestamp(env)
-        ttl = Timex.diff(timestamp, DateTime.utc_now(), :seconds)
-        # we will cache the host for 5 minutes
-        @cachex.put(@backoff_cache, host, true, ttl: ttl)
-        {:error, :ratelimit}
-
-      _ ->
-        {:ok, env}
-    end
-  end
-
-  defp check_backoff(env, _), do: env
-
-  @doc """
-  this acts as a single throughput for all GET requests
-  we will check if the host is in the cache, and if it is, we will automatically fail the request
-  this ensures that we don't hammer the server with requests, and instead wait for the backoff to expire
-  this is a very simple implementation, and can be improved upon!
-  """
-  @spec get(binary, list, list) :: {:ok | :error, HTTP.Env.t()} | {:error, :ratelimit}
-  def get(url, headers \\ [], options \\ []) do
-    %{host: host} = URI.parse(url)
-
-    case @cachex.get(@backoff_cache, host) do
-      {:ok, nil} ->
-        url
-        |> HTTP.get(headers, options)
-        |> check_backoff(host)
-
-      _ ->
-        {:error, :ratelimit}
-    end
-  end
-end
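The module deleted above wraps every outbound GET on the develop side; callers such as get_object/1 (see the fetcher hunks below) simply call HTTP.Backoff.get/2 where the other branch calls HTTP.get/2. Usage is drop-in; a sketch with a placeholder URL:

# Sketch: drop-in use of the develop-only backoff wrapper.
case Pleroma.HTTP.Backoff.get("https://remote.example/objects/1") do
  {:ok, %{status: 200, body: body}} ->
    {:ok, body}

  # returned immediately while the host is still in the backoff cache
  {:error, :ratelimit} ->
    :retry_later

  {:error, other} ->
    {:error, other}
end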

lib/pleroma/keys.ex (new file, 46 lines)

@@ -0,0 +1,46 @@
+# Pleroma: A lightweight social networking server
+# Copyright © 2017-2021 Pleroma Authors <https://pleroma.social/>
+# SPDX-License-Identifier: AGPL-3.0-only
+
+defmodule Pleroma.Keys do
+  # Native generation of RSA keys is only available since OTP 20+ and in default build conditions
+  # We try at compile time to generate natively an RSA key otherwise we fallback on the old way.
+  try do
+    _ = :public_key.generate_key({:rsa, 2048, 65_537})
+
+    def generate_rsa_pem do
+      key = :public_key.generate_key({:rsa, 2048, 65_537})
+      entry = :public_key.pem_entry_encode(:RSAPrivateKey, key)
+      pem = :public_key.pem_encode([entry]) |> String.trim_trailing()
+      {:ok, pem}
+    end
+  rescue
+    _ ->
+      def generate_rsa_pem do
+        port = Port.open({:spawn, "openssl genrsa"}, [:binary])
+
+        {:ok, pem} =
+          receive do
+            {^port, {:data, pem}} -> {:ok, pem}
+          end
+
+        Port.close(port)
+
+        if Regex.match?(~r/RSA PRIVATE KEY/, pem) do
+          {:ok, pem}
+        else
+          :error
+        end
+      end
+  end
+
+  def keys_from_pem(pem) do
+    with [private_key_code] <- :public_key.pem_decode(pem),
+         private_key <- :public_key.pem_entry_decode(private_key_code),
+         {:RSAPrivateKey, _, modulus, exponent, _, _, _, _, _, _, _} <- private_key do
+      {:ok, private_key, {:RSAPublicKey, modulus, exponent}}
+    else
+      error -> {:error, error}
+    end
+  end
+end
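A quick round-trip of the two helpers this new file exposes; the tuple shapes follow directly from the code above:

# Sketch: generate a 2048-bit RSA key, then split it into key records.
{:ok, pem} = Pleroma.Keys.generate_rsa_pem()

{:ok, _private_key, {:RSAPublicKey, _modulus, _exponent}} =
  Pleroma.Keys.keys_from_pem(pem)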
@@ -178,10 +178,7 @@ def normalize(ap_id, options) when is_binary(ap_id) do
         ap_id

       Keyword.get(options, :fetch) ->
-        case Fetcher.fetch_object_from_id(ap_id, options) do
-          {:ok, object} -> object
-          _ -> nil
-        end
+        Fetcher.fetch_object_from_id!(ap_id, options)

       true ->
         get_cached_by_ap_id(ap_id)
@@ -12,6 +12,8 @@ defmodule Pleroma.Object.Containment do
   spoofing, therefore removal of object containment functions is NOT recommended.
   """

+  alias Pleroma.Web.ActivityPub.Transmogrifier
+
   def get_actor(%{"actor" => actor}) when is_binary(actor) do
     actor
   end

@@ -48,39 +50,16 @@ def get_object(_) do
   defp compare_uris(%URI{host: host} = _id_uri, %URI{host: host} = _other_uri), do: :ok
   defp compare_uris(_id_uri, _other_uri), do: :error

-  defp uri_strip_slash(%URI{path: path} = uri) when is_binary(path),
-    do: %{uri | path: String.replace_suffix(path, "/", "")}
-
-  defp uri_strip_slash(uri), do: uri
-
-  # domain names are case-insensitive per spec (other parts of URIs arent necessarily)
-  defp uri_normalise_host(%URI{host: host} = uri) when is_binary(host),
-    do: %{uri | host: String.downcase(host, :ascii)}
-
-  defp uri_normalise_host(uri), do: uri
-
-  defp compare_uri_identities(uri, uri), do: :ok
-
-  defp compare_uri_identities(id_uri, other_uri) when is_binary(id_uri) and is_binary(other_uri),
-    do: compare_uri_identities(URI.parse(id_uri), URI.parse(other_uri))
-
-  defp compare_uri_identities(%URI{} = id, %URI{} = other) do
-    normid =
-      %{id | fragment: nil}
-      |> uri_strip_slash()
-      |> uri_normalise_host()
-
-    normother =
-      %{other | fragment: nil}
-      |> uri_strip_slash()
-      |> uri_normalise_host()
-
-    # Conversion back to binary avoids issues from non-normalised deprecated authority field
-    if URI.to_string(normid) == URI.to_string(normother) do
-      :ok
-    else
-      :error
-    end
+  defp compare_uris_exact(uri, uri), do: :ok
+
+  defp compare_uris_exact(%URI{} = id, %URI{} = other),
+    do: compare_uris_exact(URI.to_string(id), URI.to_string(other))
+
+  defp compare_uris_exact(id_uri, other_uri)
+       when is_binary(id_uri) and is_binary(other_uri) do
+    norm_id = String.replace_suffix(id_uri, "/", "")
+    norm_other = String.replace_suffix(other_uri, "/", "")
+    if norm_id == norm_other, do: :ok, else: :error
   end

   @doc """

@@ -114,13 +93,21 @@ def contain_origin(id, %{"attributedTo" => actor} = params),
   def contain_origin(_id, _data), do: :ok

   @doc """
-  Check whether the fetch URL (after redirects) is the
-  same location the canonical ActivityPub id points to.
+  Check whether the fetch URL (after redirects) exactly (sans tralining slash) matches either
+  the canonical ActivityPub id or the objects url field (for display URLs from *key and Mastodon)

   Since this is meant to be used for fetches, anonymous or transient objects are not accepted here.
   """
-  def contain_id_to_fetch(url, %{"id" => id}) when is_binary(id) do
-    compare_uri_identities(url, id)
+  def contain_id_to_fetch(url, %{"id" => id} = data) when is_binary(id) do
+    with {:id, :error} <- {:id, compare_uris_exact(id, url)},
+         # "url" can be a "Link" object and this is checked before full normalisation
+         display_url <- Transmogrifier.fix_url(data)["url"],
+         true <- display_url != nil do
+      compare_uris_exact(display_url, url)
+    else
+      {:id, :ok} -> :ok
+      _ -> :error
+    end
   end

   def contain_id_to_fetch(_url, _data), do: :error
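The behavioural difference between the two sides: develop's compare_uri_identities/2 also drops fragments and lowercases the host before comparing, while the right-hand compare_uris_exact/2 only tolerates one trailing slash. A sketch of develop's normalisation, inlined for two example URIs:

# Sketch: what the develop-side identity comparison tolerates.
normalise = fn uri ->
  uri = %{uri | fragment: nil, host: String.downcase(uri.host, :ascii)}
  %{uri | path: String.replace_suffix(uri.path || "", "/", "")}
end

a = URI.parse("https://Example.com/objects/1/")
b = URI.parse("https://example.com/objects/1")

URI.to_string(normalise.(a)) == URI.to_string(normalise.(b))
# => true, where a plain string comparison of the two URLs would say false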
@@ -116,13 +116,13 @@ defp reinject_object(%Object{} = object, new_data) do
   @doc "Assumes object already is in our database and refetches from remote to update (e.g. for polls)"
   def refetch_object(%Object{data: %{"id" => id}} = object) do
     with {:local, false} <- {:local, Object.local?(object)},
-         {:ok, new_data} <- fetch_and_contain_remote_object_from_id(id, true),
+         {:ok, new_data} <- fetch_and_contain_remote_object_from_id(id),
          {:id, true} <- {:id, new_data["id"] == id},
          {:ok, object} <- reinject_object(object, new_data) do
       {:ok, object}
     else
       {:local, true} -> {:ok, object}
-      {:id, false} -> {:error, :id_mismatch}
+      {:id, false} -> {:error, "Object id changed on refetch"}
       e -> {:error, e}
     end
   end

@@ -136,13 +136,10 @@ def refetch_object(%Object{data: %{"id" => id}} = object) do
   def fetch_object_from_id(id, options \\ []) do
     with %URI{} = uri <- URI.parse(id),
          # let's check the URI is even vaguely valid first
-         {:valid_uri_scheme, true} <-
-           {:valid_uri_scheme, uri.scheme == "http" or uri.scheme == "https"},
+         {:scheme, true} <- {:scheme, uri.scheme == "http" or uri.scheme == "https"},
          # If we have instance restrictions, apply them here to prevent fetching from unwanted instances
-         {:mrf_reject_check, {:ok, nil}} <-
-           {:mrf_reject_check, Pleroma.Web.ActivityPub.MRF.SimplePolicy.check_reject(uri)},
-         {:mrf_accept_check, {:ok, _}} <-
-           {:mrf_accept_check, Pleroma.Web.ActivityPub.MRF.SimplePolicy.check_accept(uri)},
+         {:ok, nil} <- Pleroma.Web.ActivityPub.MRF.SimplePolicy.check_reject(uri),
+         {:ok, _} <- Pleroma.Web.ActivityPub.MRF.SimplePolicy.check_accept(uri),
          {_, nil} <- {:fetch_object, Object.get_cached_by_ap_id(id)},
          {_, true} <- {:allowed_depth, Federator.allowed_thread_distance?(options[:depth])},
          {_, {:ok, data}} <- {:fetch, fetch_and_contain_remote_object_from_id(id)},

@@ -154,37 +151,20 @@ def fetch_object_from_id(id, options \\ []) do
          {:object, data, Object.normalize(activity, fetch: false)} do
       {:ok, object}
     else
-      {:allowed_depth, false} = e ->
-        log_fetch_error(id, e)
-        {:error, :allowed_depth}
-
-      {:valid_uri_scheme, _} = e ->
-        log_fetch_error(id, e)
-        {:error, :invalid_uri_scheme}
-
-      {:mrf_reject_check, _} = e ->
-        log_fetch_error(id, e)
-        {:reject, :mrf}
-
-      {:mrf_accept_check, _} = e ->
-        log_fetch_error(id, e)
-        {:reject, :mrf}
-
-      {:containment, reason} = e ->
-        log_fetch_error(id, e)
-        {:error, reason}
-
-      {:transmogrifier, {:error, {:reject, reason}}} = e ->
-        log_fetch_error(id, e)
-        {:reject, reason}
-
-      {:transmogrifier, {:reject, reason}} = e ->
-        log_fetch_error(id, e)
-        {:reject, reason}
-
-      {:transmogrifier, reason} = e ->
-        log_fetch_error(id, e)
-        {:error, reason}
+      {:allowed_depth, false} ->
+        {:error, "Max thread distance exceeded."}
+
+      {:scheme, false} ->
+        {:error, "URI Scheme Invalid"}
+
+      {:transmogrifier, {:error, {:reject, e}}} ->
+        {:reject, e}
+
+      {:transmogrifier, {:reject, e}} ->
+        {:reject, e}
+
+      {:transmogrifier, _} = e ->
+        {:error, e}

       {:object, data, nil} ->
         reinject_object(%Object{}, data)

@@ -195,21 +175,17 @@ def fetch_object_from_id(id, options \\ []) do
       {:fetch_object, %Object{} = object} ->
        {:ok, object}

-      {:fetch, {:error, reason}} = e ->
-        log_fetch_error(id, e)
-        {:error, reason}
-
-      {:reject, reason} ->
-        {:reject, reason}
+      {:fetch, {:error, error}} ->
+        {:error, error}

       e ->
-        log_fetch_error(id, e)
-        {:error, e}
+        e
     end
   end

-  defp log_fetch_error(id, error) do
-    Logger.metadata(object: id)
-    Logger.error("Object rejected while fetching #{id} #{inspect(error)}")
-  end
-
   defp prepare_activity_params(data) do
     %{
       "type" => "Create",

@@ -223,6 +199,27 @@ defp prepare_activity_params(data) do
     |> Maps.put_if_present("bcc", data["bcc"])
   end

+  @doc "Identical to `fetch_object_from_id/2` but just directly returns the object or on error `nil`"
+  def fetch_object_from_id!(id, options \\ []) do
+    with {:ok, object} <- fetch_object_from_id(id, options) do
+      object
+    else
+      {:error, %Tesla.Mock.Error{}} ->
+        nil
+
+      {:error, {"Object has been deleted", _id, _code}} ->
+        nil
+
+      {:reject, reason} ->
+        Logger.debug("Rejected #{id} while fetching: #{inspect(reason)}")
+        nil
+
+      e ->
+        Logger.error("Error while fetching #{id}: #{inspect(e)}")
+        nil
+    end
+  end
+
   defp make_signature(id, date) do
     uri = URI.parse(id)
@ -253,71 +250,38 @@ defp maybe_date_fetch(headers, date) do
end end
end end
@doc """ @doc "Fetches arbitrary remote object and performs basic safety and authenticity checks"
Fetches arbitrary remote object and performs basic safety and authenticity checks. def fetch_and_contain_remote_object_from_id(id)
When the fetch URL is known to already be a canonical AP id, checks are stricter.
"""
def fetch_and_contain_remote_object_from_id(id, is_ap_id \\ false)
def fetch_and_contain_remote_object_from_id(%{"id" => id}, is_ap_id), def fetch_and_contain_remote_object_from_id(%{"id" => id}),
do: fetch_and_contain_remote_object_from_id(id, is_ap_id) do: fetch_and_contain_remote_object_from_id(id)
def fetch_and_contain_remote_object_from_id(id, is_ap_id) when is_binary(id) do def fetch_and_contain_remote_object_from_id(id) when is_binary(id) do
Logger.debug("Fetching object #{id} via AP [ap_id=#{is_ap_id}]") Logger.debug("Fetching object #{id} via AP")
with {:valid_uri_scheme, true} <- {:valid_uri_scheme, String.starts_with?(id, "http")}, with {:scheme, true} <- {:scheme, String.starts_with?(id, "http")},
%URI{} = uri <- URI.parse(id), {_, :ok} <- {:local_fetch, Containment.contain_local_fetch(id)},
{:mrf_reject_check, {:ok, nil}} <-
{:mrf_reject_check, Pleroma.Web.ActivityPub.MRF.SimplePolicy.check_reject(uri)},
{:mrf_accept_check, {:ok, _}} <-
{:mrf_accept_check, Pleroma.Web.ActivityPub.MRF.SimplePolicy.check_accept(uri)},
{:local_fetch, :ok} <- {:local_fetch, Containment.contain_local_fetch(id)},
{:ok, final_id, body} <- get_object(id), {:ok, final_id, body} <- get_object(id),
# a canonical ID shouldn't be a redirect
true <- !is_ap_id || final_id == id,
{:ok, data} <- safe_json_decode(body), {:ok, data} <- safe_json_decode(body),
{_, :ok} <- {:containment, Containment.contain_origin(final_id, data)}, {_, :ok} <- {:strict_id, Containment.contain_id_to_fetch(final_id, data)},
{_, _, :ok} <- {:strict_id, data["id"], Containment.contain_id_to_fetch(final_id, data)} do {_, :ok} <- {:containment, Containment.contain_origin(final_id, data)} do
unless Instances.reachable?(final_id) do unless Instances.reachable?(final_id) do
Instances.set_reachable(final_id) Instances.set_reachable(final_id)
end end
{:ok, data} {:ok, data}
else else
# E.g. Mastodon and *key serve the AP object directly under their display URLs without {:strict_id, _} ->
# redirecting to their canonical location first, thus ids will expectedly differ. {:error, "Object's ActivityPub id/url does not match final fetch URL"}
# Similarly keys, either use a fragment ID and are a subobjects or a distinct ID
# but for compatibility are still a subobject presenting their owning actors ID at the toplevel.
# Refetching _once_ from the listed id, should yield a strict match afterwards.
{:strict_id, ap_id, _} = e ->
case is_ap_id do
false ->
fetch_and_contain_remote_object_from_id(ap_id, true)
true -> {:scheme, _} ->
log_fetch_error(id, e) {:error, "Unsupported URI scheme"}
{:error, :id_mismatch}
end
{:mrf_reject_check, _} = e -> {:local_fetch, _} ->
log_fetch_error(id, e) {:error, "Trying to fetch local resource"}
{:reject, :mrf}
{:mrf_accept_check, _} = e -> {:containment, _} ->
log_fetch_error(id, e) {:error, "Object containment failed."}
{:reject, :mrf}
{:valid_uri_scheme, _} = e ->
log_fetch_error(id, e)
{:error, :invalid_uri_scheme}
{:local_fetch, _} = e ->
log_fetch_error(id, e)
{:error, :local_resource}
{:containment, reason} ->
log_fetch_error(id, reason)
{:error, {:containment, reason}}
{:error, e} -> {:error, e} ->
{:error, e} {:error, e}
@ -327,13 +291,25 @@ def fetch_and_contain_remote_object_from_id(id, is_ap_id) when is_binary(id) do
end end
end end
def fetch_and_contain_remote_object_from_id(_id, _is_ap_id), def fetch_and_contain_remote_object_from_id(_id),
do: {:error, :invalid_id} do: {:error, "id must be a string"}
defp check_crossdomain_redirect(final_host, original_url)
# HOPEFULLY TEMPORARY # HOPEFULLY TEMPORARY
# Basically none of our Tesla mocks in tests set the (supposed to # Basically none of our Tesla mocks in tests set the (supposed to
# exist for Tesla proper) url parameter for their responses # exist for Tesla proper) url parameter for their responses
# causing almost every fetch in test to fail otherwise # causing almost every fetch in test to fail otherwise
if @mix_env == :test do
defp check_crossdomain_redirect(nil, _) do
{:cross_domain_redirect, false}
end
end
defp check_crossdomain_redirect(final_host, original_url) do
{:cross_domain_redirect, final_host != URI.parse(original_url).host}
end
if @mix_env == :test do if @mix_env == :test do
defp get_final_id(nil, initial_url), do: initial_url defp get_final_id(nil, initial_url), do: initial_url
defp get_final_id("", initial_url), do: initial_url defp get_final_id("", initial_url), do: initial_url
@ -348,17 +324,17 @@ def get_object(id) do
date = Pleroma.Signature.signed_date() date = Pleroma.Signature.signed_date()
headers = headers =
[ [{"accept", "application/activity+json"}]
# The first is required by spec, the second provided as a fallback for buggy implementations
{"accept", "application/ld+json; profile=\"https://www.w3.org/ns/activitystreams\""},
{"accept", "application/activity+json"}
]
|> maybe_date_fetch(date) |> maybe_date_fetch(date)
|> sign_fetch(id, date) |> sign_fetch(id, date)
with {:ok, %{body: body, status: code, headers: headers, url: final_url}} with {:ok, %{body: body, status: code, headers: headers, url: final_url}}
when code in 200..299 <- when code in 200..299 <-
HTTP.Backoff.get(id, headers), HTTP.get(id, headers),
remote_host <-
URI.parse(final_url).host,
{:cross_domain_redirect, false} <-
check_crossdomain_redirect(remote_host, id),
{:has_content_type, {_, content_type}} <- {:has_content_type, {_, content_type}} <-
{:has_content_type, List.keyfind(headers, "content-type", 0)}, {:has_content_type, List.keyfind(headers, "content-type", 0)},
{:parse_content_type, {:ok, "application", subtype, type_params}} <- {:parse_content_type, {:ok, "application", subtype, type_params}} <-
@ -369,22 +345,15 @@ def get_object(id) do
{"activity+json", _} -> {"activity+json", _} ->
{:ok, final_id, body} {:ok, final_id, body}
{"ld+json", %{"profile" => profiles}} -> {"ld+json", %{"profile" => "https://www.w3.org/ns/activitystreams"}} ->
if "https://www.w3.org/ns/activitystreams" in String.split(profiles) do {:ok, final_id, body}
{:ok, final_id, body}
else
{:error, {:content_type, content_type}}
end
_ -> _ ->
{:error, {:content_type, content_type}} {:error, {:content_type, content_type}}
end end
else else
{:ok, %{status: code}} when code in [401, 403] ->
{:error, :forbidden}
{:ok, %{status: code}} when code in [404, 410] -> {:ok, %{status: code}} when code in [404, 410] ->
{:error, :not_found} {:error, {"Object has been deleted", id, code}}
{:error, e} -> {:error, e} ->
{:error, e} {:error, e}
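# A sketch of the ld+json profile negotiation above with hypothetical values:
# the profile parameter may list several space-separated IRIs, and the body is
# accepted as long as the ActivityStreams profile is among them.
profiles = "https://www.w3.org/ns/activitystreams https://w3id.org/security/v1"
"https://www.w3.org/ns/activitystreams" in String.split(profiles)
#=> true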

@ -28,7 +28,7 @@ defmodule Pleroma.ScheduledActivity do
timestamps() timestamps()
end end
defp changeset(%ScheduledActivity{} = scheduled_activity, attrs) do def changeset(%ScheduledActivity{} = scheduled_activity, attrs) do
scheduled_activity scheduled_activity
|> cast(attrs, [:scheduled_at, :params]) |> cast(attrs, [:scheduled_at, :params])
|> validate_required([:scheduled_at, :params]) |> validate_required([:scheduled_at, :params])
@ -40,36 +40,26 @@ defp with_media_attachments(
%{changes: %{params: %{"media_ids" => media_ids} = params}} = changeset %{changes: %{params: %{"media_ids" => media_ids} = params}} = changeset
) )
when is_list(media_ids) do when is_list(media_ids) do
user = User.get_by_id(changeset.data.user_id) media_attachments = Utils.attachments_from_ids(%{media_ids: media_ids})
case Utils.attachments_from_ids(user, %{media_ids: media_ids}) do params =
media_attachments when is_list(media_attachments) -> params
params = |> Map.put("media_attachments", media_attachments)
params |> Map.put("media_ids", media_ids)
|> Map.put("media_attachments", media_attachments)
|> Map.put("media_ids", media_ids)
put_change(changeset, :params, params) put_change(changeset, :params, params)
{:error, _} = e ->
e
e ->
{:error, e}
end
end end
defp with_media_attachments(changeset), do: changeset defp with_media_attachments(changeset), do: changeset
defp update_changeset(%ScheduledActivity{} = scheduled_activity, attrs) do def update_changeset(%ScheduledActivity{} = scheduled_activity, attrs) do
# note: should this ever allow swapping media attachments, make sure ownership is checked
scheduled_activity scheduled_activity
|> cast(attrs, [:scheduled_at]) |> cast(attrs, [:scheduled_at])
|> validate_required([:scheduled_at]) |> validate_required([:scheduled_at])
|> validate_scheduled_at() |> validate_scheduled_at()
end end
defp validate_scheduled_at(changeset) do def validate_scheduled_at(changeset) do
validate_change(changeset, :scheduled_at, fn _, scheduled_at -> validate_change(changeset, :scheduled_at, fn _, scheduled_at ->
cond do cond do
not far_enough?(scheduled_at) -> not far_enough?(scheduled_at) ->
@ -87,7 +77,7 @@ defp validate_scheduled_at(changeset) do
end) end)
end end
defp exceeds_daily_user_limit?(user_id, scheduled_at) do def exceeds_daily_user_limit?(user_id, scheduled_at) do
ScheduledActivity ScheduledActivity
|> where(user_id: ^user_id) |> where(user_id: ^user_id)
|> where([sa], type(sa.scheduled_at, :date) == type(^scheduled_at, :date)) |> where([sa], type(sa.scheduled_at, :date) == type(^scheduled_at, :date))
@ -96,7 +86,7 @@ defp exceeds_daily_user_limit?(user_id, scheduled_at) do
|> Kernel.>=(Config.get([ScheduledActivity, :daily_user_limit])) |> Kernel.>=(Config.get([ScheduledActivity, :daily_user_limit]))
end end
defp exceeds_total_user_limit?(user_id) do def exceeds_total_user_limit?(user_id) do
ScheduledActivity ScheduledActivity
|> where(user_id: ^user_id) |> where(user_id: ^user_id)
|> select([sa], count(sa.id)) |> select([sa], count(sa.id))
@ -118,29 +108,20 @@ def far_enough?(scheduled_at) do
diff > @min_offset diff > @min_offset
end end
defp new(%User{} = user, attrs) do def new(%User{} = user, attrs) do
changeset(%ScheduledActivity{user_id: user.id}, attrs) changeset(%ScheduledActivity{user_id: user.id}, attrs)
end end
@doc """ @doc """
Creates a ScheduledActivity and adds it to the queue to be performed at the scheduled_at date Creates a ScheduledActivity and adds it to the queue to be performed at the scheduled_at date
""" """
@spec create(User.t(), map()) :: {:ok, ScheduledActivity.t()} | {:error, any()} @spec create(User.t(), map()) :: {:ok, ScheduledActivity.t()} | {:error, Ecto.Changeset.t()}
def create(%User{} = user, attrs) do def create(%User{} = user, attrs) do
case new(user, attrs) do Multi.new()
%Ecto.Changeset{} = sched_data -> |> Multi.insert(:scheduled_activity, new(user, attrs))
Multi.new() |> maybe_add_jobs(Config.get([ScheduledActivity, :enabled]))
|> Multi.insert(:scheduled_activity, sched_data) |> Repo.transaction()
|> maybe_add_jobs(Config.get([ScheduledActivity, :enabled])) |> transaction_response
|> Repo.transaction()
|> transaction_response
{:error, _} = e ->
e
e ->
{:error, e}
end
end end
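# A minimal usage sketch, assuming a loaded %User{} and Mastodon-style attrs;
# the exact shape is whatever the changeset above casts into :params:
attrs = %{scheduled_at: "2030-01-01T00:00:00Z", params: %{"status" => "hello, future"}}
{:ok, _scheduled} = ScheduledActivity.create(user, attrs)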
defp maybe_add_jobs(multi, true) do defp maybe_add_jobs(multi, true) do
@ -206,7 +187,17 @@ def for_user_query(%User{} = user) do
|> where(user_id: ^user.id) |> where(user_id: ^user.id)
end end
defp job_query(scheduled_activity_id) do def due_activities(offset \\ 0) do
naive_datetime =
NaiveDateTime.utc_now()
|> NaiveDateTime.add(offset, :millisecond)
ScheduledActivity
|> where([sa], sa.scheduled_at < ^naive_datetime)
|> Repo.all()
end
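# Sketch: the offset is in milliseconds, so a poller wanting everything due
# within the next minute (hypothetical call site) would ask for:
due_activities(:timer.minutes(1))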
def job_query(scheduled_activity_id) do
from(j in Oban.Job, from(j in Oban.Job,
where: j.queue == "scheduled_activities", where: j.queue == "scheduled_activities",
where: fragment("args ->> 'activity_id' = ?::text", ^to_string(scheduled_activity_id)) where: fragment("args ->> 'activity_id' = ?::text", ^to_string(scheduled_activity_id))

@ -21,12 +21,19 @@ def search(user, search_query, options \\ []) do
offset = Keyword.get(options, :offset, 0) offset = Keyword.get(options, :offset, 0)
author = Keyword.get(options, :author) author = Keyword.get(options, :author)
search_function =
if :persistent_term.get({Pleroma.Repo, :postgres_version}) >= 11 do
:websearch
else
:plain
end
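# The cached version above is assumed to be seeded at application start,
# e.g. :persistent_term.put({Pleroma.Repo, :postgres_version}, 13), after
# which the lookup is a cheap constant-time read. websearch_to_tsquery
# (PostgreSQL 11+) safely handles raw user input; plainto_tsquery is the
# fallback for older servers.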
try do try do
Activity Activity
|> Activity.with_preloaded_object() |> Activity.with_preloaded_object()
|> Activity.restrict_deactivated_users() |> Activity.restrict_deactivated_users()
|> restrict_public() |> restrict_public()
|> query_with(index_type, search_query) |> query_with(index_type, search_query, search_function)
|> maybe_restrict_local(user) |> maybe_restrict_local(user)
|> maybe_restrict_author(author) |> maybe_restrict_author(author)
|> maybe_restrict_blocked(user) |> maybe_restrict_blocked(user)
@ -65,7 +72,25 @@ def restrict_public(q) do
) )
end end
defp query_with(q, :gin, search_query) do defp query_with(q, :gin, search_query, :plain) do
%{rows: [[tsc]]} =
Ecto.Adapters.SQL.query!(
Pleroma.Repo,
"select current_setting('default_text_search_config')::regconfig::oid;"
)
from([a, o] in q,
where:
fragment(
"to_tsvector(?::oid::regconfig, ?->>'content') @@ plainto_tsquery(?)",
^tsc,
o.data,
^search_query
)
)
end
defp query_with(q, :gin, search_query, :websearch) do
%{rows: [[tsc]]} = %{rows: [[tsc]]} =
Ecto.Adapters.SQL.query!( Ecto.Adapters.SQL.query!(
Pleroma.Repo, Pleroma.Repo,
@ -83,7 +108,19 @@ defp query_with(q, :gin, search_query) do
) )
end end
defp query_with(q, :rum, search_query) do defp query_with(q, :rum, search_query, :plain) do
from([a, o] in q,
where:
fragment(
"? @@ plainto_tsquery(?)",
o.fts_content,
^search_query
),
order_by: [fragment("? <=> now()::date", o.inserted_at)]
)
end
defp query_with(q, :rum, search_query, :websearch) do
from([a, o] in q, from([a, o] in q,
where: where:
fragment( fragment(

@ -5,27 +5,15 @@ defmodule Pleroma.Search.Meilisearch do
alias Pleroma.Activity alias Pleroma.Activity
import Pleroma.Search.DatabaseSearch import Pleroma.Search.DatabaseSearch
import Ecto.Query
@behaviour Pleroma.Search.SearchBackend @behaviour Pleroma.Search.SearchBackend
defp meili_headers(key) do defp meili_headers do
key_header =
if is_nil(key), do: [], else: [{"Authorization", "Bearer #{key}"}]
[{"Content-Type", "application/json"} | key_header]
end
defp meili_headers_admin do
private_key = Pleroma.Config.get([Pleroma.Search.Meilisearch, :private_key]) private_key = Pleroma.Config.get([Pleroma.Search.Meilisearch, :private_key])
meili_headers(private_key)
end
defp meili_headers_search do [{"Content-Type", "application/json"}] ++
search_key = if is_nil(private_key), do: [], else: [{"Authorization", "Bearer #{private_key}"}]
Pleroma.Config.get([Pleroma.Search.Meilisearch, :search_key]) ||
Pleroma.Config.get([Pleroma.Search.Meilisearch, :private_key])
meili_headers(search_key)
end end
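# With a dedicated search key configured (hypothetical value), read-only
# queries would send:
# [{"Content-Type", "application/json"}, {"Authorization", "Bearer <search_key>"}]
# and with no keys configured at all, just [{"Content-Type", "application/json"}].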
def meili_get(path) do def meili_get(path) do
@ -34,7 +22,7 @@ def meili_get(path) do
result = result =
Pleroma.HTTP.get( Pleroma.HTTP.get(
Path.join(endpoint, path), Path.join(endpoint, path),
meili_headers_admin() meili_headers()
) )
with {:ok, res} <- result do with {:ok, res} <- result do
@ -42,14 +30,14 @@ def meili_get(path) do
end end
end end
defp meili_search(params) do def meili_post(path, params) do
endpoint = Pleroma.Config.get([Pleroma.Search.Meilisearch, :url]) endpoint = Pleroma.Config.get([Pleroma.Search.Meilisearch, :url])
result = result =
Pleroma.HTTP.post( Pleroma.HTTP.post(
Path.join(endpoint, "/indexes/objects/search"), Path.join(endpoint, path),
Jason.encode!(params), Jason.encode!(params),
meili_headers_search() meili_headers()
) )
with {:ok, res} <- result do with {:ok, res} <- result do
@ -65,7 +53,7 @@ def meili_put(path, params) do
:put, :put,
Path.join(endpoint, path), Path.join(endpoint, path),
Jason.encode!(params), Jason.encode!(params),
meili_headers_admin(), meili_headers(),
[] []
) )
@ -82,7 +70,7 @@ def meili_delete!(path) do
:delete, :delete,
Path.join(endpoint, path), Path.join(endpoint, path),
"", "",
meili_headers_admin(), meili_headers(),
[] []
) )
end end
@ -93,20 +81,25 @@ def search(user, query, options \\ []) do
author = Keyword.get(options, :author) author = Keyword.get(options, :author)
res = res =
meili_search(%{q: query, offset: offset, limit: limit}) meili_post(
"/indexes/objects/search",
%{q: query, offset: offset, limit: limit}
)
with {:ok, result} <- res do with {:ok, result} <- res do
hits = result["hits"] |> Enum.map(& &1["ap"]) hits = result["hits"] |> Enum.map(& &1["ap"])
try do try do
hits hits
|> Activity.get_presorted_create_by_object_ap_id() |> Activity.create_by_object_ap_id()
|> Activity.with_preloaded_object()
|> Activity.with_preloaded_object() |> Activity.with_preloaded_object()
|> Activity.restrict_deactivated_users() |> Activity.restrict_deactivated_users()
|> maybe_restrict_local(user) |> maybe_restrict_local(user)
|> maybe_restrict_author(author) |> maybe_restrict_author(author)
|> maybe_restrict_blocked(user) |> maybe_restrict_blocked(user)
|> maybe_fetch(user, query) |> maybe_fetch(user, query)
|> order_by([object: obj], desc: obj.data["published"])
|> Pleroma.Repo.all() |> Pleroma.Repo.all()
rescue rescue
_ -> maybe_fetch([], user, query) _ -> maybe_fetch([], user, query)

@ -5,25 +5,47 @@
defmodule Pleroma.Signature do defmodule Pleroma.Signature do
@behaviour HTTPSignatures.Adapter @behaviour HTTPSignatures.Adapter
alias Pleroma.EctoType.ActivityPub.ObjectValidators
alias Pleroma.Keys
alias Pleroma.User alias Pleroma.User
alias Pleroma.User.SigningKey alias Pleroma.Web.ActivityPub.ActivityPub
require Logger
@known_suffixes ["/publickey", "/main-key"]
def key_id_to_actor_id(key_id) do def key_id_to_actor_id(key_id) do
case SigningKey.key_id_to_ap_id(key_id) do uri =
nil -> key_id
# hm, we SHOULD have gotten this in the pipeline before we hit here! |> URI.parse()
Logger.error("Could not figure out who owns the key #{key_id}") |> Map.put(:fragment, nil)
{:error, :key_owner_not_found} |> Map.put(:query, nil)
|> remove_suffix(@known_suffixes)
key -> maybe_ap_id = URI.to_string(uri)
{:ok, key}
case ObjectValidators.ObjectID.cast(maybe_ap_id) do
{:ok, ap_id} ->
{:ok, ap_id}
_ ->
case Pleroma.Web.WebFinger.finger(maybe_ap_id) do
{:ok, %{"ap_id" => ap_id}} -> {:ok, ap_id}
_ -> {:error, maybe_ap_id}
end
end end
end end
defp remove_suffix(uri, [test | rest]) do
if not is_nil(uri.path) and String.ends_with?(uri.path, test) do
Map.put(uri, :path, String.replace(uri.path, test, ""))
else
remove_suffix(uri, rest)
end
end
defp remove_suffix(uri, []), do: uri
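# Sketch with a hypothetical key id: stripping a known suffix recovers the
# probable actor id before any WebFinger fallback.
"https://example.com/users/alice/main-key"
|> URI.parse()
|> remove_suffix(["/publickey", "/main-key"])
|> URI.to_string()
#=> "https://example.com/users/alice"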
def fetch_public_key(conn) do def fetch_public_key(conn) do
with %{"keyId" => kid} <- HTTPSignatures.signature_for_conn(conn), with %{"keyId" => kid} <- HTTPSignatures.signature_for_conn(conn),
{:ok, %SigningKey{}} <- SigningKey.get_or_fetch_by_key_id(kid),
{:ok, actor_id} <- key_id_to_actor_id(kid), {:ok, actor_id} <- key_id_to_actor_id(kid),
{:ok, public_key} <- User.get_public_key_for_ap_id(actor_id) do {:ok, public_key} <- User.get_public_key_for_ap_id(actor_id) do
{:ok, public_key} {:ok, public_key}
@ -35,8 +57,8 @@ def fetch_public_key(conn) do
def refetch_public_key(conn) do def refetch_public_key(conn) do
with %{"keyId" => kid} <- HTTPSignatures.signature_for_conn(conn), with %{"keyId" => kid} <- HTTPSignatures.signature_for_conn(conn),
{:ok, %SigningKey{}} <- SigningKey.get_or_fetch_by_key_id(kid),
{:ok, actor_id} <- key_id_to_actor_id(kid), {:ok, actor_id} <- key_id_to_actor_id(kid),
{:ok, _user} <- ActivityPub.make_user_from_ap_id(actor_id),
{:ok, public_key} <- User.get_public_key_for_ap_id(actor_id) do {:ok, public_key} <- User.get_public_key_for_ap_id(actor_id) do
{:ok, public_key} {:ok, public_key}
else else
@ -45,9 +67,9 @@ def refetch_public_key(conn) do
end end
end end
def sign(%User{} = user, headers) do def sign(%User{keys: keys} = user, headers) do
with {:ok, private_key} <- SigningKey.private_key(user) do with {:ok, private_key, _} <- Keys.keys_from_pem(keys) do
HTTPSignatures.sign(private_key, SigningKey.local_key_id(user.ap_id), headers) HTTPSignatures.sign(private_key, user.ap_id <> "#main-key", headers)
end end
end end
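# A hedged usage sketch (hypothetical header map); the result is the value
# sent as the "signature" header on the outgoing request:
date = Pleroma.Signature.signed_date()
signature = Pleroma.Signature.sign(user, %{"(request-target)": "get /users/alice", date: date})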

@ -13,6 +13,7 @@ defmodule Pleroma.Upload do
* `:uploader`: override uploader * `:uploader`: override uploader
* `:filters`: override filters * `:filters`: override filters
* `:size_limit`: override size limit * `:size_limit`: override size limit
* `:activity_type`: override activity type
The `%Pleroma.Upload{}` struct: all documented fields are meant to be overwritten in filters: The `%Pleroma.Upload{}` struct: all documented fields are meant to be overwritten in filters:
@ -38,6 +39,8 @@ defmodule Pleroma.Upload do
alias Pleroma.Web.ActivityPub.Utils alias Pleroma.Web.ActivityPub.Utils
require Logger require Logger
@mix_env Mix.env()
@type source :: @type source ::
Plug.Upload.t() Plug.Upload.t()
| (data_uri_string :: String.t()) | (data_uri_string :: String.t())
@ -47,6 +50,7 @@ defmodule Pleroma.Upload do
@type option :: @type option ::
{:type, :avatar | :banner | :background} {:type, :avatar | :banner | :background}
| {:description, String.t()} | {:description, String.t()}
| {:activity_type, String.t()}
| {:size_limit, nil | non_neg_integer()} | {:size_limit, nil | non_neg_integer()}
| {:uploader, module()} | {:uploader, module()}
| {:filters, [module()]} | {:filters, [module()]}
@ -59,23 +63,12 @@ defmodule Pleroma.Upload do
width: integer(), width: integer(),
height: integer(), height: integer(),
blurhash: String.t(), blurhash: String.t(),
description: String.t(),
path: String.t() path: String.t()
} }
@always_enabled_filters [Pleroma.Upload.Filter.Dedupe] @always_enabled_filters [Pleroma.Upload.Filter.Dedupe]
defstruct [ defstruct [:id, :name, :tempfile, :content_type, :width, :height, :blurhash, :path]
:id,
:name,
:tempfile,
:content_type,
:width,
:height,
:blurhash,
:description,
:path
]
@spec store(source, options :: [option()]) :: {:ok, Map.t()} | {:error, any()} @spec store(source, options :: [option()]) :: {:ok, Map.t()} | {:error, any()}
@doc "Store a file. If using a `Plug.Upload{}` as the source, be sure to use `Majic.Plug` to ensure its content_type and filename is correct." @doc "Store a file. If using a `Plug.Upload{}` as the source, be sure to use `Majic.Plug` to ensure its content_type and filename is correct."
@ -85,7 +78,7 @@ def store(upload, opts \\ []) do
with {:ok, upload} <- prepare_upload(upload, opts), with {:ok, upload} <- prepare_upload(upload, opts),
upload = %__MODULE__{upload | path: upload.path || "#{upload.id}/#{upload.name}"}, upload = %__MODULE__{upload | path: upload.path || "#{upload.id}/#{upload.name}"},
{:ok, upload} <- Pleroma.Upload.Filter.filter(opts.filters, upload), {:ok, upload} <- Pleroma.Upload.Filter.filter(opts.filters, upload),
description = Map.get(upload, :description) || "", description = Map.get(opts, :description) || "",
{_, true} <- {_, true} <-
{:description_limit, {:description_limit,
String.length(description) <= Pleroma.Config.get([:instance, :description_limit])}, String.length(description) <= Pleroma.Config.get([:instance, :description_limit])},
@ -141,7 +134,7 @@ defp get_opts(opts) do
end end
%{ %{
activity_type: activity_type, activity_type: Keyword.get(opts, :activity_type, activity_type),
size_limit: Keyword.get(opts, :size_limit, size_limit), size_limit: Keyword.get(opts, :size_limit, size_limit),
uploader: Keyword.get(opts, :uploader, Pleroma.Config.get([__MODULE__, :uploader])), uploader: Keyword.get(opts, :uploader, Pleroma.Config.get([__MODULE__, :uploader])),
filters: filters:
@ -161,8 +154,7 @@ defp prepare_upload(%Plug.Upload{} = file, opts) do
id: UUID.generate(), id: UUID.generate(),
name: file.filename, name: file.filename,
tempfile: file.path, tempfile: file.path,
content_type: file.content_type, content_type: file.content_type
description: opts.description
}} }}
end end
end end
@ -182,8 +174,7 @@ defp prepare_upload(%{img: "data:image/" <> image_data}, opts) do
id: UUID.generate(), id: UUID.generate(),
name: hash <> "." <> ext, name: hash <> "." <> ext,
tempfile: tmp_path, tempfile: tmp_path,
content_type: content_type, content_type: content_type
description: opts.description
}} }}
end end
end end
@ -239,6 +230,13 @@ defp url_from_spec(%__MODULE__{name: name}, base_url, {:file, path}) do
defp url_from_spec(_upload, _base_url, {:url, url}), do: url defp url_from_spec(_upload, _base_url, {:url, url}), do: url
if @mix_env == :test do
defp choose_base_url(prim, sec \\ nil),
do: prim || sec || Pleroma.Web.Endpoint.url() <> "/media/"
else
defp choose_base_url(prim, sec \\ nil), do: prim || sec
end
def base_url do def base_url do
uploader = Config.get([Pleroma.Upload, :uploader]) uploader = Config.get([Pleroma.Upload, :uploader])
upload_base_url = Config.get([Pleroma.Upload, :base_url]) upload_base_url = Config.get([Pleroma.Upload, :base_url])
@ -246,7 +244,7 @@ def base_url do
case uploader do case uploader do
Pleroma.Uploaders.Local -> Pleroma.Uploaders.Local ->
upload_base_url choose_base_url(upload_base_url)
Pleroma.Uploaders.S3 -> Pleroma.Uploaders.S3 ->
bucket = Config.get([Pleroma.Uploaders.S3, :bucket]) bucket = Config.get([Pleroma.Uploaders.S3, :bucket])
@ -272,7 +270,7 @@ def base_url do
end end
_ -> _ ->
public_endpoint || upload_base_url choose_base_url(public_endpoint, upload_base_url)
end end
end end
end end

@ -0,0 +1,33 @@
# Pleroma: A lightweight social networking server
# Copyright © 2017-2021 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only
defmodule Pleroma.Upload.Filter.Exiftool do
@moduledoc """
Strips GPS related EXIF tags and overwrites the file in place.
Also strips or replaces filesystem metadata, e.g. timestamps.
"""
@behaviour Pleroma.Upload.Filter
@spec filter(Pleroma.Upload.t()) :: {:ok, :noop} | {:ok, :filtered} | {:error, String.t()}
# Formats not compatible with exiftool at this time
def filter(%Pleroma.Upload{content_type: "image/heic"}), do: {:ok, :noop}
def filter(%Pleroma.Upload{content_type: "image/webp"}), do: {:ok, :noop}
def filter(%Pleroma.Upload{content_type: "image/svg+xml"}), do: {:ok, :noop}
def filter(%Pleroma.Upload{content_type: "image/jxl"}), do: {:ok, :noop}
def filter(%Pleroma.Upload{tempfile: file, content_type: "image" <> _}) do
try do
case System.cmd("exiftool", ["-overwrite_original", "-gps:all=", file], parallelism: true) do
{_response, 0} -> {:ok, :filtered}
{error, 1} -> {:error, error}
end
rescue
e in ErlangError ->
{:error, "#{__MODULE__}: #{inspect(e)}"}
end
end
def filter(_), do: {:ok, :noop}
end
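# A direct invocation sketch (hypothetical upload struct): exempt formats
# short-circuit to {:ok, :noop}, anything else shells out to exiftool.
upload = %Pleroma.Upload{tempfile: "/tmp/photo.jpg", content_type: "image/jpeg"}
Pleroma.Upload.Filter.Exiftool.filter(upload)
#=> {:ok, :filtered} on success, {:error, reason} if exiftool exits non-zero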

@ -1,51 +0,0 @@
# Pleroma: A lightweight social networking server
# Copyright © 2017-2021 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only
defmodule Pleroma.Upload.Filter.Exiftool.ReadDescription do
@moduledoc """
Gets a valid description from the related EXIF tags and provides it in the response if no description is provided yet.
It will first check ImageDescription; when that doesn't provide a valid description, it will check iptc:Caption-Abstract.
A valid description means the fields are filled in and not too long (see `:instance, :description_limit`).
"""
@behaviour Pleroma.Upload.Filter
@spec filter(Pleroma.Upload.t()) :: {:ok, any()} | {:error, String.t()}
def filter(%Pleroma.Upload{description: description})
when is_binary(description),
do: {:ok, :noop}
def filter(%Pleroma.Upload{tempfile: file} = upload),
do: {:ok, :filtered, upload |> Map.put(:description, read_description_from_exif_data(file))}
def filter(_, _), do: {:ok, :noop}
defp read_description_from_exif_data(file) do
nil
|> read_when_empty(file, "-ImageDescription")
|> read_when_empty(file, "-iptc:Caption-Abstract")
end
defp read_when_empty(current_description, _, _) when is_binary(current_description),
do: current_description
defp read_when_empty(_, file, tag) do
try do
{tag_content, 0} =
System.cmd("exiftool", ["-b", "-s3", "-ignoreMinorErrors", "-q", "-q", tag, file],
parallelism: true
)
tag_content = String.trim(tag_content)
if tag_content != "" and
String.length(tag_content) <=
Pleroma.Config.get([:instance, :description_limit]),
do: tag_content,
else: nil
rescue
_ in ErlangError -> nil
end
end
end
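# The tag probe above is equivalent to this call (hypothetical file path);
# -b/-s3 print the bare tag value and -q -q silences warnings:
System.cmd(
  "exiftool",
  ["-b", "-s3", "-ignoreMinorErrors", "-q", "-q", "-ImageDescription", "/tmp/photo.jpg"],
  parallelism: true
)
#=> {"A description stored in EXIF", 0}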

@ -1,52 +0,0 @@
# Pleroma: A lightweight social networking server
# Copyright © 2017-2021 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only
defmodule Pleroma.Upload.Filter.Exiftool.StripMetadata do
@moduledoc """
Tries to strip all image metadata except colorspace and orientation, overwriting the file in place.
Also strips or replaces filesystem metadata, e.g. timestamps.
"""
@behaviour Pleroma.Upload.Filter
alias Pleroma.Config
@purge_default ["all", "CommonIFD0"]
@preserve_default ["ColorSpaceTags", "Orientation"]
@spec filter(Pleroma.Upload.t()) :: {:ok, :noop} | {:ok, :filtered} | {:error, String.t()}
# Formats not compatible with exiftool at this time
def filter(%Pleroma.Upload{content_type: "image/webp"}), do: {:ok, :noop}
def filter(%Pleroma.Upload{content_type: "image/bmp"}), do: {:ok, :noop}
def filter(%Pleroma.Upload{content_type: "image/svg+xml"}), do: {:ok, :noop}
def filter(%Pleroma.Upload{tempfile: file, content_type: "image" <> _}) do
purge_args =
Config.get([__MODULE__, :purge], @purge_default)
|> Enum.map(fn mgroup -> "-" <> mgroup <> "=" end)
preserve_args =
Config.get([__MODULE__, :preserve], @preserve_default)
|> Enum.map(fn mgroup -> "-" <> mgroup end)
|> then(fn
# If -TagsFromFile is not followed by tag selectors, it will copy most available tags
[] -> []
args -> ["-TagsFromFile", "@" | args]
end)
args = ["-ignoreMinorErrors", "-overwrite_original" | purge_args] ++ preserve_args ++ [file]
try do
case System.cmd("exiftool", args, parallelism: true) do
{_response, 0} -> {:ok, :filtered}
{error, 1} -> {:error, error}
end
rescue
e in ErlangError ->
{:error, "#{__MODULE__}: #{inspect(e)}"}
end
end
def filter(_), do: {:ok, :noop}
end
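# With the defaults above, the argv handed to exiftool works out to
# (illustrative; both the purge and preserve lists are configurable):
# ["-ignoreMinorErrors", "-overwrite_original", "-all=", "-CommonIFD0=",
#  "-TagsFromFile", "@", "-ColorSpaceTags", "-Orientation", file]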

@ -25,6 +25,7 @@ defmodule Pleroma.User do
alias Pleroma.Hashtag alias Pleroma.Hashtag
alias Pleroma.User.HashtagFollow alias Pleroma.User.HashtagFollow
alias Pleroma.HTML alias Pleroma.HTML
alias Pleroma.Keys
alias Pleroma.MFA alias Pleroma.MFA
alias Pleroma.Notification alias Pleroma.Notification
alias Pleroma.Object alias Pleroma.Object
@ -42,7 +43,6 @@ defmodule Pleroma.User do
alias Pleroma.Web.OAuth alias Pleroma.Web.OAuth
alias Pleroma.Web.RelMe alias Pleroma.Web.RelMe
alias Pleroma.Workers.BackgroundWorker alias Pleroma.Workers.BackgroundWorker
alias Pleroma.User.SigningKey
use Pleroma.Web, :verified_routes use Pleroma.Web, :verified_routes
@ -101,6 +101,7 @@ defmodule Pleroma.User do
field(:password, :string, virtual: true) field(:password, :string, virtual: true)
field(:password_confirmation, :string, virtual: true) field(:password_confirmation, :string, virtual: true)
field(:keys, :string) field(:keys, :string)
field(:public_key, :string)
field(:ap_id, :string) field(:ap_id, :string)
field(:avatar, :map, default: %{}) field(:avatar, :map, default: %{})
field(:local, :boolean, default: true) field(:local, :boolean, default: true)
@ -221,10 +222,6 @@ defmodule Pleroma.User do
on_replace: :delete on_replace: :delete
) )
# FOR THE FUTURE: We might want to make this a one-to-many relationship
# it's entirely possible right now, but we don't have a use case for it
has_one(:signing_key, SigningKey, foreign_key: :user_id)
timestamps() timestamps()
end end
@ -443,7 +440,6 @@ defp fix_follower_address(params), do: params
def remote_user_changeset(struct \\ %User{local: false}, params) do def remote_user_changeset(struct \\ %User{local: false}, params) do
bio_limit = Config.get([:instance, :user_bio_length], 5000) bio_limit = Config.get([:instance, :user_bio_length], 5000)
name_limit = Config.get([:instance, :user_name_length], 100) name_limit = Config.get([:instance, :user_name_length], 100)
fields_limit = Config.get([:instance, :max_remote_account_fields], 0)
name = name =
case params[:name] do case params[:name] do
@ -457,12 +453,10 @@ def remote_user_changeset(struct \\ %User{local: false}, params) do
|> Map.put_new(:last_refreshed_at, NaiveDateTime.utc_now()) |> Map.put_new(:last_refreshed_at, NaiveDateTime.utc_now())
|> truncate_if_exists(:name, name_limit) |> truncate_if_exists(:name, name_limit)
|> truncate_if_exists(:bio, bio_limit) |> truncate_if_exists(:bio, bio_limit)
|> Map.update(:fields, [], &Enum.take(&1, fields_limit))
|> truncate_fields_param() |> truncate_fields_param()
|> fix_follower_address() |> fix_follower_address()
struct struct
|> Repo.preload(:signing_key)
|> cast( |> cast(
params, params,
[ [
@ -472,6 +466,7 @@ def remote_user_changeset(struct \\ %User{local: false}, params) do
:inbox, :inbox,
:shared_inbox, :shared_inbox,
:nickname, :nickname,
:public_key,
:avatar, :avatar,
:ap_enabled, :ap_enabled,
:banner, :banner,
@ -500,7 +495,6 @@ def remote_user_changeset(struct \\ %User{local: false}, params) do
|> validate_required([:ap_id]) |> validate_required([:ap_id])
|> validate_required([:name], trim: false) |> validate_required([:name], trim: false)
|> unique_constraint(:nickname) |> unique_constraint(:nickname)
|> cast_assoc(:signing_key, with: &SigningKey.remote_changeset/2, required: false)
|> validate_format(:nickname, @email_regex) |> validate_format(:nickname, @email_regex)
|> validate_length(:bio, max: bio_limit) |> validate_length(:bio, max: bio_limit)
|> validate_length(:name, max: name_limit) |> validate_length(:name, max: name_limit)
@ -532,6 +526,7 @@ def update_changeset(struct, params \\ %{}) do
:name, :name,
:emoji, :emoji,
:avatar, :avatar,
:public_key,
:inbox, :inbox,
:shared_inbox, :shared_inbox,
:is_locked, :is_locked,
@ -575,7 +570,6 @@ def update_changeset(struct, params \\ %{}) do
:pleroma_settings_store, :pleroma_settings_store,
&{:ok, Map.merge(struct.pleroma_settings_store, &1)} &{:ok, Map.merge(struct.pleroma_settings_store, &1)}
) )
|> cast_assoc(:signing_key, with: &SigningKey.remote_changeset/2, required: false)
|> validate_fields(false, struct) |> validate_fields(false, struct)
end end
@ -834,10 +828,8 @@ def put_following_and_follower_and_featured_address(changeset) do
end end
defp put_private_key(changeset) do defp put_private_key(changeset) do
ap_id = get_field(changeset, :ap_id) {:ok, pem} = Keys.generate_rsa_pem()
put_change(changeset, :keys, pem)
changeset
|> put_assoc(:signing_key, SigningKey.generate_local_keys(ap_id))
end end
defp autofollow_users(user) do defp autofollow_users(user) do
@ -977,16 +969,15 @@ defp maybe_send_registration_email(%User{email: email} = user) when is_binary(em
defp maybe_send_registration_email(_), do: {:ok, :noop} defp maybe_send_registration_email(_), do: {:ok, :noop}
def needs_update?(user, options \\ []) def needs_update?(%User{local: true}), do: false
def needs_update?(%User{local: true}, _options), do: false
def needs_update?(%User{local: false, last_refreshed_at: nil}, _options), do: true
def needs_update?(%User{local: false} = user, options) do def needs_update?(%User{local: false, last_refreshed_at: nil}), do: true
NaiveDateTime.diff(NaiveDateTime.utc_now(), user.last_refreshed_at) >=
Keyword.get(options, :maximum_age, 86_400) def needs_update?(%User{local: false} = user) do
NaiveDateTime.diff(NaiveDateTime.utc_now(), user.last_refreshed_at) >= 86_400
end end
def needs_update?(_, _options), do: true def needs_update?(_), do: true
# "Locked" (self-locked) users demand explicit authorization of follow requests # "Locked" (self-locked) users demand explicit authorization of follow requests
@spec can_direct_follow_local(User.t(), User.t()) :: true | false @spec can_direct_follow_local(User.t(), User.t()) :: true | false
@ -1154,8 +1145,7 @@ def update_and_set_cache(%{data: %Pleroma.User{} = user} = changeset) do
was_superuser_before_update = User.superuser?(user) was_superuser_before_update = User.superuser?(user)
with {:ok, user} <- Repo.update(changeset, stale_error_field: :id) do with {:ok, user} <- Repo.update(changeset, stale_error_field: :id) do
user set_cache(user)
|> set_cache()
end end
|> maybe_remove_report_notifications(was_superuser_before_update) |> maybe_remove_report_notifications(was_superuser_before_update)
end end
@ -1633,12 +1623,8 @@ def blocks_user?(%User{} = user, %User{} = target) do
def blocks_user?(_, _), do: false def blocks_user?(_, _), do: false
def blocks_domain?(%User{} = user, %User{ap_id: ap_id}) do def blocks_domain?(%User{} = user, %User{} = target) do
blocks_domain?(user, ap_id) %{host: host} = URI.parse(target.ap_id)
end
def blocks_domain?(%User{} = user, url) when is_binary(url) do
%{host: host} = URI.parse(url)
Enum.member?(user.domain_blocks, host) Enum.member?(user.domain_blocks, host)
# TODO: functionality should probably be changed such that subdomains block as well, # TODO: functionality should probably be changed such that subdomains block as well,
# but as it stands, this just hecks up the relationships endpoint # but as it stands, this just hecks up the relationships endpoint
@ -1994,10 +1980,10 @@ def html_filter_policy(_), do: Config.get([:markup, :scrub_policy])
def fetch_by_ap_id(ap_id), do: ActivityPub.make_user_from_ap_id(ap_id) def fetch_by_ap_id(ap_id), do: ActivityPub.make_user_from_ap_id(ap_id)
def get_or_fetch_by_ap_id(ap_id, options \\ []) do def get_or_fetch_by_ap_id(ap_id) do
cached_user = get_cached_by_ap_id(ap_id) cached_user = get_cached_by_ap_id(ap_id)
maybe_fetched_user = needs_update?(cached_user, options) && fetch_by_ap_id(ap_id) maybe_fetched_user = needs_update?(cached_user) && fetch_by_ap_id(ap_id)
case {cached_user, maybe_fetched_user} do case {cached_user, maybe_fetched_user} do
{_, {:ok, %User{} = user}} -> {_, {:ok, %User{} = user}} ->
@ -2060,16 +2046,24 @@ defp create_service_actor(uri, nickname) do
|> set_cache() |> set_cache()
end end
defdelegate public_key(user), to: SigningKey def public_key(%{public_key: public_key_pem}) when is_binary(public_key_pem) do
key =
public_key_pem
|> :public_key.pem_decode()
|> hd()
|> :public_key.pem_entry_decode()
{:ok, key}
end
def public_key(_), do: {:error, "key not found"}
def get_public_key_for_ap_id(ap_id) do def get_public_key_for_ap_id(ap_id) do
with {:ok, %User{} = user} <- get_or_fetch_by_ap_id(ap_id), with {:ok, %User{} = user} <- get_or_fetch_by_ap_id(ap_id),
{:ok, public_key} <- SigningKey.public_key(user) do {:ok, public_key} <- public_key(user) do
{:ok, public_key} {:ok, public_key}
else else
e -> _ -> :error
Logger.error("Could not get public key for #{ap_id}.\n#{inspect(e)}")
{:error, e}
end end
end end

@ -1,262 +0,0 @@
defmodule Pleroma.User.SigningKey do
use Ecto.Schema
import Ecto.Query
import Ecto.Changeset
require Pleroma.Constants
alias Pleroma.User
alias Pleroma.Repo
require Logger
@primary_key false
schema "signing_keys" do
belongs_to(:user, Pleroma.User, type: FlakeId.Ecto.CompatType)
field :public_key, :string
field :private_key, :string
# This is an arbitrary field given by the remote instance
field :key_id, :string, primary_key: true
timestamps()
end
def load_key(%User{} = user) do
user
|> Repo.preload(:signing_key)
end
def key_id_of_local_user(%User{local: true} = user) do
case Repo.preload(user, :signing_key) do
%User{signing_key: %__MODULE__{key_id: key_id}} -> key_id
_ -> nil
end
end
@spec remote_changeset(__MODULE__, map) :: Changeset.t()
def remote_changeset(%__MODULE__{} = signing_key, attrs) do
signing_key
|> cast(attrs, [:public_key, :key_id])
|> validate_required([:public_key, :key_id])
end
@spec key_id_to_user_id(String.t()) :: String.t() | nil
@doc """
Given a key ID, return the user ID associated with that key.
Returns nil if the key ID is not found.
"""
def key_id_to_user_id(key_id) do
from(sk in __MODULE__, where: sk.key_id == ^key_id)
|> select([sk], sk.user_id)
|> Repo.one()
end
@spec key_id_to_ap_id(String.t()) :: String.t() | nil
@doc """
Given a key ID, return the AP ID associated with that key.
Returns nil if the key ID is not found.
"""
def key_id_to_ap_id(key_id) do
Logger.debug("Looking up key ID: #{key_id}")
result =
from(sk in __MODULE__, where: sk.key_id == ^key_id)
|> join(:inner, [sk], u in User, on: sk.user_id == u.id)
|> select([sk, u], %{user: u})
|> Repo.one()
case result do
%{user: %User{ap_id: ap_id}} -> ap_id
_ -> nil
end
end
@spec generate_rsa_pem() :: {:ok, binary()}
@doc """
Generate a new RSA private key and return it as a PEM-encoded string.
"""
def generate_rsa_pem do
key = :public_key.generate_key({:rsa, 2048, 65_537})
entry = :public_key.pem_entry_encode(:RSAPrivateKey, key)
pem = :public_key.pem_encode([entry]) |> String.trim_trailing()
{:ok, pem}
end
@spec generate_local_keys(String.t()) :: {:ok, Changeset.t()} | {:error, String.t()}
@doc """
Generate a new RSA key pair and create a changeset for it
"""
def generate_local_keys(ap_id) do
{:ok, private_pem} = generate_rsa_pem()
{:ok, local_pem} = private_pem_to_public_pem(private_pem)
%__MODULE__{}
|> change()
|> put_change(:public_key, local_pem)
|> put_change(:private_key, private_pem)
|> put_change(:key_id, local_key_id(ap_id))
end
@spec local_key_id(String.t()) :: String.t()
@doc """
Given an AP ID, return the key ID for the local user.
"""
def local_key_id(ap_id) do
ap_id <> "#main-key"
end
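# e.g. (hypothetical actor):
local_key_id("https://example.com/users/alice")
#=> "https://example.com/users/alice#main-key"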
@spec private_pem_to_public_pem(binary) :: {:ok, binary()} | {:error, String.t()}
@doc """
Given a private key in PEM format, return the corresponding public key in PEM format.
"""
def private_pem_to_public_pem(private_pem) do
[private_key_code] = :public_key.pem_decode(private_pem)
private_key = :public_key.pem_entry_decode(private_key_code)
{:RSAPrivateKey, _, modulus, exponent, _, _, _, _, _, _, _} = private_key
public_key = {:RSAPublicKey, modulus, exponent}
public_key = :public_key.pem_entry_encode(:SubjectPublicKeyInfo, public_key)
{:ok, :public_key.pem_encode([public_key])}
end
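# A round-trip sketch using the two helpers above:
{:ok, private_pem} = generate_rsa_pem()
{:ok, public_pem} = private_pem_to_public_pem(private_pem)
String.starts_with?(public_pem, "-----BEGIN PUBLIC KEY-----")
#=> true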
@spec public_key(User.t()) :: {:ok, binary()} | {:error, String.t()}
@doc """
Given a user, return the public key for that user in binary format.
"""
def public_key(%User{} = user) do
case Repo.preload(user, :signing_key) do
%User{signing_key: %__MODULE__{public_key: public_key_pem}} ->
key =
public_key_pem
|> :public_key.pem_decode()
|> hd()
|> :public_key.pem_entry_decode()
{:ok, key}
_ ->
{:error, "key not found"}
end
end
def public_key(_), do: {:error, "key not found"}
def public_key_pem(%User{} = user) do
case Repo.preload(user, :signing_key) do
%User{signing_key: %__MODULE__{public_key: public_key_pem}} -> {:ok, public_key_pem}
_ -> {:error, "key not found"}
end
end
def public_key_pem(_e) do
{:error, "key not found"}
end
@spec private_key(User.t()) :: {:ok, binary()} | {:error, String.t()}
@doc """
Given a user, return the private key for that user in binary format.
"""
def private_key(%User{} = user) do
case Repo.preload(user, :signing_key) do
%{signing_key: %__MODULE__{private_key: private_key_pem}} ->
key =
private_key_pem
|> :public_key.pem_decode()
|> hd()
|> :public_key.pem_entry_decode()
{:ok, key}
_ ->
{:error, "key not found"}
end
end
@spec get_or_fetch_by_key_id(String.t()) :: {:ok, __MODULE__} | {:error, String.t()}
@doc """
Given a key ID, return the signing key associated with that key.
Will either return the key if it exists locally, or fetch it from the remote instance.
"""
def get_or_fetch_by_key_id(key_id) do
case key_id_to_user_id(key_id) do
nil ->
fetch_remote_key(key_id)
user_id ->
{:ok, Repo.get_by(__MODULE__, user_id: user_id)}
end
end
@spec fetch_remote_key(String.t()) :: {:ok, __MODULE__} | {:error, String.t()}
@doc """
Fetch a remote key by key ID.
Will send a request to the remote instance to get the key.
This request should, at the very least, return a user ID and a public key object.
Though bear in mind that some implementations (looking at you, pleroma) may require a signature for this request.
This has the potential to create an infinite loop if the remote instance requires a signature to fetch the key...
So if we're rejected, we should probably just give up.
"""
def fetch_remote_key(key_id) do
Logger.debug("Fetching remote key: #{key_id}")
resp = Pleroma.Object.Fetcher.fetch_and_contain_remote_object_from_id(key_id)
case resp do
{:ok, _body} ->
case handle_signature_response(resp) do
{:ok, ap_id, public_key_pem} ->
Logger.debug("Fetched remote key: #{ap_id}")
# fetch the user
{:ok, user} = User.get_or_fetch_by_ap_id(ap_id)
# store the key
key = %__MODULE__{
user_id: user.id,
public_key: public_key_pem,
key_id: key_id
}
Repo.insert(key, on_conflict: :replace_all, conflict_target: :key_id)
e ->
Logger.debug("Failed to fetch remote key: #{inspect(e)}")
{:error, "Could not fetch key"}
end
_ ->
Logger.debug("Failed to fetch remote key: #{inspect(resp)}")
{:error, "Could not fetch key"}
end
end
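# For reference, the standalone key document matched below looks roughly
# like this (hypothetical values):
# %{
#   "type" => "CryptographicKey",
#   "owner" => "https://example.com/users/alice",
#   "publicKeyPem" => "-----BEGIN PUBLIC KEY-----\n..."
# }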
# Take the response from the remote instance and extract the key details
# will check if the key ID matches the owner of the key, if not, error
defp extract_key_details(%{"id" => ap_id, "publicKey" => public_key}) do
if ap_id !== public_key["owner"] do
{:error, "Key ID does not match owner"}
else
%{"publicKeyPem" => public_key_pem} = public_key
{:ok, ap_id, public_key_pem}
end
end
defp handle_signature_response({:ok, body}) do
case body do
%{
"type" => "CryptographicKey",
"publicKeyPem" => public_key_pem,
"owner" => ap_id
} ->
{:ok, ap_id, public_key_pem}
# for when we get a subset of the user object
%{
"id" => _user_id,
"publicKey" => _public_key,
"type" => actor_type
}
when actor_type in Pleroma.Constants.actor_types() ->
extract_key_details(body)
%{"error" => error} ->
{:error, error}
end
end
defp handle_signature_response({:error, e}), do: {:error, e}
defp handle_signature_response(other), do: {:error, "Could not fetch key: #{inspect(other)}"}
end

@ -155,7 +155,9 @@ def insert(map, local \\ true, fake \\ false, bypass_actor_check \\ false) when
# Splice in the child object if we have one. # Splice in the child object if we have one.
activity = Maps.put_if_present(activity, :object, object) activity = Maps.put_if_present(activity, :object, object)
Pleroma.Web.RichMedia.Card.get_by_activity(activity) ConcurrentLimiter.limit(Pleroma.Web.RichMedia.Helpers, fn ->
Task.start(fn -> Pleroma.Web.RichMedia.Helpers.fetch_data_for_activity(activity) end)
end)
# Add local posts to search index # Add local posts to search index
if local, do: Pleroma.Search.add_to_index(activity) if local, do: Pleroma.Search.add_to_index(activity)
@ -183,7 +185,7 @@ def insert(map, local \\ true, fake \\ false, bypass_actor_check \\ false) when
id: "pleroma:fakeid" id: "pleroma:fakeid"
} }
Pleroma.Web.RichMedia.Card.get_by_activity(activity) Pleroma.Web.RichMedia.Helpers.fetch_data_for_activity(activity)
{:ok, activity} {:ok, activity}
{:remote_limit_pass, _} -> {:remote_limit_pass, _} ->
@ -1543,30 +1545,11 @@ defp normalize_also_known_as(aka) when is_list(aka), do: aka
defp normalize_also_known_as(aka) when is_binary(aka), do: [aka] defp normalize_also_known_as(aka) when is_binary(aka), do: [aka]
defp normalize_also_known_as(nil), do: [] defp normalize_also_known_as(nil), do: []
defp normalize_attachment(%{} = attachment), do: [attachment]
defp normalize_attachment(attachment) when is_list(attachment), do: attachment
defp normalize_attachment(_), do: []
defp maybe_make_public_key_object(data) do
if is_map(data["publicKey"]) && is_binary(data["publicKey"]["publicKeyPem"]) do
%{
public_key: data["publicKey"]["publicKeyPem"],
key_id: data["publicKey"]["id"]
}
else
nil
end
end
defp object_to_user_data(data, additional) do defp object_to_user_data(data, additional) do
fields = fields =
data data
|> Map.get("attachment", []) |> Map.get("attachment", [])
|> normalize_attachment() |> Enum.filter(fn %{"type" => t} -> t == "PropertyValue" end)
|> Enum.filter(fn
%{"type" => t} -> t == "PropertyValue"
_ -> false
end)
|> Enum.map(fn fields -> Map.take(fields, ["name", "value"]) end) |> Enum.map(fn fields -> Map.take(fields, ["name", "value"]) end)
emojis = emojis =
@ -1589,16 +1572,9 @@ defp object_to_user_data(data, additional) do
featured_address = data["featured"] featured_address = data["featured"]
{:ok, pinned_objects} = fetch_and_prepare_featured_from_ap_id(featured_address) {:ok, pinned_objects} = fetch_and_prepare_featured_from_ap_id(featured_address)
# first, check that the owner is correct public_key =
signing_key = if is_map(data["publicKey"]) && is_binary(data["publicKey"]["publicKeyPem"]) do
if data["id"] !== data["publicKey"]["owner"] do data["publicKey"]["publicKeyPem"]
Logger.error(
"Owner of the public key is not the same as the actor - not saving the public key."
)
nil
else
maybe_make_public_key_object(data)
end end
shared_inbox = shared_inbox =
@ -1642,7 +1618,7 @@ defp object_to_user_data(data, additional) do
bio: data["summary"] || "", bio: data["summary"] || "",
actor_type: actor_type, actor_type: actor_type,
also_known_as: also_known_as, also_known_as: also_known_as,
signing_key: signing_key, public_key: public_key,
inbox: data["inbox"], inbox: data["inbox"],
shared_inbox: shared_inbox, shared_inbox: shared_inbox,
pinned_objects: pinned_objects, pinned_objects: pinned_objects,
@ -1729,7 +1705,9 @@ defp collection_private(%{"first" => first}) do
Fetcher.fetch_and_contain_remote_object_from_id(first) do Fetcher.fetch_and_contain_remote_object_from_id(first) do
{:ok, false} {:ok, false}
else else
{:error, _} -> {:ok, true} {:error, {:ok, %{status: code}}} when code in [401, 403] -> {:ok, true}
{:error, _} = e -> e
e -> {:error, e}
end end
end end
@ -1754,7 +1732,7 @@ def fetch_and_prepare_user_from_ap_id(ap_id, additional \\ []) do
Logger.debug("Could not decode user at fetch #{ap_id}, #{inspect(e)}") Logger.debug("Could not decode user at fetch #{ap_id}, #{inspect(e)}")
{:error, e} {:error, e}
{:reject, reason} = e -> {:error, {:reject, reason} = e} ->
Logger.debug("Rejected user #{ap_id}: #{inspect(reason)}") Logger.debug("Rejected user #{ap_id}: #{inspect(reason)}")
{:error, e} {:error, e}
@ -1840,19 +1818,18 @@ def fetch_and_prepare_featured_from_ap_id(ap_id) do
end end
end end
def enqueue_pin_fetches(%{pinned_objects: pins}) do def pinned_fetch_task(nil), do: nil
# enqueue a task to fetch all pinned objects
Enum.each(pins, fn {ap_id, _} ->
if is_nil(Object.get_cached_by_ap_id(ap_id)) do
Pleroma.Workers.RemoteFetcherWorker.enqueue("fetch_remote", %{
"id" => ap_id,
"depth" => 1
})
end
end)
end
def enqueue_pin_fetches(_), do: nil def pinned_fetch_task(%{pinned_objects: pins}) do
if Enum.all?(pins, fn {ap_id, _} ->
Object.get_cached_by_ap_id(ap_id) ||
match?({:ok, _object}, Fetcher.fetch_object_from_id(ap_id))
end) do
:ok
else
:error
end
end
def make_user_from_ap_id(ap_id, additional \\ []) do def make_user_from_ap_id(ap_id, additional \\ []) do
user = User.get_cached_by_ap_id(ap_id) user = User.get_cached_by_ap_id(ap_id)
@ -1861,6 +1838,8 @@ def make_user_from_ap_id(ap_id, additional \\ []) do
Transmogrifier.upgrade_user_from_ap_id(ap_id) Transmogrifier.upgrade_user_from_ap_id(ap_id)
else else
with {:ok, data} <- fetch_and_prepare_user_from_ap_id(ap_id, additional) do with {:ok, data} <- fetch_and_prepare_user_from_ap_id(ap_id, additional) do
{:ok, _pid} = Task.start(fn -> pinned_fetch_task(data) end)
user = user =
if data.ap_id != ap_id do if data.ap_id != ap_id do
User.get_cached_by_ap_id(data.ap_id) User.get_cached_by_ap_id(data.ap_id)
@ -1872,7 +1851,6 @@ def make_user_from_ap_id(ap_id, additional \\ []) do
user user
|> User.remote_user_changeset(data) |> User.remote_user_changeset(data)
|> User.update_and_set_cache() |> User.update_and_set_cache()
|> tap(fn _ -> enqueue_pin_fetches(data) end)
else else
maybe_handle_clashing_nickname(data) maybe_handle_clashing_nickname(data)
@ -1880,7 +1858,6 @@ def make_user_from_ap_id(ap_id, additional \\ []) do
|> User.remote_user_changeset() |> User.remote_user_changeset()
|> Repo.insert() |> Repo.insert()
|> User.set_cache() |> User.set_cache()
|> tap(fn _ -> enqueue_pin_fetches(data) end)
end end
end end
end end

@ -12,7 +12,9 @@ defmodule Pleroma.Web.ActivityPub.ActivityPubController do
alias Pleroma.Web.ActivityPub.ActivityPub alias Pleroma.Web.ActivityPub.ActivityPub
alias Pleroma.Web.ActivityPub.InternalFetchActor alias Pleroma.Web.ActivityPub.InternalFetchActor
alias Pleroma.Web.ActivityPub.ObjectView alias Pleroma.Web.ActivityPub.ObjectView
alias Pleroma.Web.ActivityPub.Pipeline
alias Pleroma.Web.ActivityPub.Relay alias Pleroma.Web.ActivityPub.Relay
alias Pleroma.Web.ActivityPub.Transmogrifier
alias Pleroma.Web.ActivityPub.UserView alias Pleroma.Web.ActivityPub.UserView
alias Pleroma.Web.ActivityPub.Utils alias Pleroma.Web.ActivityPub.Utils
alias Pleroma.Web.ActivityPub.Visibility alias Pleroma.Web.ActivityPub.Visibility
@ -38,9 +40,11 @@ defmodule Pleroma.Web.ActivityPub.ActivityPubController do
# Note: :following and :followers must be served even without authentication (as via :api) # Note: :following and :followers must be served even without authentication (as via :api)
plug( plug(
EnsureAuthenticatedPlug EnsureAuthenticatedPlug
when action in [:read_inbox] when action in [:read_inbox, :update_outbox, :whoami, :upload_media]
) )
plug(Majic.Plug, [pool: Pleroma.MajicPool] when action in [:upload_media])
plug( plug(
Pleroma.Web.Plugs.Cache, Pleroma.Web.Plugs.Cache,
[query_params: false, tracking_fun: &__MODULE__.track_object_fetch/2] [query_params: false, tracking_fun: &__MODULE__.track_object_fetch/2]
@ -60,26 +64,7 @@ defp relay_active?(conn, _) do
end end
end end
@doc """ def user(conn, %{"nickname" => nickname}) do
Render the user's AP data
WARNING: we cannot actually check if the request has a fragment, so let's play defensively:
- IF we have a valid signature, serve full user
- IF we do not, and authorized_fetch_mode is enabled, serve the key only
- OTHERWISE, serve the full actor (since we don't need to worry about the signature)
"""
def user(%{assigns: %{valid_signature: true}} = conn, params) do
render_full_user(conn, params)
end
def user(conn, params) do
if Pleroma.Config.get([:activitypub, :authorized_fetch_mode], false) do
render_key_only_user(conn, params)
else
render_full_user(conn, params)
end
end
defp render_full_user(conn, %{"nickname" => nickname}) do
with %User{local: true} = user <- User.get_cached_by_nickname(nickname) do with %User{local: true} = user <- User.get_cached_by_nickname(nickname) do
conn conn
|> put_resp_content_type("application/activity+json") |> put_resp_content_type("application/activity+json")
@ -91,18 +76,6 @@ defp render_full_user(conn, %{"nickname" => nickname}) do
end end
end end
def render_key_only_user(conn, %{"nickname" => nickname}) do
with %User{local: true} = user <- User.get_cached_by_nickname(nickname) do
conn
|> put_resp_content_type("application/activity+json")
|> put_view(UserView)
|> render("keys.json", %{user: user})
else
nil -> {:error, :not_found}
%{local: false} -> {:error, :not_found}
end
end
def object(%{assigns: assigns} = conn, _) do def object(%{assigns: assigns} = conn, _) do
with ap_id <- Endpoint.url() <> conn.request_path, with ap_id <- Endpoint.url() <> conn.request_path,
%Object{} = object <- Object.get_cached_by_ap_id(ap_id), %Object{} = object <- Object.get_cached_by_ap_id(ap_id),
@ -187,9 +160,7 @@ def maybe_skip_cache(conn, user) do
end end
end end
@doc """ # GET /relay/following
GET /relay/following
"""
def relay_following(conn, _params) do def relay_following(conn, _params) do
with %{halted: false} = conn <- FederatingPlug.call(conn, []) do with %{halted: false} = conn <- FederatingPlug.call(conn, []) do
conn conn
@ -226,9 +197,7 @@ def following(%{assigns: %{user: for_user}} = conn, %{"nickname" => nickname}) d
end end
end end
@doc """ # GET /relay/followers
GET /relay/followers
"""
def relay_followers(conn, _params) do def relay_followers(conn, _params) do
with %{halted: false} = conn <- FederatingPlug.call(conn, []) do with %{halted: false} = conn <- FederatingPlug.call(conn, []) do
conn conn
@ -348,6 +317,14 @@ def internal_fetch(conn, _params) do
|> represent_service_actor(conn) |> represent_service_actor(conn)
end end
@doc "Returns the authenticated user's ActivityPub User object or a 404 Not Found if non-authenticated"
def whoami(%{assigns: %{user: %User{} = user}} = conn, _params) do
conn
|> put_resp_content_type("application/activity+json")
|> put_view(UserView)
|> render("user.json", %{user: user})
end
def read_inbox( def read_inbox(
%{assigns: %{user: %User{nickname: nickname} = user}} = conn, %{assigns: %{user: %User{nickname: nickname} = user}} = conn,
%{"nickname" => nickname, "page" => page?} = params %{"nickname" => nickname, "page" => page?} = params
@ -398,6 +375,105 @@ def read_inbox(%{assigns: %{user: %User{nickname: as_nickname}}} = conn, %{
|> json(err) |> json(err)
end end
defp fix_user_message(%User{ap_id: actor}, %{"type" => "Create", "object" => object} = activity)
when is_map(object) do
length =
[object["content"], object["summary"], object["name"]]
|> Enum.filter(&is_binary(&1))
|> Enum.join("")
|> String.length()
limit = Pleroma.Config.get([:instance, :limit])
if length < limit do
object =
object
|> Transmogrifier.strip_internal_fields()
|> Map.put("attributedTo", actor)
|> Map.put("actor", actor)
|> Map.put("id", Utils.generate_object_id())
{:ok, Map.put(activity, "object", object)}
else
{:error,
dgettext(
"errors",
"Character limit (%{limit} characters) exceeded, contains %{length} characters",
limit: limit,
length: length
)}
end
end
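# Sketch of the length computation above (hypothetical object fields):
# nil fields are dropped before joining, so only text that is present counts.
["<p>hi</p>", nil, "a title"]
|> Enum.filter(&is_binary(&1))
|> Enum.join("")
|> String.length()
#=> 16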
defp fix_user_message(
%User{ap_id: actor} = user,
%{"type" => "Delete", "object" => object} = activity
) do
with {_, %Object{data: object_data}} <- {:normalize, Object.normalize(object, fetch: false)},
{_, true} <- {:permission, user.is_moderator || actor == object_data["actor"]} do
{:ok, activity}
else
{:normalize, _} ->
{:error, "No such object found"}
{:permission, _} ->
{:forbidden, "You can't delete this object"}
end
end
defp fix_user_message(%User{}, activity) do
{:ok, activity}
end
def update_outbox(
%{assigns: %{user: %User{nickname: nickname, ap_id: actor} = user}} = conn,
%{"nickname" => nickname} = params
) do
params =
params
|> Map.drop(["nickname"])
|> Map.put("id", Utils.generate_activity_id())
|> Map.put("actor", actor)
with {:ok, params} <- fix_user_message(user, params),
{:ok, activity, _} <- Pipeline.common_pipeline(params, local: true),
%Activity{data: activity_data} <- Activity.normalize(activity) do
conn
|> put_status(:created)
|> put_resp_header("location", activity_data["id"])
|> json(activity_data)
else
{:forbidden, message} ->
conn
|> put_status(:forbidden)
|> json(message)
{:error, message} ->
conn
|> put_status(:bad_request)
|> json(message)
e ->
Logger.warning(fn -> "AP C2S: #{inspect(e)}" end)
conn
|> put_status(:bad_request)
|> json("Bad Request")
end
end
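# A hedged example of the C2S request body this accepts (hypothetical
# content): the client POSTs a bare Create to its outbox; any "id" and
# "actor" in the submitted object are discarded and stamped server-side above.
# %{
#   "type" => "Create",
#   "object" => %{"type" => "Note", "content" => "hello from C2S"}
# }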
def update_outbox(%{assigns: %{user: %User{} = user}} = conn, %{"nickname" => nickname}) do
err =
dgettext("errors", "can't update outbox of %{nickname} as %{as_nickname}",
nickname: nickname,
as_nickname: user.nickname
)
conn
|> put_status(:forbidden)
|> json(err)
end
defp errors(conn, {:error, :not_found}) do defp errors(conn, {:error, :not_found}) do
conn conn
|> put_status(:not_found) |> put_status(:not_found)
@ -419,6 +495,21 @@ defp set_requester_reachable(%Plug.Conn{} = conn, _) do
conn conn
end end
def upload_media(%{assigns: %{user: %User{} = user}} = conn, %{"file" => file} = data) do
with {:ok, object} <-
ActivityPub.upload(
file,
actor: User.ap_id(user),
description: Map.get(data, "description")
) do
Logger.debug(inspect(object))
conn
|> put_status(:created)
|> json(object.data)
end
end
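# Hypothetical invocation mirroring the handler above, assuming a
# Plug.Upload from a multipart request:
{:ok, object} =
  ActivityPub.upload(%Plug.Upload{path: "/tmp/cat.png", filename: "cat.png"},
    actor: User.ap_id(user),
    description: "a cat"
  )
object.data["id"]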
def pinned(conn, %{"nickname" => nickname}) do def pinned(conn, %{"nickname" => nickname}) do
with %User{} = user <- User.get_cached_by_nickname(nickname) do with %User{} = user <- User.get_cached_by_nickname(nickname) do
conn conn

@ -233,7 +233,7 @@ def config_descriptions(policies) do
if function_exported?(policy, :config_description, 0) do if function_exported?(policy, :config_description, 0) do
description = description =
@default_description @default_description
|> Map.merge(policy.config_description()) |> Map.merge(policy.config_description)
|> Map.put(:group, :pleroma) |> Map.put(:group, :pleroma)
|> Map.put(:tab, :mrf) |> Map.put(:tab, :mrf)
|> Map.put(:type, :group) |> Map.put(:type, :group)

@ -6,29 +6,14 @@ defmodule Pleroma.Web.ActivityPub.MRF.InlineQuotePolicy do
@moduledoc "Force a quote line into the message content." @moduledoc "Force a quote line into the message content."
@behaviour Pleroma.Web.ActivityPub.MRF.Policy @behaviour Pleroma.Web.ActivityPub.MRF.Policy
alias Pleroma.Object
defp build_inline_quote(prefix, url) do defp build_inline_quote(prefix, url) do
"<span class=\"quote-inline\"><br/><br/>#{prefix}: <a href=\"#{url}\">#{url}</a></span>" "<span class=\"quote-inline\"><br/><br/>#{prefix}: <a href=\"#{url}\">#{url}</a></span>"
end end
defp resolve_urls(quote_url) do defp has_inline_quote?(content, quote_url) do
# Fetching here can cause infinite recursion as we run this logic on inbound objects too
# This is probably not a problem - it's an exceptional corner case for a local user to quote
# a post which doesn't exist
with %Object{} = obj <- Object.normalize(quote_url, fetch: false) do
id = obj.data["id"]
url = Map.get(obj.data, "url", id)
{id, url, [id, url, quote_url]}
else
_ -> {quote_url, quote_url, [quote_url]}
end
end
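# Return-shape sketch for a hypothetical known object whose AP id and
# display URL differ:
# {"https://example.com/objects/123",
#  "https://example.com/notice/123",
#  ["https://example.com/objects/123", "https://example.com/notice/123",
#   "https://example.com/notice/123"]}
# and for an unknown object, {quote_url, quote_url, [quote_url]}.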
defp has_inline_quote?(content, urls) do
cond do cond do
# Does the quote URL exist in the content? # Does the quote URL exist in the content?
Enum.any?(urls, fn url -> content =~ url end) -> true content =~ quote_url -> true
# Does the content already have a .quote-inline span? # Does the content already have a .quote-inline span?
content =~ "<span class=\"quote-inline\">" -> true content =~ "<span class=\"quote-inline\">" -> true
# No inline quote found # No inline quote found
@ -37,22 +22,18 @@ defp has_inline_quote?(content, urls) do
end end
defp filter_object(%{"quoteUri" => quote_url} = object) do defp filter_object(%{"quoteUri" => quote_url} = object) do
{id, preferred_url, all_urls} = resolve_urls(quote_url)
object = Map.put(object, "quoteUri", id)
content = object["content"] || "" content = object["content"] || ""
if has_inline_quote?(content, all_urls) do if has_inline_quote?(content, quote_url) do
object object
else else
prefix = Pleroma.Config.get([:mrf_inline_quote, :prefix]) prefix = Pleroma.Config.get([:mrf_inline_quote, :prefix])
content = content =
if String.ends_with?(content, "</p>") do if String.ends_with?(content, "</p>") do
String.trim_trailing(content, "</p>") <> String.trim_trailing(content, "</p>") <> build_inline_quote(prefix, quote_url) <> "</p>"
build_inline_quote(prefix, preferred_url) <> "</p>"
else else
content <> build_inline_quote(prefix, preferred_url) content <> build_inline_quote(prefix, quote_url)
end end
Map.put(object, "content", content) Map.put(object, "content", content)
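The develop side of this compare normalizes quoteUri via resolve_urls/1 and then matches the content against every known form of the quoted post's URL, not just the literal quoteUri. A rough sketch of that matching step, with made-up URLs:

    urls = [
      # canonical AP id, display URL, and the original quoteUri
      "https://example.com/objects/9a3b",
      "https://example.com/notice/105",
      "https://example.com/objects/9a3b"
    ]

    content = ~s(<p>interesting take: https://example.com/notice/105</p>)

    # Same check as the develop-side has_inline_quote?/2:
    # any known form of the URL counts as an existing inline quote.
    Enum.any?(urls, fn url -> content =~ url end)
    # => true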
@@ -101,19 +101,10 @@ defp get_extension_if_safe(response) do
     end
   end
 
-  defp get_int_header(headers, header_name, default \\ nil) do
-    with rawval when rawval != :undefined <- :proplists.get_value(header_name, headers),
-         {int, ""} <- Integer.parse(rawval) do
-      int
-    else
-      _ -> default
-    end
-  end
-
   defp is_remote_size_within_limit?(url) do
     with {:ok, %{status: status, headers: headers} = _response} when status in 200..299 <-
            Pleroma.HTTP.request(:head, url, nil, [], []) do
-      content_length = get_int_header(headers, "content-length")
+      content_length = :proplists.get_value("content-length", headers, nil)
       size_limit = Config.get([:mrf_steal_emoji, :size_limit], @size_limit)
 
       accept_unknown =
@@ -181,7 +172,7 @@ def filter(message), do: {:ok, message}
         description: <<_::272, _::_*256>>,
         key: :hosts | :rejected_shortcodes | :size_limit,
         suggestions: [any(), ...],
-        type: {:list, :string} | {:list, :string} | :integer | :boolean
+        type: {:list, :string} | {:list, :string} | :integer
       },
       ...
     ],
@@ -218,12 +209,6 @@ def config_description do
         type: :integer,
         description: "File size limit (in bytes), checked before an emoji is saved to the disk",
         suggestions: ["100000"]
-      },
-      %{
-        key: :download_unknown_size,
-        type: :boolean,
-        description: "Whether to download emoji if size can't be determined ahead of time",
-        suggestions: [false, true]
       }
     ]
   }
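The develop-side helper removed here guards against servers that omit Content-Length or send something unparsable: `:proplists.get_value/3` alone returns the raw header value (or :undefined), which is not safe to compare against an integer limit. A standalone sketch of the same parsing logic:

    headers = [{"content-type", "image/png"}, {"content-length", "4096"}]

    # Parse strictly; fall back to the default when missing or malformed.
    get_int_header = fn headers, name, default ->
      with raw when raw != :undefined <- :proplists.get_value(name, headers),
           {int, ""} <- Integer.parse(raw) do
        int
      else
        _ -> default
      end
    end

    get_int_header.(headers, "content-length", nil)  # => 4096
    get_int_header.(headers, "x-missing", nil)       # => nil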
@@ -53,13 +53,6 @@ def cast_data(data) do
   defp fix_url(%{"url" => url} = data) when is_bitstring(url), do: data
   defp fix_url(%{"url" => url} = data) when is_map(url), do: Map.put(data, "url", url["href"])
 
-  defp fix_url(%{"url" => url} = data) when is_list(url) do
-    data
-    |> Map.put("url", List.first(url))
-    |> fix_url()
-  end
-
   defp fix_url(data), do: data
 
   defp fix_tag(%{"tag" => tag} = data) when is_list(tag) do
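Some implementations deliver `url` as a list of link objects; the develop-side clause keeps the first entry and re-runs normalization so the string and map clauses can finish the job. A self-contained sketch under that assumption (the module name is illustrative):

    defmodule UrlFix do
      def fix_url(%{"url" => url} = data) when is_binary(url), do: data
      def fix_url(%{"url" => %{"href" => href}} = data), do: Map.put(data, "url", href)

      # List-shaped url: take the first entry, then normalize again.
      def fix_url(%{"url" => [first | _]} = data),
        do: data |> Map.put("url", first) |> fix_url()

      def fix_url(data), do: data
    end

    UrlFix.fix_url(%{"url" => [%{"href" => "https://files.example/a.png"}]})
    # => %{"url" => "https://files.example/a.png"}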
@@ -8,7 +8,6 @@ defmodule Pleroma.Web.ActivityPub.ObjectValidators.EmojiReactValidator do
   alias Pleroma.Emoji
   alias Pleroma.Object
   alias Pleroma.Web.ActivityPub.ObjectValidators.CommonFixes
-  alias Pleroma.Web.ActivityPub.Transmogrifier
 
   import Ecto.Changeset
   import Pleroma.Web.ActivityPub.ObjectValidators.CommonValidations
@@ -53,7 +52,6 @@ def changeset(struct, data) do
   defp fix(data) do
     data =
       data
-      |> Transmogrifier.fix_tag()
       |> fix_emoji_qualification()
       |> CommonFixes.fix_actor()
      |> CommonFixes.fix_activity_addressing()
@@ -14,15 +14,15 @@ defmodule Pleroma.Web.ActivityPub.ObjectValidators.UserValidator do
   @behaviour Pleroma.Web.ActivityPub.ObjectValidator.Validating
 
   alias Pleroma.Object.Containment
-
-  require Pleroma.Constants
+  alias Pleroma.Signature
 
   @impl true
   def validate(object, meta)
 
   def validate(%{"type" => type, "id" => _id} = data, meta)
-      when type in Pleroma.Constants.actor_types() do
-    with :ok <- validate_inbox(data),
+      when type in ["Person", "Organization", "Group", "Application"] do
+    with :ok <- validate_pubkey(data),
+         :ok <- validate_inbox(data),
          :ok <- contain_collection_origin(data) do
       {:ok, data, meta}
     else
@@ -33,6 +33,33 @@ def validate(%{"type" => type, "id" => _id} = data, meta)
 
   def validate(_, _), do: {:error, "Not a user object"}
 
+  defp maybe_validate_owner(nil, _actor), do: :ok
+  defp maybe_validate_owner(actor, actor), do: :ok
+  defp maybe_validate_owner(_owner, _actor), do: :error
+
+  defp validate_pubkey(
+         %{"id" => id, "publicKey" => %{"id" => pk_id, "publicKeyPem" => _key}} = data
+       )
+       when id != nil do
+    with {_, {:ok, kactor}} <- {:key, Signature.key_id_to_actor_id(pk_id)},
+         true <- id == kactor,
+         :ok <- maybe_validate_owner(Map.get(data, "owner"), id) do
+      :ok
+    else
+      {:key, _} ->
+        {:error, "Unable to determine actor id from key id"}
+
+      false ->
+        {:error, "Key id does not relate to user id"}
+
+      _ ->
+        {:error, "Actor does not own its public key"}
+    end
+  end
+
+  # pubkey is optional atm
+  defp validate_pubkey(_data), do: :ok
+
   defp validate_inbox(%{"id" => id, "inbox" => inbox}) do
     case Containment.same_origin(id, inbox) do
       :ok -> :ok
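The added check hinges on Signature.key_id_to_actor_id/1. Most implementations publish the key id as the actor id plus a fragment, so dropping the fragment should recover the owning actor; the sketch below is illustrative only (made-up URL, and the real function handles more cases):

    key_id = "https://example.com/users/alice#main-key"

    actor_id =
      key_id
      |> URI.parse()
      |> Map.put(:fragment, nil)
      |> URI.to_string()

    actor_id == "https://example.com/users/alice"
    # => true, which is what ties the key back to the `id` under validation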

Some files were not shown because too many files have changed in this diff.