Merge remote-tracking branch 'origin/develop' into benchmark-finishing

lain 2019-10-10 14:40:59 +02:00
commit c54ae662dc
585 changed files with 22068 additions and 16479 deletions


@@ -1,3 +1,3 @@
[
-inputs: ["mix.exs", "{config,lib,test}/**/*.{ex,exs}"]
+inputs: ["mix.exs", "{config,lib,test}/**/*.{ex,exs}", "priv/repo/migrations/*.exs"]
]

.gitignore

@@ -38,7 +38,12 @@ erl_crash.dump
# Prevent committing docs files
/priv/static/doc/*
+docs/generated_config.md
# Code test coverage
/cover
/Elixir.*.coverdata
+.idea
+pleroma.iml


@@ -99,19 +99,14 @@ analysis:
docs-deploy:
stage: deploy
-image: alpine:3.9
+image: alpine:latest
only:
- master@pleroma/pleroma
- develop@pleroma/pleroma
before_script:
-- apk update && apk add openssh-client rsync
+- apk add curl
script:
-- mkdir -p ~/.ssh
+- curl -X POST -F"token=$DOCS_PIPELINE_TRIGGER" -F'ref=master' -F"variables[BRANCH]=$CI_COMMIT_REF_NAME" https://git.pleroma.social/api/v4/projects/673/trigger/pipeline
-- echo "${SSH_HOST_KEY}" > ~/.ssh/known_hosts
-- eval $(ssh-agent -s)
-- echo "$SSH_PRIVATE_KEY" | tr -d '\r' | ssh-add -
-- rsync -hrvz --delete -e "ssh -p ${SSH_PORT}" priv/static/doc/ "${SSH_USER_HOST_LOCATION}/${CI_COMMIT_REF_NAME}"
review_app:
image: alpine:3.9
stage: deploy
@@ -165,6 +160,7 @@ amd64:
only: &release-only
- master@pleroma/pleroma
- develop@pleroma/pleroma
+- /^maint/.*$/@pleroma/pleroma
artifacts: &release-artifacts
name: "pleroma-$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA-$CI_JOB_NAME"
paths:


@@ -4,14 +4,51 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
## [Unreleased]
### Added
- Refreshing poll results for remote polls
- Job queue stats to the healthcheck page
- Admin API: Add ability to require password reset
- Mastodon API: Account entities now include `follow_requests_count` (planned Mastodon 3.x addition)
- Pleroma API: `GET /api/v1/pleroma/accounts/:id/scrobbles` to get a list of recently scrobbled items
- Pleroma API: `POST /api/v1/pleroma/scrobble` to scrobble a media item
- Mastodon API: Add `upload_limit`, `avatar_upload_limit`, `background_upload_limit`, and `banner_upload_limit` to `/api/v1/instance`
- Mastodon API: Add `pleroma.unread_conversation_count` to the Account entity
- OAuth: support for hierarchical permissions / [Mastodon 2.4.3 OAuth permissions](https://docs.joinmastodon.org/api/permissions/)
- Authentication: Added rate limit for password-authorized actions / login existence checks
- Metadata Link: Atom syndication Feed
### Changed
- **Breaking:** Elixir >=1.8 is now required (was >= 1.7)
- **Breaking:** Admin API: Return link alongside with token on password reset
- Replaced [pleroma_job_queue](https://git.pleroma.social/pleroma/pleroma_job_queue) and `Pleroma.Web.Federator.RetryQueue` with [Oban](https://github.com/sorentwo/oban) (see [`docs/config.md`](docs/config.md) on migrating customized worker / retry settings)
- Introduced [quantum](https://github.com/quantum-elixir/quantum-core) job scheduler
- Admin API: Return `total` when querying for reports
- Mastodon API: Return `pleroma.direct_conversation_id` when creating a direct message (`POST /api/v1/statuses`)
- Admin API: Return link alongside with token on password reset
- MRF (Simple Policy): Also use `:accept`/`:reject` on the actors rather than only their activities
- OStatus: Extract RSS functionality
### Fixed
- Mastodon API: Fix private and direct statuses not being filtered out from the public timeline for an authenticated user (`GET /api/v1/timelines/public`)
- Mastodon API: Inability to get some local users by nickname in `/api/v1/accounts/:id_or_nickname`
- Added `:instance, extended_nickname_format` setting to the default config
- Report emails now include functional links to profiles of remote user accounts
## [1.1.0] - 2019-??-??
### Security
-- OStatus: eliminate the possibility of a protocol downgrade attack.
+- Mastodon API: respect post privacy in `/api/v1/statuses/:id/{favourited,reblogged}_by`
- OStatus: prevent following locked accounts, bypassing the approval process.
### Removed
- **Breaking:** GNU Social API with Qvitter extensions support
- Emoji: Remove longfox emojis.
- Remove `Reply-To` header from report emails for admins.
### Changed
- **Breaking:** Configuration: A setting to explicitly disable the mailer was added, defaulting to true; if you are using a mailer, add `config :pleroma, Pleroma.Emails.Mailer, enabled: true` to your config
- **Breaking:** Configuration: `/media/` is now removed when `base_url` is configured; append `/media/` to your `base_url` config to keep the old behaviour if desired
- **Breaking:** `/api/pleroma/notifications/read` is moved to `/api/v1/pleroma/notifications/read` and now supports `max_id` and responds with Mastodon API entities.
- **Breaking:** `/api/pleroma/admin/users/invite_token` now uses `POST`, changed accepted params and returns full invite in json instead of only token string.
- Configuration: added `config/description.exs`, from which `docs/config.md` is generated
- Configuration: OpenGraph and TwitterCard providers enabled by default
- Configuration: Filter.AnonymizeFilename added ability to retain file extension with custom text
- Mastodon API: `pleroma.thread_muted` key in the Status entity
@@ -19,24 +56,21 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
- NodeInfo: Return `skipThreadContainment` in `metadata` for the `skip_thread_containment` option
- NodeInfo: Return `mailerEnabled` in `metadata`
- Mastodon API: Unsubscribe followers when they unfollow a user
- Mastodon API: `pleroma.thread_muted` key in the Status entity
- AdminAPI: Add "godmode" while fetching user statuses (i.e. admin can see private statuses)
- Improve digest email template
Pagination: (optional) return `total` alongside with `items` when paginating
- Add `rel="ugc"` to all links in statuses, to prevent SEO spam
### Fixed
- Following from Osada
- Not being able to pin unlisted posts
- Objects being re-embedded to activities after being updated (e.g. faved/reposted). Running 'mix pleroma.database prune_objects' again is advised.
- Favorites timeline doing database-intensive queries
- Metadata rendering errors resulting in the entire page being inaccessible
- `federation_incoming_replies_max_depth` option being ignored in certain cases
- Federation/MediaProxy not working with instances that have wrong certificate order
- Mastodon API: Handling of search timeouts (`/api/v1/search` and `/api/v2/search`)
- Mastodon API: Misskey's endless polls being unable to render
- Mastodon API: Embedded relationships not being properly rendered in the Account entity of Status entity
- Mastodon API: Notifications endpoint crashing if one notification failed to render
- Mastodon API: follower/following counters not being nullified, when `hide_follows`/`hide_followers` is set
- Mastodon API: `muted` in the Status entity, using author's account to determine if the thread was muted
- Mastodon API: Add `account_id`, `type`, `offset`, and `limit` to search API (`/api/v1/search` and `/api/v2/search`)
- Mastodon API, streaming: Fix filtering of notifications based on blocks/mutes/thread mutes
- ActivityPub C2S: follower/following collection pages being inaccessible even when authenticated if `hide_followers`/ `hide_follows` was set
@@ -44,17 +78,14 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
- Rich Media: Parser failing when no TTL can be found by image TTL setters
- Rich Media: The crawled URL is now spliced into the rich media data.
- ActivityPub S2S: sharedInbox usage has been mostly aligned with the rules in the AP specification.
- ActivityPub S2S: remote user deletions now work the same as local user deletions.
- ActivityPub S2S: POST requests are now signed with `(request-target)` pseudo-header.
- Not being able to access the Mastodon FE login page on private instances
- Invalid SemVer version generation, when the current branch does not have commits ahead of tag/checked out on a tag
- Pleroma.Upload base_url was not automatically whitelisted by MediaProxy. Now your custom CDN or file hosting will be accessed directly as expected.
- Report email not being sent to admins when the reporter is a remote user
- MRF: ensure that subdomain_match calls are case-insensitive
- Reverse Proxy limiting `max_body_length` was incorrectly defined and only checked `Content-Length` headers which may not be sufficient in some circumstances
- MRF: fix use of unserializable keyword lists in describe() implementations
- ActivityPub: Deactivated user deletion
- ActivityPub: Fix `/users/:nickname/inbox` crashing without an authenticated user
- MRF: fix ability to follow a relay when AntiFollowbotPolicy was enabled
- Mastodon API: Blocks are now treated consistently between the Streaming API and the Timeline APIs
- Mastodon API: `exclude_replies` is correctly handled again.
### Added
- Expiring/ephemeral activities. All activities can have expires_at value set, which controls when they should be deleted automatically.
@@ -62,16 +93,9 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
- Mastodon API: all status JSON responses contain a `pleroma.expires_at` item which states when an activity will expire. The value is only shown to the user who created the activity. To everyone else it's empty.
- Configuration: `ActivityExpiration.enabled` controls whether expired activities will get deleted at the appropriate time. Enabled by default.
- Conversations: Add Pleroma-specific conversation endpoints and status posting extensions. Run the `bump_all_conversations` task again to create the necessary data.
- **Breaking:** MRF describe API, which adds support for exposing configuration information about MRF policies to NodeInfo.
Custom modules will need to be updated by adding, at the very least, `def describe, do: {:ok, %{}}` to the MRF policy modules.
- MRF: Support for priming the mediaproxy cache (`Pleroma.Web.ActivityPub.MRF.MediaProxyWarmingPolicy`)
- MRF: Support for excluding specific domains from Transparency.
- MRF: Support for filtering posts based on who they mention (`Pleroma.Web.ActivityPub.MRF.MentionPolicy`)
- MRF: Support for filtering posts based on ActivityStreams vocabulary (`Pleroma.Web.ActivityPub.MRF.VocabularyPolicy`)
- MRF (Simple Policy): Support for wildcard domains.
- Support for wildcard domains in user domain blocks setting.
- Configuration: `quarantined_instances` support wildcard domains.
- Configuration: `federation_incoming_replies_max_depth` option
- Mastodon API: Support for the [`tagged` filter](https://github.com/tootsuite/mastodon/pull/9755) in [`GET /api/v1/accounts/:id/statuses`](https://docs.joinmastodon.org/api/rest/accounts/#get-api-v1-accounts-id-statuses)
- Mastodon API, streaming: Add support for passing the token in the `Sec-WebSocket-Protocol` header
- Mastodon API, extension: Ability to reset avatar, profile banner, and background
@@ -83,6 +107,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
- Mastodon API: added `/auth/password` endpoint for password reset with rate limit.
- Mastodon API: /api/v1/accounts/:id/statuses now supports nicknames or user id
- Mastodon API: Improve support for the user profile custom fields
- Mastodon API: follower/following counters are nullified when `hide_follows`/`hide_followers` and `hide_follows_count`/`hide_followers_count` are set
- Admin API: Return users' tags when querying reports
- Admin API: Return avatar and display name when querying users
- Admin API: Allow querying user by ID
@@ -98,10 +123,14 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
- ActivityPub: Optional signing of ActivityPub object fetches.
- Admin API: Endpoint for fetching latest user's statuses
- Pleroma API: Add `/api/v1/pleroma/accounts/confirmation_resend?email=<email>` for resending account confirmation.
-- Relays: Added a task to list relay subscriptions.
+- Pleroma API: Email change endpoint.
- Mix Tasks: `mix pleroma.database fix_likes_collections`
- Federation: Remove `likes` from objects.
- Admin API: Added moderation log
- Support for `X-Forwarded-For` and similar HTTP headers which are used by reverse proxies to pass a real user IP address to the backend. Must not be enabled unless your instance is behind at least one reverse proxy (such as Nginx, Apache HTTPD or Varnish Cache).
- Web response cache (currently, enabled for ActivityPub)
- Mastodon API: Added an endpoint to get multiple statuses by IDs (`GET /api/v1/statuses/?ids[]=1&ids[]=2`)
- ActivityPub: Add ActivityPub actor's `discoverable` parameter.
- Admin API: Added moderation log filters (user/start date/end date/search/pagination)
- Reverse Proxy: Do not retry failed requests to limit pressure on the peer
### Changed
- Configuration: Filter.AnonymizeFilename added ability to retain file extension with custom text
@@ -109,10 +138,67 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
- RichMedia: parsers and their order are configured in `rich_media` config.
- RichMedia: add the rich media ttl based on image expiration time.
## [1.0.7] - 2019-09-26
### Fixed
- Broken federation on Erlang 22 (previous versions of hackney http client were using an option that got deprecated)
### Changed
- ActivityPub: The first page in inboxes/outboxes is no longer embedded.
## [1.0.6] - 2019-08-14
### Fixed
- MRF: fix use of unserializable keyword lists in describe() implementations
- ActivityPub S2S: POST requests are now signed with `(request-target)` pseudo-header.
## [1.0.5] - 2019-08-13
### Fixed
- Mastodon API: follower/following counters not being nullified, when `hide_follows`/`hide_followers` is set
- Mastodon API: `muted` in the Status entity, using author's account to determine if the thread was muted
- Mastodon API: return the actual profile URL in the Account entity's `url` property when appropriate
- Templates: properly style anchor tags
- Objects being re-embedded to activities after being updated (e.g. faved/reposted). Running 'mix pleroma.database prune_objects' again is advised.
- Not being able to access the Mastodon FE login page on private instances
- MRF: ensure that subdomain_match calls are case-insensitive
- Fix internal server error when using the healthcheck API.
### Added
- **Breaking:** MRF describe API, which adds support for exposing configuration information about MRF policies to NodeInfo.
Custom modules will need to be updated by adding, at the very least, `def describe, do: {:ok, %{}}` to the MRF policy modules.
- Relays: Added a task to list relay subscriptions.
- MRF: Support for filtering posts based on ActivityStreams vocabulary (`Pleroma.Web.ActivityPub.MRF.VocabularyPolicy`)
- MRF (Simple Policy): Support for wildcard domains.
- Support for wildcard domains in user domain blocks setting.
- Configuration: `quarantined_instances` support wildcard domains.
- Mix Tasks: `mix pleroma.database fix_likes_collections`
- Configuration: `federation_incoming_replies_max_depth` option
### Removed
-- Emoji: Remove longfox emojis.
+- Federation: Remove `likes` from objects.
-- Remove `Reply-To` header from report emails for admins.
+- **Breaking:** ActivityPub: The `accept_blocks` configuration setting.
-- ActivityPub: The `accept_blocks` configuration setting.
## [1.0.4] - 2019-08-01
### Fixed
- Invalid SemVer version generation, when the current branch does not have commits ahead of tag/checked out on a tag
## [1.0.3] - 2019-07-31
### Security
- OStatus: eliminate the possibility of a protocol downgrade attack.
- OStatus: prevent following locked accounts, bypassing the approval process.
- TwitterAPI: use CommonAPI to handle remote follows instead of OStatus.
## [1.0.2] - 2019-07-28
### Fixed
- Not being able to pin unlisted posts
- Mastodon API: represent poll IDs as strings
- MediaProxy: fix matching filenames
- MediaProxy: fix filename encoding
- Migrations: fix a sporadic migration failure
- Metadata rendering errors resulting in the entire page being inaccessible
- Federation/MediaProxy not working with instances that have wrong certificate order
- ActivityPub S2S: remote user deletions now work the same as local user deletions.
### Changed
- Configuration: OpenGraph and TwitterCard providers enabled by default
- Configuration: Filter.AnonymizeFilename added ability to retain file extension with custom text
## [1.0.1] - 2019-07-14
### Security


@@ -8,7 +8,7 @@ Pleroma is a microblogging server software that can federate (= exchange message
Pleroma is written in Elixir, high-performance and can run on small devices like a Raspberry Pi.
-For clients it supports both the [GNU Social API with Qvitter extensions](https://twitter-api.readthedocs.io/en/latest/index.html) and the [Mastodon client API](https://docs.joinmastodon.org/api/guidelines/).
+For clients it supports the [Mastodon client API](https://docs.joinmastodon.org/api/guidelines/) with Pleroma extensions (see "Pleroma's APIs and Mastodon API extensions" section on <https://docs-develop.pleroma.social>).
- [Client Applications for Pleroma](https://docs-develop.pleroma.social/clients.html)


@@ -51,6 +51,24 @@
telemetry_event: [Pleroma.Repo.Instrumenter],
migration_lock: nil
scheduled_jobs =
with digest_config <- Application.get_env(:pleroma, :email_notifications)[:digest],
true <- digest_config[:active] do
[{digest_config[:schedule], {Pleroma.Daemons.DigestEmailDaemon, :perform, []}}]
else
_ -> []
end
scheduled_jobs =
scheduled_jobs ++
[{"0 */6 * * * *", {Pleroma.Web.Websub, :refresh_subscriptions, []}}]
config :pleroma, Pleroma.Scheduler,
global: true,
overlap: true,
timezone: :utc,
jobs: scheduled_jobs
config :pleroma, Pleroma.Captcha,
enabled: false,
seconds_valid: 60,
@@ -91,6 +109,7 @@
config :pleroma, Pleroma.Uploaders.S3,
bucket: nil,
streaming_enabled: true,
public_endpoint: "https://s3.amazonaws.com"
config :pleroma, Pleroma.Uploaders.MDII,
@@ -104,7 +123,8 @@
# Put groups that have higher priority than defaults here. Example in `docs/config/custom_emoji.md`
Custom: ["/emoji/*.png", "/emoji/**/*.png"]
],
-default_manifest: "https://git.pleroma.social/pleroma/emoji-index/raw/master/index.json"
+default_manifest: "https://git.pleroma.social/pleroma/emoji-index/raw/master/index.json",
shared_pack_cache_seconds_per_file: 60
config :pleroma, :uri_schemes,
valid_schemes: [
@@ -258,8 +278,9 @@
max_account_fields: 10,
max_remote_account_fields: 20,
account_field_name_length: 512,
-account_field_value_length: 512,
+account_field_value_length: 2048,
-external_user_synchronization: true
+external_user_synchronization: true,
extended_nickname_format: false
config :pleroma, :markup,
# XXX - unfortunately, inline images must be enabled by default right now, because
@@ -313,6 +334,10 @@
follow_handshake_timeout: 500,
sign_object_fetches: true
config :pleroma, :streamer,
workers: 3,
overflow_workers: 2
config :pleroma, :user, deny_follow_blocked: true
config :pleroma, :mrf_normalize_markup, scrub_policy: Pleroma.HTML.Scrubber.Default
@@ -373,6 +398,8 @@
config :phoenix, :format_encoders, json: Jason
config :phoenix, :json_library, Jason
config :pleroma, :gopher,
enabled: false,
ip: {0, 0, 0, 0},
@@ -382,7 +409,8 @@
providers: [
Pleroma.Web.Metadata.Providers.OpenGraph,
Pleroma.Web.Metadata.Providers.TwitterCard,
-Pleroma.Web.Metadata.Providers.RelMe
+Pleroma.Web.Metadata.Providers.RelMe,
Pleroma.Web.Metadata.Providers.Feed
],
unfurl_nsfw: false
@@ -449,13 +477,11 @@
"web"
]
-config :pleroma, Pleroma.Web.Federator.RetryQueue,
+config :pleroma, Oban,
-enabled: false,
+repo: Pleroma.Repo,
-max_jobs: 20,
+verbose: false,
-initial_timeout: 30,
+prune: {:maxlen, 1500},
-max_retries: 5
+queues: [
-config :pleroma_job_queue, :queues,
activity_expiration: 10,
federator_incoming: 50,
federator_outgoing: 50,
@@ -464,6 +490,13 @@
transmogrifier: 20,
scheduled_activities: 10,
background: 5
]
config :pleroma, :workers,
retries: [
federator_incoming: 5,
federator_outgoing: 5
]
config :pleroma, :fetch_initial_posts,
enabled: false,
@@ -478,7 +511,7 @@
class: false,
strip_prefix: false,
new_window: false,
-rel: false
+rel: "ugc"
]
config :pleroma, :ldap,
@@ -556,10 +589,16 @@
config :http_signatures,
adapter: Pleroma.Signature
-config :pleroma, :rate_limit, nil
+config :pleroma, :rate_limit, authentication: {60_000, 15}
config :pleroma, Pleroma.ActivityExpiration, enabled: true
config :pleroma, Pleroma.Plugs.RemoteIp, enabled: false
config :pleroma, :web_cache_ttl,
activity_pub: nil,
activity_pub_question: 30_000
# Import environment specific config. This must remain at the bottom
# of this file so it overrides the configuration defined above.
import_config "#{Mix.env()}.exs"

config/description.exs (new file)

File diff suppressed because it is too large.


@@ -30,7 +30,8 @@
notify_email: "noreply@example.com",
skip_thread_containment: false,
federating: false,
-external_user_synchronization: false
+external_user_synchronization: false,
static_dir: "test/instance_static/"
config :pleroma, :activitypub, sign_object_fetches: false
@@ -61,7 +62,11 @@
config :web_push_encryption, :http_client, Pleroma.Web.WebPushHttpClientMock
-config :pleroma_job_queue, disabled: true
+config :pleroma, Oban,
queues: false,
prune: :disabled
config :pleroma, Pleroma.Scheduler, jobs: []
config :pleroma, Pleroma.ScheduledActivity,
daily_user_limit: 2,


@@ -60,9 +60,13 @@ Authentication is required and the user must be an admin.
- Method: `POST`
- Params:
-- `nickname`
-- `email`
-- `password`
+`users`: [
+{
+`nickname`,
+`email`,
+`password`
+}
+]
- Response: Users nickname
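A request sketch for the new array-based parameters follows; the host, the token variable, the endpoint path (`/api/pleroma/admin/users`), and the JSON encoding of the body are illustrative assumptions rather than part of the documented contract:

```sh
# Hypothetical example: create two users in one request.
# Assumes the users are sent as a JSON `users` array and that an
# admin-scoped OAuth Bearer token is used for authentication.
curl -X POST https://pleroma.example.org/api/pleroma/admin/users \
  -H "Authorization: Bearer $ADMIN_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{
    "users": [
      {"nickname": "alice", "email": "alice@example.org", "password": "secret1"},
      {"nickname": "bob", "email": "bob@example.org", "password": "secret2"}
    ]
  }'
```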
## `/api/pleroma/admin/users/follow`
@@ -220,15 +224,25 @@ Note: Available `:permission_group` is currently moderator and admin. 404 is ret
## `/api/pleroma/admin/users/invite_token`
-### Get an account registration invite token
+### Create an account registration invite token
-- Methods: `GET`
+- Methods: `POST`
- Params:
-- *optional* `invite` => [
- *optional* `max_use` (integer)
- *optional* `expires_at` (date string e.g. "2019-04-07")
-]
+- Response:
-- Response: invite token (base64 string)
```json
{
"id": integer,
"token": string,
"used": boolean,
"expires_at": date,
"uses": integer,
"max_use": integer,
"invite_type": string (possible values: `one_time`, `reusable`, `date_limited`, `reusable_date_limited`)
}
```
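A minimal sketch of the new `POST` usage; the host, the token variable, and the JSON encoding of the params are assumptions (form data may also be accepted):

```sh
# Hypothetical example: create a reusable invite that expires on a given date.
curl -X POST https://pleroma.example.org/api/pleroma/admin/users/invite_token \
  -H "Authorization: Bearer $ADMIN_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"max_use": 5, "expires_at": "2019-04-07"}'
```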
## `/api/pleroma/admin/users/invites`
@@ -294,16 +308,32 @@ Note: Available `:permission_group` is currently moderator and admin. 404 is ret
- Methods: `GET`
- Params: none
-- Response: password reset token (base64 string)
+- Response:
```json
{
"token": "base64 reset token",
"link": "https://pleroma.social/api/pleroma/password_reset/url-encoded-base64-token"
}
```
## `/api/pleroma/admin/users/:nickname/force_password_reset`
### Force password reset for a user with a given nickname
- Methods: `PATCH`
- Params: none
- Response: none (code `204`)
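A request sketch (the host and token variable are placeholders; an admin-scoped OAuth Bearer token is assumed):

```sh
# Hypothetical example: force a password reset for the user "lain".
# A successful call returns an empty 204 response.
curl -X PATCH https://pleroma.example.org/api/pleroma/admin/users/lain/force_password_reset \
  -H "Authorization: Bearer $ADMIN_TOKEN"
```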
## `/api/pleroma/admin/reports`
### Get a list of reports
- Method `GET`
- Params:
-- `state`: optional, the state of reports. Valid values are `open`, `closed` and `resolved`
+- *optional* `state`: **string** the state of reports. Valid values are `open`, `closed` and `resolved`
-- `limit`: optional, the number of records to retrieve
+- *optional* `limit`: **integer** the number of records to retrieve
-- `since_id`: optional, returns results that are more recent than the specified id
+- *optional* `page`: **integer** page number
-- `max_id`: optional, returns results that are older than the specified id
+- *optional* `page_size`: **integer** number of log entries per page (default is `50`)
- Response:
- On failure: 403 Forbidden error `{"error": "error_msg"}` when requested by anonymous or non-admin
- On success: JSON, returns a list of reports, where:
@@ -313,6 +343,7 @@ Note: Available `:permission_group` is currently moderator and admin. 404 is ret
```json
{
"total" : 1,
"reports": [
{
"account": {
@@ -680,6 +711,7 @@ Compile time settings (need instance reboot):
}
]
}
```
- Response:
@@ -700,7 +732,11 @@
- Method `GET`
- Params:
- *optional* `page`: **integer** page number
-- *optional* `page_size`: **integer** number of users per page (default is `50`)
+- *optional* `page_size`: **integer** number of log entries per page (default is `50`)
- *optional* `start_date`: **datetime (ISO 8601)** filter logs by creation date, starting from `start_date`. Accepts datetime in ISO 8601 format (YYYY-MM-DDThh:mm:ss), e.g. `2005-08-09T18:31:42`
- *optional* `end_date`: **datetime (ISO 8601)** filter logs by creation date, ending at `end_date`. Accepts datetime in ISO 8601 format (YYYY-MM-DDThh:mm:ss), e.g. `2005-08-09T18:31:42`
- *optional* `user_id`: **integer** filter logs by actor's id
- *optional* `search`: **string** search logs by the log message
- Response:
```json
@@ -718,3 +754,10 @@ Compile time settings (need instance reboot):
}
]
```
## `POST /api/pleroma/admin/reload_emoji`
### Reload the instance's custom emoji
* Method `POST`
* Authentication: required
* Params: None
* Response: JSON, "ok" and 200 status


@@ -21,7 +21,8 @@ Adding the parameter `with_muted=true` to the timeline queries will also return
Has these additional fields under the `pleroma` object:
- `local`: true if the post was made on the local instance
-- `conversation_id`: the ID of the conversation the status is associated with (if any)
+- `conversation_id`: the ID of the AP context the status is associated with (if any)
- `direct_conversation_id`: the ID of the Mastodon direct message conversation the status is associated with (if any)
- `in_reply_to_account_acct`: the `acct` property of User entity for replied user (if any)
- `content`: a map consisting of alternate representations of the `content` property with the key being its mimetype. Currently the only alternate representation supported is `text/plain`
- `spoiler_text`: a map consisting of alternate representations of the `spoiler_text` property with the key being its mimetype. Currently the only alternate representation supported is `text/plain`
@@ -50,9 +51,12 @@ Has these additional fields under the `pleroma` object:
- `confirmation_pending`: boolean, true if a new user account is waiting on email confirmation to be activated
- `hide_followers`: boolean, true when the user has follower hiding enabled
- `hide_follows`: boolean, true when the user has follow hiding enabled
- `hide_followers_count`: boolean, true when the user has follower stat hiding enabled
- `hide_follows_count`: boolean, true when the user has follow stat hiding enabled
- `settings_store`: A generic map of settings for frontends. Opaque to the backend. Only returned in `verify_credentials` and `update_credentials`
- `chat_token`: The token needed for Pleroma chat. Only returned in `verify_credentials`
- `deactivated`: boolean, true when the user is deactivated
- `unread_conversation_count`: The count of unread conversations. Only returned to the account owner.
### Source
@@ -91,6 +95,20 @@ Additional parameters can be added to the JSON body/Form data:
- `expires_in`: The number of seconds the posted activity should expire in. When a posted activity expires it will be deleted from the server, and a delete request for it will be federated. This needs to be longer than an hour.
- `in_reply_to_conversation_id`: Will reply to a given conversation, addressing only the people who are part of the recipient set of that conversation. Sets the visibility to `direct`.
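A posting sketch that combines both extensions; the host, the token variable, and the JSON encoding are assumptions (`expires_in` must be longer than an hour, hence the one-day value):

```sh
# Hypothetical example: reply into an existing conversation with a status
# that is deleted automatically after one day (86400 seconds).
curl -X POST https://pleroma.example.org/api/v1/statuses \
  -H "Authorization: Bearer $TOKEN" \
  -H "Content-Type: application/json" \
  -d '{
    "status": "ephemeral reply",
    "in_reply_to_conversation_id": "12345",
    "expires_in": 86400
  }'
```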
## GET `/api/v1/statuses`
An endpoint to get multiple statuses by IDs.
Required parameters:
- `ids`: array of activity ids
Usage example: `GET /api/v1/statuses/?ids[]=1&ids[]=2`.
Returns: array of Status.
The maximum number of statuses is limited to 100 per request.
## PATCH `/api/v1/update_credentials`
Additional parameters can be added to the JSON body/Form data:
@@ -98,6 +116,8 @@ Additional parameters can be added to the JSON body/Form data:
- `no_rich_text` - if true, html tags are stripped from all statuses requested from the API
- `hide_followers` - if true, user's followers will be hidden
- `hide_follows` - if true, user's follows will be hidden
- `hide_followers_count` - if true, user's follower count will be hidden
- `hide_follows_count` - if true, user's follow count will be hidden
- `hide_favorites` - if true, user's favorites timeline will be hidden
- `show_role` - if true, user's role (e.g. admin, moderator) will be exposed to anyone in the API
- `default_scope` - the scope returned under `privacy` key in Source subentity


@@ -124,7 +124,7 @@ Request parameters can be passed via [query strings](https://en.wikipedia.org/wi
```
## `/api/pleroma/admin/`
-See [Admin-API](Admin-API.md)
+See [Admin-API](admin_api.md)
## `/api/v1/pleroma/notifications/read`
### Mark notifications as read
@@ -317,10 +317,21 @@ See [Admin-API](Admin-API.md)
"active": 0, # active processes
"idle": 0, # idle processes
"memory_used": 0.00, # Memory used
-"healthy": true # Instance state
+"healthy": true, # Instance state
"job_queue_stats": {} # Job queue stats
}
```
## `/api/pleroma/change_email`
### Change account email
* Method `POST`
* Authentication: required
* Params:
* `password`: user's password
* `email`: new email
* Response: JSON. Returns `{"status": "success"}` if the change was successful, `{"error": "[error message]"}` otherwise
* Note: Currently, Mastodon has no API for changing email. If they add one in the future, it might be incompatible with Pleroma.
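A request sketch (the host and token variable are placeholders; form-encoded params are an assumption):

```sh
# Hypothetical example: change the account email.
curl -X POST https://pleroma.example.org/api/pleroma/change_email \
  -H "Authorization: Bearer $TOKEN" \
  -d "password=hunter2" \
  -d "email=new-address@example.org"
```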
# Pleroma Conversations
Pleroma Conversations have the same general structure as Mastodon Conversations. The behavior differs in the following ways when using these endpoints:
@@ -355,3 +366,109 @@ The status posting endpoint takes an additional parameter, `in_reply_to_conversa
* Params:
* `recipients`: A list of ids of users that should receive posts to this conversation. This will replace the current list of recipients, so submit the full list. The owner of the conversation will always be part of the set of recipients, though.
* Response: JSON, statuses (200 - healthy, 503 unhealthy)
## `GET /api/pleroma/emoji/packs`
### Lists the custom emoji packs on the server
* Method `GET`
* Authentication: not required
* Params: None
* Response: JSON, "ok" and 200 status and the JSON hashmap of "pack name" to "pack contents"
## `PUT /api/pleroma/emoji/packs/:name`
### Creates an empty custom emoji pack
* Method `PUT`
* Authentication: required
* Params: None
* Response: JSON, "ok" and 200 status or 409 if the pack with that name already exists
## `DELETE /api/pleroma/emoji/packs/:name`
### Delete a custom emoji pack
* Method `DELETE`
* Authentication: required
* Params: None
* Response: JSON, "ok" and 200 status or 500 if there was an error deleting the pack
## `POST /api/pleroma/emoji/packs/:name/update_file`
### Update a file in a custom emoji pack
* Method `POST`
* Authentication: required
* Params:
* if the `action` is `add`, adds an emoji named `shortcode` to the pack `pack_name`,
that means that the emoji file needs to be uploaded with the request
(thus requiring it to be a multipart request) and be named `file`.
There can also be an optional `filename` that will be the new emoji file name
(if it's not there, the name will be taken from the uploaded file).
* if the `action` is `update`, changes the emoji shortcode
(from `shortcode` to `new_shortcode`) or moves the file (from the current filename to `new_filename`)
* if the `action` is `remove`, removes the emoji named `shortcode` and its associated file
* Response: JSON, updated "files" section of the pack and 200 status, 409 if the trying to use a shortcode
that is already taken, 400 if there was an error with the shortcode, filename or file (additional info
in the "error" part of the response JSON)
## `POST /api/pleroma/emoji/packs/:name/update_metadata`
### Updates (replaces) pack metadata
* Method `POST`
* Authentication: required
* Params:
* `new_data`: new metadata to replace the old one
* Response: JSON, updated "metadata" section of the pack and 200 status or 400 if there was a
problem with the new metadata (the error is specified in the "error" part of the response JSON)
## `POST /api/pleroma/emoji/packs/download_from`
### Requests the instance to download the pack from another instance
* Method `POST`
* Authentication: required
* Params:
* `instance_address`: the address of the instance to download from
* `pack_name`: the pack to download from that instance
* Response: JSON, "ok" and 200 status if the pack was downloaded, or 500 if there were
errors downloading the pack
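A request sketch (the host, the token variable, and the JSON encoding of the params are assumptions):

```sh
# Hypothetical example: download the shared pack "mypack" from another instance.
curl -X POST https://pleroma.example.org/api/pleroma/emoji/packs/download_from \
  -H "Authorization: Bearer $TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"instance_address": "https://other.example.org", "pack_name": "mypack"}'
```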
## `POST /api/pleroma/emoji/packs/list_from`
### Requests the instance to list the packs from another instance
* Method `POST`
* Authentication: required
* Params:
* `instance_address`: the address of the instance to download from
* Response: JSON with the pack list, same as if the request was made to that instance's
list endpoint directly + 200 status
## `GET /api/pleroma/emoji/packs/:name/download_shared`
### Requests a local pack from the instance
* Method `GET`
* Authentication: not required
* Params: None
* Response: the archive of the pack with a 200 status code, 403 if the pack is not set as shared,
404 if the pack does not exist
## `GET /api/v1/pleroma/accounts/:id/scrobbles`
### Requests a list of current and recent Listen activities for an account
* Method `GET`
* Authentication: not required
* Params: None
* Response: An array of media metadata entities.
* Example response:
```json
[
{
"account": {...},
"id": "1234",
"title": "Some Title",
"artist": "Some Artist",
"album": "Some Album",
"length": 180000,
"created_at": "2019-09-28T12:40:45.000Z"
}
]
```
## `POST /api/v1/pleroma/scrobble`
### Creates a new Listen activity for an account
* Method `POST`
* Authentication: required
* Params:
* `title`: the title of the media playing
* `album`: the album of the media playing [optional]
* `artist`: the artist of the media playing [optional]
* `length`: the length of the media playing [optional]
* Response: the newly created media metadata entity representing the Listen activity
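A request sketch (the host and token variable are placeholders; form-encoded params are an assumption):

```sh
# Hypothetical example: scrobble a track that is currently playing.
curl -X POST https://pleroma.example.org/api/v1/pleroma/scrobble \
  -H "Authorization: Bearer $TOKEN" \
  -d "title=Some Title" \
  -d "artist=Some Artist" \
  -d "album=Some Album" \
  -d "length=180000"
```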


@@ -0,0 +1,19 @@
# Transferring the config to/from the database
!!! danger
This is a Work In Progress, not usable just yet.
Every command should be run with a prefix: for OTP releases it is `./bin/pleroma_ctl config`, and for source installs it is
`mix pleroma.config`.
## Transfer config from file to DB.
```sh
$PREFIX migrate_to_db
```
## Transfer config from DB to `config/env.exported_from_db.secret.exs`
```sh
$PREFIX migrate_from_db <env>
```


@@ -0,0 +1,48 @@
# Database maintenance tasks
Every command should be run with a prefix: for OTP releases it is `./bin/pleroma_ctl database`, and for source installs it is `mix pleroma.database`.
## Replace embedded objects with their references
Replaces embedded objects with references to them in the `objects` table. This only needs to be run once if the instance was created before Pleroma 1.0.5. It is not a migration because it can significantly increase the database size after it runs; afterwards, `VACUUM FULL` can reclaim about 20% of the pre-migration database size (the exact amount depends on what is in the database, so your mileage may vary).
```sh
$PREFIX remove_embedded_objects [<options>]
```
### Options
- `--vacuum` - run `VACUUM FULL` after the embedded objects are replaced with their references
## Prune old remote posts from the database
This will prune remote posts older than 90 days (configurable with [`config :pleroma, :instance, remote_post_retention_days`](../../configuration/cheatsheet.md#instance)) from the database; they will be refetched from the source when accessed.
!!! note
The disk space will only be reclaimed after `VACUUM FULL`
```sh
$PREFIX prune_objects [<options>]
```
### Options
- `--vacuum` - run `VACUUM FULL` after the objects are pruned
## Create a conversation for all existing DMs
Can be safely re-run
```sh
$PREFIX bump_all_conversations
```
## Remove duplicated items from following and update followers count for all users
```sh
$PREFIX update_users_following_followers_counts
```
## Fix the pre-existing "likes" collections for all objects
```sh
$PREFIX fix_likes_collections
```


@@ -0,0 +1,13 @@
# Managing digest emails
Every command should be run with a prefix: for OTP releases it is `./bin/pleroma_ctl digest`, and for source installs it is `mix pleroma.digest`.
## Send a digest email since a given date (user registration date by default), ignoring user activity status.
```sh
$PREFIX test <nickname> [<since_date>]
```
Example:
```sh
$PREFIX test donaldtheduck 2019-05-20
```


@@ -0,0 +1,30 @@
# Managing emoji packs
Every command should be run with a prefix: for OTP releases it is `./bin/pleroma_ctl emoji`, and for source installs it is `mix pleroma.emoji`.
## Lists emoji packs and metadata specified in the manifest
```sh
$PREFIX ls-packs [<options>]
```
### Options
- `-m, --manifest PATH/URL` - path to a custom manifest; it can either be a URL starting with `http`, in which case the manifest will be fetched from that address, or a local path
## Fetch, verify and install the specified packs from the manifest into `STATIC-DIR/emoji/PACK-NAME`
```sh
$PREFIX get-packs [<options>] <packs>
```
### Options
- `-m, --manifest PATH/URL` - same as [`ls-packs`](#ls-packs)
## Create a new manifest entry and a file list from the specified remote pack file
```sh
$PREFIX gen-pack PACK-URL
```
Currently, only .zip archives are recognized as remote pack files, so packs are assumed to be zip archives. This command is intended to be run interactively: it will first ask you some basic questions about the pack, then download the remote file and generate a SHA256 checksum for it, and finally generate an emoji file list for you.
The manifest entry will either be written to a newly created `index.json` file or appended to the existing one, *replacing* the old pack with the same name if it was in the file previously.
The file list will be written to the file specified previously, *replacing* that file. You _should_ check that the file list doesn't contain anything you don't need in the pack, that is, anything that is not an emoji (the whole pack is downloaded, but only emoji files are extracted).


@@ -0,0 +1,30 @@
# Managing instance configuration
Every command should be run with a prefix: for OTP releases it is `./bin/pleroma_ctl instance`, and for source installs it is `mix pleroma.instance`.
## Generate a new configuration file
```sh
$PREFIX gen [<options>]
```
If any of the options are left unspecified, you will be prompted interactively.
### Options
- `-f`, `--force` - overwrite any output files
- `-o <path>`, `--output <path>` - the output file for the generated configuration
- `--output-psql <path>` - the output file for the generated PostgreSQL setup
- `--domain <domain>` - the domain of your instance
- `--instance-name <instance_name>` - the name of your instance
- `--admin-email <email>` - the email address of the instance admin
- `--notify-email <email>` - email address for notifications
- `--dbhost <hostname>` - the hostname of the PostgreSQL database to use
- `--dbname <database_name>` - the name of the database to use
- `--dbuser <username>` - the user (aka role) to use for the database connection
- `--dbpass <password>` - the password to use for the database connection
- `--rum <Y|N>` - Whether to enable RUM indexes
- `--indexable <Y|N>` - Allow/disallow indexing site by search engines
- `--db-configurable <Y|N>` - Allow/disallow configuring instance from admin part
- `--uploads-dir <path>` - the directory uploads go in when using a local uploader
- `--static-dir <path>` - the directory custom public files should be read from (custom emojis, frontend bundle overrides, robots.txt, etc.)
- `--listen-ip <ip>` - the ip the app should listen to, defaults to 127.0.0.1
- `--listen-port <port>` - the port the app should listen to, defaults to 4000


@@ -0,0 +1,30 @@
# Managing relays
Every command should be run with a prefix: for OTP releases it is `./bin/pleroma_ctl relay`, and for source installs it is `mix pleroma.relay`.
## Follow a relay
```sh
$PREFIX follow <relay_url>
```
Example:
```sh
$PREFIX follow https://example.org/relay
```
## Unfollow a remote relay
```sh
$PREFIX unfollow <relay_url>
```
Example:
```sh
$PREFIX unfollow https://example.org/relay
```
## List relay subscriptions
```sh
$PREFIX list
```


@@ -0,0 +1,12 @@
# Managing uploads
Every command should be run with a prefix: for OTP releases it is `./bin/pleroma_ctl uploads`, and for source installs it is `mix pleroma.uploads`.
## Migrate uploads from local to remote storage
```sh
$PREFIX migrate_local <target_uploader> [<options>]
```
### Options
- `--delete` - delete local uploads after migrating them to the target uploader
A list of available uploaders can be seen in [Configuration Cheat Sheet](../../configuration/cheatsheet.md#pleromaupload)


@@ -0,0 +1,94 @@
# Managing users
Every command should be run with a prefix: for OTP releases it is `./bin/pleroma_ctl user`, and for source installs it is `mix pleroma.user`.
## Create a user
```sh
$PREFIX new <nickname> <email> [<options>]
```
### Options
- `--name <name>` - the user's display name
- `--bio <bio>` - the user's bio
- `--password <password>` - the user's password
- `--moderator`/`--no-moderator` - whether the user should be a moderator
- `--admin`/`--no-admin` - whether the user should be an admin
- `-y`, `--assume-yes`/`--no-assume-yes` - whether to assume yes to all questions
## Generate an invite link
```sh
$PREFIX invite [<options>]
```
### Options
- `--expires-at DATE` - last day on which token is active (e.g. "2019-04-05")
- `--max-use NUMBER` - maximum number of token uses
## List generated invites
```sh
$PREFIX invites
```
## Revoke invite
```sh
$PREFIX revoke_invite <token_or_id>
```
## Delete a user
```sh
$PREFIX rm <nickname>
```
## Delete user's posts and interactions
```sh
$PREFIX delete_activities <nickname>
```
## Sign user out from all applications (delete user's OAuth tokens and authorizations)
```sh
$PREFIX sign_out <nickname>
```
## Deactivate or activate a user
```sh
$PREFIX toggle_activated <nickname>
```
## Unsubscribe local users from a user and deactivate the user
```sh
$PREFIX unsubscribe NICKNAME
```
## Unsubscribe local users from an instance and deactivate all accounts on it
```sh
$PREFIX unsubscribe_all_from_instance <instance>
```
## Create a password reset link for user
```sh
$PREFIX reset_password <nickname>
```
## Set the value of the given user's settings
```sh
$PREFIX set <nickname> [<options>]
```
### Options
- `--locked`/`--no-locked` - whether the user should be locked
- `--moderator`/`--no-moderator` - whether the user should be a moderator
- `--admin`/`--no-admin` - whether the user should be an admin
## Add tags to a user
```sh
$PREFIX tag <nickname> <tags>
```
## Delete tags from a user
```sh
$PREFIX untag <nickname> <tags>
```
## Toggle confirmation status of the user
```sh
$PREFIX toggle_confirmed <nickname>
```


@@ -39,7 +39,7 @@ Feel free to contact us to be added to this list!
### Nekonium
- Homepage: [F-Droid Repository](https://repo.gdgd.jp.net/), [Google Play](https://play.google.com/store/apps/details?id=com.apps.nekonium), [Amazon](https://www.amazon.co.jp/dp/B076FXPRBC/)
-- Source: <https://git.gdgd.jp.net/lin/nekonium/>
+- Source: <https://gogs.gdgd.jp.net/lin/nekonium>
- Contact: [@lin@pleroma.gdgd.jp.net](https://pleroma.gdgd.jp.net/users/lin)
- Platforms: Android
- Features: Streaming Ready
@@ -67,7 +67,7 @@ Feel free to contact us to be added to this list!
## Alternative Web Interfaces
### Brutaldon
- Homepage: <https://jfm.carcosa.net/projects/software/brutaldon/>
-- Source Code: <https://github.com/jfmcbrayer/brutaldon>
+- Source Code: <https://git.carcosa.net/jmcbray/brutaldon>
- Contact: [@gcupc@glitch.social](https://glitch.social/users/gcupc)
- Features: No Streaming


@@ -1,17 +0,0 @@
# General tips for customizing Pleroma FE
There are some configuration scripts for Pleroma BE and FE:
1. `config/prod.secret.exs`
1. `config/config.exs`
1. `priv/static/static/config.json`
The `prod.secret.exs` affects first. `config.exs` is for fallback or default. `config.json` is for GNU-social-BE-Pleroma-FE instances.
Usually all you have to do is:
1. Copy the section in the `config/config.exs` which you want to activate.
1. Paste into `config/prod.secret.exs`.
1. Edit `config/prod.secret.exs`.
1. Restart the Pleroma daemon.
`prod.secret.exs` is for the `MIX_ENV=prod` environment. `dev.secret.exs` is for the `MIX_ENV=dev` environment respectively.


@@ -1,12 +0,0 @@
# Small customizations
See also static_dir.md for visual settings.
## Theme
All users of your instance will be able to change the theme they use by going to the settings (the cog in the top-right hand corner). However, if you wish to change the default theme, you can do so by editing `theme` in `config/dev.secret.exs` accordingly.
## Message Visibility
To enable message visibility options when posting like in the Mastodon frontend, set
`scope_options_enabled` to `true` in `config/dev.secret.exs`.
View file
@ -1,7 +1,11 @@
# Configuration # Configuration Cheat Sheet
This is a cheat sheet for the Pleroma configuration file; any setting that can be configured should be listed here.
Pleroma configuration works by first importing the base config (`config/config.exs` on source installs, compiled-in on OTP releases), then overriding it with the environment config (`config/$MIX_ENV.exs` on source installs, N/A to OTP releases), and finally overriding it with the user config (`config/$MIX_ENV.secret.exs` on source installs, typically `/etc/pleroma/config.exs` on OTP releases).
To avoid breakages and merge conflicts, you shouldn't edit the base config directly, but it can be used as a reference if you don't understand how an option is supposed to be formatted; the latest version can be viewed [here](https://git.pleroma.social/pleroma/pleroma/blob/develop/config/config.exs).
This file describe the configuration, it is recommended to edit the relevant *.secret.exs file instead of the others founds in the ``config`` directory.
If you run Pleroma with ``MIX_ENV=prod`` the file is ``prod.secret.exs``, otherwise it is ``dev.secret.exs``.
## Pleroma.Upload ## Pleroma.Upload
* `uploader`: Select which `Pleroma.Uploaders` to use * `uploader`: Select which `Pleroma.Uploaders` to use
@ -11,7 +15,8 @@ If you run Pleroma with ``MIX_ENV=prod`` the file is ``prod.secret.exs``, otherw
* `proxy_remote`: If you're using a remote uploader, Pleroma will proxy media requests instead of redirecting to it. * `proxy_remote`: If you're using a remote uploader, Pleroma will proxy media requests instead of redirecting to it.
* `proxy_opts`: Proxy options, see `Pleroma.ReverseProxy` documentation. * `proxy_opts`: Proxy options, see `Pleroma.ReverseProxy` documentation.
Note: `strip_exif` has been replaced by `Pleroma.Upload.Filter.Mogrify`. !!! warning
`strip_exif` has been replaced by `Pleroma.Upload.Filter.Mogrify`.
## Pleroma.Uploaders.Local ## Pleroma.Uploaders.Local
* `uploads`: Which directory to store the user-uploads in, relative to pleromas working directory * `uploads`: Which directory to store the user-uploads in, relative to pleromas working directory
@ -23,6 +28,7 @@ Note: `strip_exif` has been replaced by `Pleroma.Upload.Filter.Mogrify`.
* `truncated_namespace`: If you use S3 compatible service such as Digital Ocean Spaces or CDN, set folder name or "" etc. * `truncated_namespace`: If you use S3 compatible service such as Digital Ocean Spaces or CDN, set folder name or "" etc.
For example, when using CDN to S3 virtual host format, set "". For example, when using CDN to S3 virtual host format, set "".
At this time, write CNAME to CDN in public_endpoint. At this time, write CNAME to CDN in public_endpoint.
* `streaming_enabled`: Enable streaming uploads; when enabled, the file is sent to the server in chunks as it's being read. Some providers may not support this, so try disabling it if you have upload problems.
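As a rough sketch (assuming these options live under the `Pleroma.Uploaders.S3` section; the CDN endpoint is hypothetical):
```elixir
config :pleroma, Pleroma.Uploaders.S3,
  # empty namespace, as suggested above for CDN-to-S3 virtual host format
  truncated_namespace: "",
  # CNAME of the CDN
  public_endpoint: "https://cdn.example.com",
  streaming_enabled: true
```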
## Pleroma.Upload.Filter.Mogrify ## Pleroma.Upload.Filter.Mogrify
@ -110,12 +116,6 @@ config :pleroma, Pleroma.Emails.Mailer,
* `allowed_post_formats`: MIME-type list of formats allowed to be posted (transformed into HTML) * `allowed_post_formats`: MIME-type list of formats allowed to be posted (transformed into HTML)
* `mrf_transparency`: Make the content of your Message Rewrite Facility settings public (via nodeinfo). * `mrf_transparency`: Make the content of your Message Rewrite Facility settings public (via nodeinfo).
* `mrf_transparency_exclusions`: Exclude specific instance names from MRF transparency. The use of the exclusions feature will be disclosed in nodeinfo as a boolean value. * `mrf_transparency_exclusions`: Exclude specific instance names from MRF transparency. The use of the exclusions feature will be disclosed in nodeinfo as a boolean value.
* `scope_copy`: Copy the scope (private/unlisted/public) in replies to posts by default.
* `subject_line_behavior`: Allows changing the default behaviour of subject lines in replies. Valid values:
* "email": Copy and preprend re:, as in email.
* "masto": Copy verbatim, as in Mastodon.
* "noop": Don't copy the subject.
* `always_show_subject_input`: When set to false, auto-hide the subject field when it's empty.
* `extended_nickname_format`: Set to `true` to use extended local nicknames format (allows underscores/dashes). This will break federation with * `extended_nickname_format`: Set to `true` to use extended local nicknames format (allows underscores/dashes). This will break federation with
older software for these nicknames. older software for these nicknames.
* `max_pinned_statuses`: The maximum number of pinned statuses. `0` will disable the feature. * `max_pinned_statuses`: The maximum number of pinned statuses. `0` will disable the feature.
@ -131,13 +131,17 @@ config :pleroma, Pleroma.Emails.Mailer,
* `user_name_length`: A user name maximum length (default: `100`) * `user_name_length`: A user name maximum length (default: `100`)
* `skip_thread_containment`: Skip filter out broken threads. The default is `false`. * `skip_thread_containment`: Skip filter out broken threads. The default is `false`.
* `limit_to_local_content`: Limit unauthenticated users to search for local statuses and users only. Possible values: `:unauthenticated`, `:all` and `false`. The default is `:unauthenticated`. * `limit_to_local_content`: Limit unauthenticated users to search for local statuses and users only. Possible values: `:unauthenticated`, `:all` and `false`. The default is `:unauthenticated`.
* `dynamic_configuration`: Allow transferring configuration to DB with the subsequent customization from Admin api.
* `max_account_fields`: The maximum number of custom fields in the user profile (default: `10`) * `max_account_fields`: The maximum number of custom fields in the user profile (default: `10`)
* `max_remote_account_fields`: The maximum number of custom fields in the remote user profile (default: `20`) * `max_remote_account_fields`: The maximum number of custom fields in the remote user profile (default: `20`)
* `account_field_name_length`: An account field name maximum length (default: `512`) * `account_field_name_length`: An account field name maximum length (default: `512`)
* `account_field_value_length`: An account field value maximum length (default: `512`) * `account_field_value_length`: An account field value maximum length (default: `2048`)
* `external_user_synchronization`: Enables following/followers counters synchronization for external users. * `external_user_synchronization`: Enables following/followers counters synchronization for external users.
!!! danger
This is a Work In Progress, not usable just yet
* `dynamic_configuration`: Allow transferring configuration to DB with the subsequent customization from Admin api.
## :logger ## :logger
@ -185,7 +189,7 @@ See the [Quack Github](https://github.com/azohra/quack) for more details
## :frontend_configurations ## :frontend_configurations
This can be used to configure a keyword list that keeps the configuration data for any kind of frontend. By default, settings for `pleroma_fe` and `masto_fe` are configured. This can be used to configure a keyword list that keeps the configuration data for any kind of frontend. By default, settings for `pleroma_fe` and `masto_fe` are configured. You can find the documentation for `pleroma_fe` configuration into [Pleroma-FE configuration and customization for instance administrators](/frontend/CONFIGURATION/#options).
Frontends can access these settings at `/api/pleroma/frontend_configurations` Frontends can access these settings at `/api/pleroma/frontend_configurations`
@ -207,6 +211,7 @@ These settings **need to be complete**, they will override the defaults.
NOTE: for versions < 1.0, you need to set [`:fe`](#fe) to false, as shown a few lines below. NOTE: for versions < 1.0, you need to set [`:fe`](#fe) to false, as shown a few lines below.
## :fe ## :fe
!!! warning
__THIS IS DEPRECATED__ __THIS IS DEPRECATED__
If you are using this method, please change it to the [`frontend_configurations`](#frontend_configurations) method. If you are using this method, please change it to the [`frontend_configurations`](#frontend_configurations) method.
@ -260,7 +265,7 @@ All criteria are configured as a map of regular expressions to lists of policy m
Example: Example:
``` ```elixir
config :pleroma, :mrf_subchain, config :pleroma, :mrf_subchain,
match_actor: %{ match_actor: %{
~r/https:\/\/example.com/s => [Pleroma.Web.ActivityPub.MRF.DropPolicy] ~r/https:\/\/example.com/s => [Pleroma.Web.ActivityPub.MRF.DropPolicy]
@ -300,7 +305,10 @@ config :pleroma, :mrf_subchain,
* `dstport`: Port advertised in urls (optional, defaults to `port`) * `dstport`: Port advertised in urls (optional, defaults to `port`)
## Pleroma.Web.Endpoint ## Pleroma.Web.Endpoint
`Phoenix` endpoint configuration, all configuration options can be viewed [here](https://hexdocs.pm/phoenix/Phoenix.Endpoint.html#module-dynamic-configuration), only common options are listed here
!!! note
`Phoenix` endpoint configuration, all configuration options can be viewed [here](https://hexdocs.pm/phoenix/Phoenix.Endpoint.html#module-dynamic-configuration), only common options are listed here.
* `http` - a list containing http protocol configuration, all configuration options can be viewed [here](https://hexdocs.pm/plug_cowboy/Plug.Cowboy.html#module-options), only common options are listed here. For deployment using docker, you need to set this to `[ip: {0,0,0,0}, port: 4000]` to make pleroma accessible from other containers (such as your nginx server). * `http` - a list containing http protocol configuration, all configuration options can be viewed [here](https://hexdocs.pm/plug_cowboy/Plug.Cowboy.html#module-options), only common options are listed here. For deployment using docker, you need to set this to `[ip: {0,0,0,0}, port: 4000]` to make pleroma accessible from other containers (such as your nginx server).
- `ip` - a tuple consisting of 4 integers - `ip` - a tuple consisting of 4 integers
- `port` - `port`
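For example, a sketch of the Docker-friendly binding mentioned above:
```elixir
config :pleroma, Pleroma.Web.Endpoint,
  # listen on all interfaces so other containers (e.g. nginx) can reach Pleroma
  http: [ip: {0, 0, 0, 0}, port: 4000]
```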
@ -313,7 +321,8 @@ config :pleroma, :mrf_subchain,
**Important note**: if you modify anything inside these lists, default `config.exs` values will be overwritten, which may result in breakage, to make sure this does not happen please copy the default value for the list from `config.exs` and modify/add only what you need !!! warning
If you modify anything inside these lists, default `config.exs` values will be overwritten, which may result in breakage, to make sure this does not happen please copy the default value for the list from `config.exs` and modify/add only what you need
Example: Example:
```elixir ```elixir
@ -400,41 +409,73 @@ You can then do
curl "http://localhost:4000/api/pleroma/admin/invite_token?admin_token=somerandomtoken" curl "http://localhost:4000/api/pleroma/admin/invite_token?admin_token=somerandomtoken"
``` ```
## :pleroma_job_queue ## Oban
[Pleroma Job Queue](https://git.pleroma.social/pleroma/pleroma_job_queue) configuration: a list of queues with maximum concurrent jobs. [Oban](https://github.com/sorentwo/oban) asynchronous job processor configuration.
Configuration options are described in the [Oban readme](https://github.com/sorentwo/oban#usage):
* `repo` - app's Ecto repo (`Pleroma.Repo`)
* `verbose` - logs verbosity
* `prune` - non-retryable jobs [pruning settings](https://github.com/sorentwo/oban#pruning) (`:disabled` / `{:maxlen, value}` / `{:maxage, value}`)
* `queues` - job queues (see below)
Pleroma has the following queues: Pleroma has the following queues:
* `activity_expiration` - Activity expiration
* `federator_outgoing` - Outgoing federation * `federator_outgoing` - Outgoing federation
* `federator_incoming` - Incoming federation * `federator_incoming` - Incoming federation
* `mailer` - Email sender, see [`Pleroma.Emails.Mailer`](#pleroma-emails-mailer) * `mailer` - Email sender, see [`Pleroma.Emails.Mailer`](#pleromaemailsmailer)
* `transmogrifier` - Transmogrifier * `transmogrifier` - Transmogrifier
* `web_push` - Web push notifications * `web_push` - Web push notifications
* `scheduled_activities` - Scheduled activities, see [`Pleroma.ScheduledActivities`](#pleromascheduledactivity) * `scheduled_activities` - Scheduled activities, see [`Pleroma.ScheduledActivity`](#pleromascheduledactivity)
Example: Example:
```elixir ```elixir
config :pleroma_job_queue, :queues, config :pleroma, Oban,
repo: Pleroma.Repo,
verbose: false,
prune: {:maxlen, 1500},
queues: [
federator_incoming: 50, federator_incoming: 50,
federator_outgoing: 50 federator_outgoing: 50
]
``` ```
This config contains two queues: `federator_incoming` and `federator_outgoing`. Both have the `max_jobs` set to `50`. This config contains two queues: `federator_incoming` and `federator_outgoing`. Both have the number of max concurrent jobs set to `50`.
## Pleroma.Web.Federator.RetryQueue ### Migrating `pleroma_job_queue` settings
* `enabled`: If set to `true`, failed federation jobs will be retried `config :pleroma_job_queue, :queues` is replaced by `config :pleroma, Oban, :queues` and uses the same format (keys are queues' names, values are max concurrent jobs numbers).
* `max_jobs`: The maximum amount of parallel federation jobs running at the same time.
* `initial_timeout`: The initial timeout in seconds ## :workers
* `max_retries`: The maximum number of times a federation job is retried
Includes custom worker options not interpretable directly by `Oban`.
* `retries` — a keyword list where keys are `Oban` queues (see above) and values are the maximum number of attempts for failed jobs.
Example:
```elixir
config :pleroma, :workers,
retries: [
federator_incoming: 5,
federator_outgoing: 5
]
```
### Migrating `Pleroma.Web.Federator.RetryQueue` settings
* `max_retries` is replaced with `config :pleroma, :workers, retries: [federator_outgoing: 5]`
* `enabled: false` corresponds to `config :pleroma, :workers, retries: [federator_outgoing: 1]`
* deprecated options: `max_jobs`, `initial_timeout`
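A sketch of the migration, assuming the old config only tuned the retry count:
```elixir
# before (deprecated)
config :pleroma, Pleroma.Web.Federator.RetryQueue,
  enabled: true,
  max_retries: 5

# after
config :pleroma, :workers,
  retries: [federator_outgoing: 5]
```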
## Pleroma.Web.Metadata ## Pleroma.Web.Metadata
* `providers`: a list of metadata providers to enable. Providers available: * `providers`: a list of metadata providers to enable. Providers available:
* Pleroma.Web.Metadata.Providers.OpenGraph * Pleroma.Web.Metadata.Providers.OpenGraph
* Pleroma.Web.Metadata.Providers.TwitterCard * Pleroma.Web.Metadata.Providers.TwitterCard
* Pleroma.Web.Metadata.Providers.RelMe - add links from user bio with rel=me into the `<header>` as `<link rel=me>` * Pleroma.Web.Metadata.Providers.RelMe - add links from user bio with rel=me into the `<header>` as `<link rel=me>`
* Pleroma.Web.Metadata.Providers.Feed - add a link to a user's Atom feed into the `<header>` as `<link rel=alternate>`
* `unfurl_nsfw`: If set to `true` nsfw attachments will be shown in previews * `unfurl_nsfw`: If set to `true` nsfw attachments will be shown in previews
## :rich_media ## :rich_media
@ -485,10 +526,28 @@ config :auto_linker,
class: false, class: false,
strip_prefix: false, strip_prefix: false,
new_window: false, new_window: false,
rel: false rel: "ugc"
] ]
``` ```
## Pleroma.Scheduler
Configuration for [Quantum](https://github.com/quantum-elixir/quantum-core) jobs scheduler.
See [Quantum readme](https://github.com/quantum-elixir/quantum-core#usage) for the list of supported options.
Example:
```elixir
config :pleroma, Pleroma.Scheduler,
global: true,
overlap: true,
timezone: :utc,
jobs: [{"0 */6 * * * *", {Pleroma.Web.Websub, :refresh_subscriptions, []}}]
```
The above example defines a single job which invokes `Pleroma.Web.Websub.refresh_subscriptions()` every 6 hours ("0 */6 * * * *", [crontab format](https://en.wikipedia.org/wiki/Cron)).
## Pleroma.ScheduledActivity ## Pleroma.ScheduledActivity
* `daily_user_limit`: the number of scheduled activities a user is allowed to create in a single day (Default: `25`) * `daily_user_limit`: the number of scheduled activities a user is allowed to create in a single day (Default: `25`)
@ -497,7 +556,7 @@ config :auto_linker,
## Pleroma.ActivityExpiration ## Pleroma.ActivityExpiration
# `enabled`: whether expired activities will be sent to the job queue to be deleted * `enabled`: whether expired activities will be sent to the job queue to be deleted
## Pleroma.Web.Auth.Authenticator ## Pleroma.Web.Auth.Authenticator
@ -573,13 +632,14 @@ Email notifications settings.
OAuth consumer mode allows sign in / sign up via external OAuth providers (e.g. Twitter, Facebook, Google, Microsoft, etc.). OAuth consumer mode allows sign in / sign up via external OAuth providers (e.g. Twitter, Facebook, Google, Microsoft, etc.).
Implementation is based on Ueberauth; see the list of [available strategies](https://github.com/ueberauth/ueberauth/wiki/List-of-Strategies). Implementation is based on Ueberauth; see the list of [available strategies](https://github.com/ueberauth/ueberauth/wiki/List-of-Strategies).
Note: each strategy is shipped as a separate dependency; in order to get the strategies, run `OAUTH_CONSUMER_STRATEGIES="..." mix deps.get`, !!! note
e.g. `OAUTH_CONSUMER_STRATEGIES="twitter facebook google microsoft" mix deps.get`. Each strategy is shipped as a separate dependency; in order to get the strategies, run `OAUTH_CONSUMER_STRATEGIES="..." mix deps.get`, e.g. `OAUTH_CONSUMER_STRATEGIES="twitter facebook google microsoft" mix deps.get`. The server should also be started with `OAUTH_CONSUMER_STRATEGIES="..." mix phx.server` in case you enable any strategies.
The server should also be started with `OAUTH_CONSUMER_STRATEGIES="..." mix phx.server` in case you enable any strategies.
Note: each strategy requires separate setup (on external provider side and Pleroma side). Below are the guidelines on setting up most popular strategies. !!! note
Each strategy requires separate setup (on external provider side and Pleroma side). Below are the guidelines on setting up most popular strategies.
Note: make sure that `"SameSite=Lax"` is set in `extra_cookie_attrs` when you have this feature enabled. OAuth consumer mode will not work with `"SameSite=Strict"` !!! note
Make sure that `"SameSite=Lax"` is set in `extra_cookie_attrs` when you have this feature enabled. OAuth consumer mode will not work with `"SameSite=Strict"`
* For Twitter, [register an app](https://developer.twitter.com/en/apps), configure callback URL to https://<your_host>/oauth/twitter/callback * For Twitter, [register an app](https://developer.twitter.com/en/apps), configure callback URL to https://<your_host>/oauth/twitter/callback
@ -653,6 +713,8 @@ Configure OAuth 2 provider capabilities:
* `pack_extensions`: A list of file extensions for emojis, when no emoji.txt for a pack is present. Example `[".png", ".gif"]` * `pack_extensions`: A list of file extensions for emojis, when no emoji.txt for a pack is present. Example `[".png", ".gif"]`
* `groups`: Emojis are ordered in groups (tags). This is an array of key-value pairs where the key is the groupname and the value the location or array of locations. `*` can be used as a wildcard. Example `[Custom: ["/emoji/*.png", "/emoji/custom/*.png"]]` * `groups`: Emojis are ordered in groups (tags). This is an array of key-value pairs where the key is the groupname and the value the location or array of locations. `*` can be used as a wildcard. Example `[Custom: ["/emoji/*.png", "/emoji/custom/*.png"]]`
* `default_manifest`: Location of the JSON-manifest. This manifest contains information about the emoji-packs you can download. Currently only one manifest can be added (no arrays). * `default_manifest`: Location of the JSON-manifest. This manifest contains information about the emoji-packs you can download. Currently only one manifest can be added (no arrays).
* `shared_pack_cache_seconds_per_file`: When an emoji pack is shared, the archive is created and cached in memory for this number of seconds multiplied by the number of files.
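A minimal sketch, assuming these options are read from the `:emoji` config key (the values shown are illustrative):
```elixir
config :pleroma, :emoji,
  pack_extensions: [".png", ".gif"],
  shared_pack_cache_seconds_per_file: 60
```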
## Database options ## Database options
@ -673,6 +735,8 @@ This will probably take a long time.
This is an advanced feature and disabled by default. This is an advanced feature and disabled by default.
If your instance is behind a reverse proxy you must enable and configure [`Pleroma.Plugs.RemoteIp`](#pleroma-plugs-remoteip).
A keyword list of rate limiters where a key is a limiter name and value is the limiter configuration. The basic configuration is a tuple where: A keyword list of rate limiters where a key is a limiter name and value is the limiter configuration. The basic configuration is a tuple where:
* The first element: `scale` (Integer). The time scale in milliseconds. * The first element: `scale` (Integer). The time scale in milliseconds.
@ -680,8 +744,6 @@ A keyword list of rate limiters where a key is a limiter name and value is the l
It is also possible to have different limits for unauthenticated and authenticated users: the keyword value must be a list of two tuples where the first one is a config for unauthenticated users and the second one is for authenticated. It is also possible to have different limits for unauthenticated and authenticated users: the keyword value must be a list of two tuples where the first one is a config for unauthenticated users and the second one is for authenticated.
See [`Pleroma.Plugs.RateLimiter`](Pleroma.Plugs.RateLimiter.html) documentation for examples.
Supported rate limiters: Supported rate limiters:
* `:search` for the search requests (account & status search etc.) * `:search` for the search requests (account & status search etc.)
@ -690,3 +752,26 @@ Supported rate limiters:
* `:relation_id_action` for actions on relation with a specific user (follow, unfollow) * `:relation_id_action` for actions on relation with a specific user (follow, unfollow)
* `:statuses_actions` for create / delete / fav / unfav / reblog / unreblog actions on any statuses * `:statuses_actions` for create / delete / fav / unfav / reblog / unreblog actions on any statuses
* `:status_id_action` for fav / unfav or reblog / unreblog actions on the same status by the same user * `:status_id_action` for fav / unfav or reblog / unreblog actions on the same status by the same user
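As an illustration (assuming the limiters are configured under the `:rate_limit` key; the numbers are arbitrary), the following would allow unauthenticated clients 10 and authenticated clients 30 search requests per 10-second window:
```elixir
config :pleroma, :rate_limit,
  # {scale in milliseconds, allowed requests}; unauthenticated first, authenticated second
  search: [{10_000, 10}, {10_000, 30}]
```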
## :web_cache_ttl
The expiration time for the web responses cache. Values should be in milliseconds or `nil` to disable expiration.
Available caches:
* `:activity_pub` - activity pub routes (except question activities). Defaults to `nil` (no expiration).
* `:activity_pub_question` - activity pub routes (question activities). Defaults to `30_000` (30 seconds).
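A sketch matching the defaults described above:
```elixir
config :pleroma, :web_cache_ttl,
  activity_pub: nil,
  activity_pub_question: 30_000
```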
## Pleroma.Plugs.RemoteIp
!!! warning
If your instance is not behind at least one reverse proxy, you should not enable this plug.
`Pleroma.Plugs.RemoteIp` is a shim to call [`RemoteIp`](https://git.pleroma.social/pleroma/remote_ip) but with runtime configuration.
Available options:
* `enabled` - Enable/disable the plug. Defaults to `false`.
* `headers` - A list of strings naming the `req_headers` to use when deriving the `remote_ip`. Order does not matter. Defaults to `~w[forwarded x-forwarded-for x-client-ip x-real-ip]`.
* `proxies` - A list of strings in [CIDR](https://en.wikipedia.org/wiki/CIDR) notation specifying the IPs of known proxies. Defaults to `[]`.
* `reserved` - Defaults to [localhost](https://en.wikipedia.org/wiki/Localhost) and [private network](https://en.wikipedia.org/wiki/Private_network).
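For example, a sketch of enabling the plug behind a single known proxy (the CIDR range is a placeholder):
```elixir
config :pleroma, Pleroma.Plugs.RemoteIp,
  enabled: true,
  proxies: ["10.0.0.0/8"]
```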
View file
@ -4,6 +4,7 @@ Before you add your own custom emoji, check if they are available in an existing
See `Mix.Tasks.Pleroma.Emoji` for information about emoji packs. See `Mix.Tasks.Pleroma.Emoji` for information about emoji packs.
To add custom emoji: To add custom emoji:
* Create the `STATIC-DIR/emoji/` directory if it doesn't exist * Create the `STATIC-DIR/emoji/` directory if it doesn't exist
(`STATIC-DIR` is configurable, `instance/static/` by default) (`STATIC-DIR` is configurable, `instance/static/` by default)
* Create a directory with whatever name you want (custom is a good name to show the purpose of it). * Create a directory with whatever name you want (custom is a good name to show the purpose of it).
View file
@ -1,7 +1,9 @@
# Installing on Alpine Linux # Installing on Alpine Linux
## Installation ## Installation
This guide is a step-by-step installation guide for Alpine Linux. It also assumes that you have administrative rights, either as root or a user with [sudo permissions](https://www.linode.com/docs/tools-reference/custom-kernels-distros/install-alpine-linux-on-your-linode/#configuration). If you want to run this guide with root, ignore the `sudo` at the beginning of the lines, unless it calls a user like `sudo -Hu pleroma`; in this case, use `su -l <username> -s $SHELL -c 'command'` instead. This guide is a step-by-step installation guide for Alpine Linux. The instructions were verified against Alpine v3.10 standard image. You might miss additional dependencies if you use `netboot` instead.
It assumes that you have administrative rights, either as root or a user with [sudo permissions](https://www.linode.com/docs/tools-reference/custom-kernels-distros/install-alpine-linux-on-your-linode/#configuration). If you want to run this guide with root, ignore the `sudo` at the beginning of the lines, unless it calls a user like `sudo -Hu pleroma`; in this case, use `su -l <username> -s $SHELL -c 'command'` instead.
### Required packages ### Required packages
@ -20,12 +22,13 @@ This guide is a step-by-step installation guide for Alpine Linux. It also assume
### Prepare the system ### Prepare the system
* First make sure to have the community repository enabled: * The community repository must be enabled in `/etc/apk/repositories`. Depending on which version and mirror you use this looks like `http://alpine.42.fr/v3.10/community`. If you autogenerated the mirror during installation:
```shell ```shell
echo "https://nl.alpinelinux.org/alpine/latest-stable/community" | sudo tee -a /etc/apk/repository awk 'NR==2' /etc/apk/repositories | sed 's/main/community/' | tee -a /etc/apk/repositories
``` ```
* Then update the system, if not already done: * Then update the system, if not already done:
```shell ```shell
@ -77,7 +80,8 @@ sudo rc-update add postgresql
* Add a new system user for the Pleroma service: * Add a new system user for the Pleroma service:
```shell ```shell
sudo adduser -S -s /bin/false -h /opt/pleroma -H pleroma sudo addgroup pleroma
sudo adduser -S -s /bin/false -h /opt/pleroma -H -G pleroma pleroma
``` ```
**Note**: To execute a single command as the Pleroma system user, use `sudo -Hu pleroma command`. You can also switch to a shell by using `sudo -Hu pleroma $SHELL`. If you dont have and want `sudo` on your system, you can use `su` as root user (UID 0) for a single command by using `su -l pleroma -s $SHELL -c 'command'` and `su -l pleroma -s $SHELL` for starting a shell. **Note**: To execute a single command as the Pleroma system user, use `sudo -Hu pleroma command`. You can also switch to a shell by using `sudo -Hu pleroma $SHELL`. If you dont have and want `sudo` on your system, you can use `su` as root user (UID 0) for a single command by using `su -l pleroma -s $SHELL -c 'command'` and `su -l pleroma -s $SHELL` for starting a shell.
@ -164,7 +168,26 @@ If that doesnt work, make sure, that nginx is not already running. If it stil
sudo cp /opt/pleroma/installation/pleroma.nginx /etc/nginx/conf.d/pleroma.conf sudo cp /opt/pleroma/installation/pleroma.nginx /etc/nginx/conf.d/pleroma.conf
``` ```
* Before starting nginx edit the configuration and change it to your needs (e.g. change servername, change cert paths) * Before starting nginx edit the configuration and change it to your needs. You must change `server_name` and the paths to the certificates. You can use `nano` (install with `apk add nano` if missing).
```
server {
server_name your.domain;
listen 80;
...
}
server {
server_name your.domain;
listen 443 ssl http2;
...
ssl_trusted_certificate /etc/letsencrypt/live/your.domain/chain.pem;
ssl_certificate /etc/letsencrypt/live/your.domain/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/your.domain/privkey.pem;
...
}
```
* Enable and start nginx: * Enable and start nginx:
```shell ```shell
@ -202,12 +225,10 @@ sudo -Hu pleroma MIX_ENV=prod mix pleroma.user new <username> <your@emailaddress
#### Further reading #### Further reading
* [Backup your instance](backup.html) * [Backup your instance](../administration/backup.md)
* [Configuration tips](general-tips-for-customizing-pleroma-fe.html) * [Hardening your instance](../configuration/hardening.md)
* [Hardening your instance](hardening.html) * [How to activate mediaproxy](../configuration/howto_mediaproxy.md)
* [How to activate mediaproxy](howto_mediaproxy.html) * [Updating your instance](../administration/updating.md)
* [Small Pleroma-FE customizations](small_customizations.html)
* [Updating your instance](updating.html)
## Questions ## Questions
View file
@ -200,12 +200,10 @@ sudo -Hu pleroma MIX_ENV=prod mix pleroma.user new <username> <your@emailaddress
#### Further reading #### Further reading
* [Backup your instance](backup.html) * [Backup your instance](../administration/backup.md)
* [Configuration tips](general-tips-for-customizing-pleroma-fe.html) * [Hardening your instance](../configuration/hardening.md)
* [Hardening your instance](hardening.html) * [How to activate mediaproxy](../configuration/howto_mediaproxy.md)
* [How to activate mediaproxy](howto_mediaproxy.html) * [Updating your instance](../administration/updating.md)
* [Small Pleroma-FE customizations](small_customizations.html)
* [Updating your instance](updating.html)
## Questions ## Questions
View file
@ -264,12 +264,10 @@ sudo -Hu pleroma MIX_ENV=prod mix pleroma.user new <username> <your@emailaddress
#### Further reading #### Further reading
* [Backup your instance](backup.html) * [Backup your instance](../administration/backup.md)
* [Configuration tips](general-tips-for-customizing-pleroma-fe.html) * [Hardening your instance](../configuration/hardening.md)
* [Hardening your instance](hardening.html) * [How to activate mediaproxy](../configuration/howto_mediaproxy.md)
* [How to activate mediaproxy](howto_mediaproxy.html) * [Updating your instance](../administration/updating.md)
* [Small Pleroma-FE customizations](small_customizations.html)
* [Updating your instance](updating.html)
## Questions ## Questions
View file
@ -190,12 +190,10 @@ sudo -Hu pleroma MIX_ENV=prod mix pleroma.user new <username> <your@emailaddress
#### Further reading #### Further reading
* [Backup your instance](backup.html) * [Backup your instance](../administration/backup.md)
* [Configuration tips](general-tips-for-customizing-pleroma-fe.html) * [Hardening your instance](../configuration/hardening.md)
* [Hardening your instance](hardening.html) * [How to activate mediaproxy](../configuration/howto_mediaproxy.md)
* [How to activate mediaproxy](howto_mediaproxy.html) * [Updating your instance](../administration/updating.md)
* [Small Pleroma-FE customizations](small_customizations.html)
* [Updating your instance](updating.html)
## Questions ## Questions
View file
@ -5,187 +5,184 @@
## インストール ## インストール
このガイドはDebian Stretchを仮定しています。Ubuntu 16.04でも可能です このガイドはDebian Stretchを利用することを想定しています。Ubuntu 16.04や18.04でもおそらく動作します。また、ユーザはrootもしくはsudoにより管理者権限を持っていることを前提とします。もし、以下の操作をrootユーザで行う場合は、 `sudo` を無視してください。ただし、`sudo -Hu pleroma` のようにユーザを指定している場合には `su <username> -s $SHELL -c 'command'` を代わりに使ってください
### 必要なソフトウェア ### 必要なソフトウェア
- PostgreSQL 9.6+ (postgresql-contrib-9.6 または他のバージョンの PSQL をインストールしてください) - PostgreSQL 9.6以上 (Ubuntu16.04では9.5しか提供されていないので,[](https://www.postgresql.org/download/linux/ubuntu/)こちらから新しいバージョンを入手してください)
- Elixir 1.5 以上 ([Debianのリポジトリからインストールしないこと ここからインストールすること!](https://elixir-lang.org/install.html#unix-and-unix-like))。または [asdf](https://github.com/asdf-vm/asdf) を pleroma ユーザーでインストール。 - postgresql-contrib 9.6以上 (同上)
- Elixir 1.5 以上 ([Debianのリポジトリからインストールしないこと ここからインストールすること!](https://elixir-lang.org/install.html#unix-and-unix-like)。または [asdf](https://github.com/asdf-vm/asdf) をpleromaユーザーでインストールしてください)
- erlang-dev - erlang-dev
- erlang-tools - erlang-tools
- erlang-parsetools - erlang-parsetools
- erlang-eldap (LDAP認証を有効化するときのみ必要)
- erlang-ssh - erlang-ssh
- erlang-xmerl (Jessieではバックポートからインストールすること) - erlang-xmerl
- git - git
- build-essential - build-essential
- openssh
- openssl #### このガイドで利用している追加パッケージ
- nginx prefered (Apacheも動くかもしれませんが、誰もテストしていません)
- certbot (または何らかのACME Let's encryptクライアント) - nginx (おすすめです。他のリバースプロキシを使う場合は、参考となる設定をこのリポジトリから探してください)
- certbot (または何らかのLet's Encrypt向けACMEクライアント)
### システムを準備する ### システムを準備する
* まずシステムをアップデートしてください。 * まずシステムをアップデートしてください。
``` ```
apt update && apt dist-upgrade sudo apt update
sudo apt full-upgrade
``` ```
* 複数のツールとpostgresqlをインストールします。あとで必要になるので * 上記に挙げたパッケージをインストールしておきます
``` ```
apt install git build-essential openssl ssh sudo postgresql-9.6 postgresql-contrib-9.6 sudo apt install git build-essential postgresql postgresql-contrib
``` ```
(postgresqlのバージョンは、あなたのディストロにあわせて変えてください。または、バージョン番号がいらないかもしれません。)
### ElixirとErlangをインストールします ### ElixirとErlangをインストールします
* Erlangのリポジトリをダウンロードおよびインストールします。 * Erlangのリポジトリをダウンロードおよびインストールします。
``` ```
wget -P /tmp/ https://packages.erlang-solutions.com/erlang-solutions_1.0_all.deb && sudo dpkg -i /tmp/erlang-solutions_1.0_all.deb wget -P /tmp/ https://packages.erlang-solutions.com/erlang-solutions_1.0_all.deb
sudo dpkg -i /tmp/erlang-solutions_1.0_all.deb
``` ```
* ElixirとErlangをインストールします、 * ElixirとErlangをインストールします、
``` ```
apt update && apt install elixir erlang-dev erlang-parsetools erlang-xmerl erlang-tools erlang-ssh sudo apt update
sudo apt install elixir erlang-dev erlang-parsetools erlang-xmerl erlang-tools erlang-ssh
``` ```
### Pleroma BE (バックエンド) をインストールします ### Pleroma BE (バックエンド) をインストールします
* 新しいユーザーを作ります。 * Pleroma用に新しいユーザーを作ります。
```
adduser pleroma
```
(Give it any password you want, make it STRONG)
* 新しいユーザーをsudoグループに入れます。
``` ```
usermod -aG sudo pleroma sudo useradd -r -s /bin/false -m -d /var/lib/pleroma -U pleroma
``` ```
* 新しいユーザーに変身し、ホームディレクトリに移動します。 **注意**: Pleromaユーザとして単発のコマンドを実行したい場合はは、`sudo -Hu pleroma command` を使ってください。シェルを使いたい場合は `sudo -Hu pleroma $SHELL`です。もし `sudo` を使わない場合は、rootユーザで `su -l pleroma -s $SHELL -c 'command'` とすることでコマンドを、`su -l pleroma -s $SHELL` とすることでシェルを開始できます。
```
su pleroma
cd ~
```
* Gitリポジトリをクローンします。 * Gitリポジトリをクローンします。
``` ```
git clone -b master https://git.pleroma.social/pleroma/pleroma sudo mkdir -p /opt/pleroma
sudo chown -R pleroma:pleroma /opt/pleroma
sudo -Hu pleroma git clone -b master https://git.pleroma.social/pleroma/pleroma /opt/pleroma
``` ```
* 新しいディレクトリに移動します。 * 新しいディレクトリに移動します。
``` ```
cd pleroma/ cd /opt/pleroma
``` ```
* Pleromaが依存するパッケージをインストールします。Hexをインストールしてもよいか聞かれたら、yesを入力してください。 * Pleromaが依存するパッケージをインストールします。Hexをインストールしてもよいか聞かれたら、yesを入力してください。
``` ```
mix deps.get sudo -Hu pleroma mix deps.get
``` ```
* コンフィギュレーションを生成します。 * コンフィギュレーションを生成します。
``` ```
mix pleroma.instance gen sudo -Hu pleroma mix pleroma.instance gen
``` ```
* rebar3をインストールしてもよいか聞かれたら、yesを入力してください。 * rebar3をインストールしてもよいか聞かれたら、yesを入力してください。
* この処理には時間がかかります。私もよく分かりませんが、何らかのコンパイルが行われているようです。 * このときにpleromaの一部がコンパイルされるため、この処理には時間がかかります。
* あなたのインスタンスについて、いくつかの質問があります。その回答は `config/generated_config.exs` というコンフィギュレーションファイルに保存されます。 * あなたのインスタンスについて、いくつかの質問されます。この質問により `config/generated_config.exs` という設定ファイルが生成されます。
**注意**: メディアプロクシを有効にすると回答して、なおかつ、キャッシュのURLは空欄のままにしている場合は、`generated_config.exs` を編集して、`base_url` で始まる行をコメントアウトまたは削除してください。そして、上にある行の `true` の後にあるコンマを消してください。
* コンフィギュレーションを確認して、もし問題なければ、ファイル名を変更してください。 * コンフィギュレーションを確認して、もし問題なければ、ファイル名を変更してください。
``` ```
mv config/{generated_config.exs,prod.secret.exs} mv config/{generated_config.exs,prod.secret.exs}
``` ```
* これまでのコマンドで、すでに `config/setup_db.psql` というファイルが作られています。このファイルをもとに、データベースを作成します。 * 先程のコマンドで、すでに `config/setup_db.psql` というファイルが作られています。このファイルをもとに、データベースを作成します。
``` ```
sudo su postgres -c 'psql -f config/setup_db.psql' sudo -Hu postgres psql -f config/setup_db.psql
``` ```
* そして、データベースのグレーションを実行します。 * そして、データベースのマイグレーションを実行します。
``` ```
MIX_ENV=prod mix ecto.migrate sudo -Hu pleroma MIX_ENV=prod mix ecto.migrate
``` ```
* Pleromaを起動できるようになりました。 * これでPleromaを起動できるようになりました。
``` ```
MIX_ENV=prod mix phx.server sudo -Hu pleroma MIX_ENV=prod mix phx.server
``` ```
### インストールを終わらせる ### インストールの最終段階
あなたの新しいインスタンスを世界に向けて公開するには、nginxまたは何らかのウェブサーバー (プロクシ) を使用する必要があります。また、Pleroma のためにシステムサービスファイルを作成する必要があります。 あなたの新しいインスタンスを世界に向けて公開するには、nginx等のWebサーバやプロキシサーバをPleromaの前段に使用する必要があります。また、Pleroma のためにシステムサービスファイルを作成する必要があります。
#### Nginx #### Nginx
* まだインストールしていないなら、nginxをインストールします。 * まだインストールしていないなら、nginxをインストールします。
``` ```
apt install nginx sudo apt install nginx
``` ```
* SSLをセットアップします。他の方法でもよいですが、ここではcertbotを説明します。 * SSLをセットアップします。他の方法でもよいですが、ここではcertbotを説明します。
certbotを使うならば、まずそれをインストールします。 certbotを使うならば、まずそれをインストールします。
``` ```
apt install certbot sudo apt install certbot
``` ```
そしてセットアップします。 そしてセットアップします。
``` ```
mkdir -p /var/lib/letsencrypt/.well-known sudo mkdir -p /var/lib/letsencrypt/
% certbot certonly --email your@emailaddress --webroot -w /var/lib/letsencrypt/ -d yourdomain sudo certbot certonly --email <your@emailaddress> -d <yourdomain> --standalone
``` ```
もしうまくいかないときは、先にnginxを設定してください。ssl "on" を "off" に変えてから再試行してください。 もしうまくいかないときは、nginxが正しく動いていない可能性があります。先にnginxを設定してください。ssl "on" を "off" に変えてから再試行してください。
--- ---
* nginxコンフィギュレーションの例をnginxフォルダーにコピーします。 * nginxの設定ファイルサンプルをnginxフォルダーにコピーします。
``` ```
cp /home/pleroma/pleroma/installation/pleroma.nginx /etc/nginx/sites-enabled/pleroma.nginx sudo cp /opt/pleroma/installation/pleroma.nginx /etc/nginx/sites-available/pleroma.nginx
sudo ln -s /etc/nginx/sites-available/pleroma.nginx /etc/nginx/sites-enabled/pleroma.nginx
``` ```
* nginxを起動する前に、コンフィギュレーションを編集してください。例えば、サーバー名、証明書のパスなどを変更する必要があります。 * nginxを起動する前に、設定ファイルを編集してください。例えば、サーバー名、証明書のパスなどを変更する必要があります。
* nginxを再起動します。 * nginxを再起動します。
``` ```
systemctl reload nginx.service sudo systemctl enable --now nginx.service
``` ```
もし証明書を更新する必要が出てきた場合には、nginxの関連するlocationブロックのコメントアウトを外し、以下のコマンドを動かします。
```
sudo certbot certonly --email <your@emailaddress> -d <yourdomain> --webroot -w /var/lib/letsencrypt/
```
#### 他のWebサーバやプロキシ
これに関してはサンプルが `/opt/pleroma/installation/` にあるので、探してみてください。
#### Systemd サービス #### Systemd サービス
* サービスファイルの例をコピーします。 * サービスファイルのサンプルをコピーします。
``` ```
cp /home/pleroma/pleroma/installation/pleroma.service /usr/lib/systemd/system/pleroma.service sudo cp /opt/pleroma/installation/pleroma.service /etc/systemd/system/pleroma.service
``` ```
* サービスファイルを変更します。すべてのパスが正しいことを確認してください。また、`[Service]` セクションに以下の行があることを確認してください。 * サービスファイルを変更します。すべてのパスが正しいことを確認してください
* サービスを有効化し `pleroma.service` を開始してください
``` ```
Environment="MIX_ENV=prod" sudo systemctl enable --now pleroma.service
``` ```
* `pleroma.service` を enable および start してください。 #### 初期ユーザの作成
新たにインスタンスを作成したら、以下のコマンドにより管理者権限を持った初期ユーザを作成できます。
``` ```
systemctl enable --now pleroma.service sudo -Hu pleroma MIX_ENV=prod mix pleroma.user new <username> <your@emailaddress> --admin
``` ```
#### モデレーターを作る #### その他の設定とカスタマイズ
新たにユーザーを作ったら、モデレーター権限を与えたいかもしれません。以下のタスクで可能です。 * [Backup your instance](../administration/backup.md)
``` * [Hardening your instance](../configuration/hardening.md)
mix set_moderator username [true|false] * [How to activate mediaproxy](../configuration/howto_mediaproxy.md)
``` * [Updating your instance](../administration/updating.md)
モデレーターはすべてのポストを消すことができます。将来的には他のことも可能になるかもしれません。
#### メディアプロクシを有効にする
`generate_config` でメディアプロクシを有効にしているなら、すでにメディアプロクシが動作しています。あとから設定を変更したいなら、[How to activate mediaproxy](How-to-activate-mediaproxy) を見てください。
#### コンフィギュレーションとカスタマイズ
* [Backup your instance](backup.html)
* [Configuration tips](general-tips-for-customizing-pleroma-fe.html)
* [Hardening your instance](hardening.html)
* [How to activate mediaproxy](howto_mediaproxy.html)
* [Small Pleroma-FE customizations](small_customizations.html)
* [Updating your instance](updating.html)
## 質問ある? ## 質問ある?
View file
@ -283,12 +283,10 @@ If you opted to allow sudo for the `pleroma` user but would like to remove the a
#### Further reading #### Further reading
* [Backup your instance](backup.html) * [Backup your instance](../administration/backup.md)
* [Configuration tips](general-tips-for-customizing-pleroma-fe.html) * [Hardening your instance](../configuration/hardening.md)
* [Hardening your instance](hardening.html) * [How to activate mediaproxy](../configuration/howto_mediaproxy.md)
* [How to activate mediaproxy](howto_mediaproxy.html) * [Updating your instance](../administration/updating.md)
* [Small Pleroma-FE customizations](small_customizations.html)
* [Updating your instance](updating.html)
## Questions ## Questions
View file
@ -11,7 +11,7 @@ Benefits of OTP releases over from-source installs include:
* **Faster and less bug-prone mix tasks.** On a from-source install one has to wait until a new Pleroma node is started for each mix task and they execute outside of the instance context (for example if a user was deleted via a mix task, the instance will have no knowledge of that and continue to display status count and follows before the cache expires). Mix tasks in OTP releases are executed by calling into a running instance via RPC, which solves both of these problems. * **Faster and less bug-prone mix tasks.** On a from-source install one has to wait until a new Pleroma node is started for each mix task and they execute outside of the instance context (for example if a user was deleted via a mix task, the instance will have no knowledge of that and continue to display status count and follows before the cache expires). Mix tasks in OTP releases are executed by calling into a running instance via RPC, which solves both of these problems.
### Sounds great, how do I switch? ### Sounds great, how do I switch?
Currently we support Linux machines with GNU (e.g. Debian, Ubuntu) or musl (e.g. Alpine) libc and `x86_64`, `aarch64` or `armv7l` CPUs. If you are unsure, check the [Detecting flavour](otp_en.html#detecting-flavour) section in OTP install guide. If your platform is supported, proceed with the guide, if not check the [My platform is not supported](#my-platform-is-not-supported) section. Currently we support Linux machines with GNU (e.g. Debian, Ubuntu) or musl (e.g. Alpine) libc and `x86_64`, `aarch64` or `armv7l` CPUs. If you are unsure, check the [Detecting flavour](otp_en.md#detecting-flavour) section in OTP install guide. If your platform is supported, proceed with the guide, if not check the [My platform is not supported](#my-platform-is-not-supported) section.
### I don't think it is worth the effort, can I stay on a from-source install? ### I don't think it is worth the effort, can I stay on a from-source install?
Yes, currently there are no plans to deprecate them. Yes, currently there are no plans to deprecate them.
@ -70,7 +70,7 @@ and then copy custom emojis to `/var/lib/pleroma/static/emoji/custom`.
This is needed because storing custom emojis in the root directory is deprecated, but if you just move them to `/var/lib/pleroma/static/emoji/custom` it will break emoji urls on old posts. This is needed because storing custom emojis in the root directory is deprecated, but if you just move them to `/var/lib/pleroma/static/emoji/custom` it will break emoji urls on old posts.
Note that globs have been replaced with `pack_extensions`, so if your emojis are not in png/gif you should [modify the default value](config.html#emoji). Note that globs have been replaced with `pack_extensions`, so if your emojis are not in png/gif you should [modify the default value](../configuration/cheatsheet.md#emoji).
### Moving the config ### Moving the config
```sh ```sh
@ -86,7 +86,7 @@ mv ~pleroma/config/prod.secret.exs /etc/pleroma/config.exs
$EDITOR /etc/pleroma/config.exs $EDITOR /etc/pleroma/config.exs
``` ```
## Installing the release ## Installing the release
Before proceeding, get the flavour from [Detecting flavour](otp_en.html#detecting-flavour) section in OTP installation guide. Before proceeding, get the flavour from [Detecting flavour](otp_en.md#detecting-flavour) section in OTP installation guide.
```sh ```sh
# Delete all files in pleroma user's directory # Delete all files in pleroma user's directory
rm -r ~pleroma/* rm -r ~pleroma/*
@ -148,6 +148,6 @@ cp -f ~pleroma/installation/init.d/pleroma /etc/init.d/pleroma
rc-service pleroma start rc-service pleroma start
``` ```
## Running mix tasks ## Running mix tasks
Refer to [Running mix tasks](otp_en.html#running-mix-tasks) section from OTP release installation guide. Refer to [Running mix tasks](otp_en.md#running-mix-tasks) section from OTP release installation guide.
## Updating ## Updating
Refer to [Updating](otp_en.html#updating) section from OTP release installation guide. Refer to [Updating](otp_en.md#updating) section from OTP release installation guide.
View file
@ -42,7 +42,7 @@ apk add curl unzip ncurses postgresql postgresql-contrib nginx certbot
## Setup ## Setup
### Configuring PostgreSQL ### Configuring PostgreSQL
#### (Optional) Installing RUM indexes #### (Optional) Installing RUM indexes
RUM indexes are an alternative indexing scheme that is not included in PostgreSQL by default. You can read more about them on the [Configuration page](config.html#rum-indexing-for-full-text-search). They are completely optional and most of the time are not worth it, especially if you are running a single user instance (unless you absolutely need ordered search results). RUM indexes are an alternative indexing scheme that is not included in PostgreSQL by default. You can read more about them on the [Configuration page](../configuration/cheatsheet.md#rum-indexing-for-full-text-search). They are completely optional and most of the time are not worth it, especially if you are running a single user instance (unless you absolutely need ordered search results).
Debian/Ubuntu (available only on Buster/19.04): Debian/Ubuntu (available only on Buster/19.04):
```sh ```sh
@ -262,8 +262,8 @@ su pleroma -s $SHELL -lc "./bin/pleroma_ctl migrate"
But you should **always check the release notes/changelog** in case there are config deprecations, special update steps, etc. But you should **always check the release notes/changelog** in case there are config deprecations, special update steps, etc.
## Further reading ## Further reading
* [Configuration](config.html)
* [Pleroma's base config.exs](https://git.pleroma.social/pleroma/pleroma/blob/master/config/config.exs) * [Backup your instance](../administration/backup.md)
* [Hardening your instance](hardening.html) * [Hardening your instance](../configuration/hardening.md)
* [Pleroma Clients](clients.html) * [How to activate mediaproxy](../configuration/howto_mediaproxy.md)
* [Emoji pack manager](Mix.Tasks.Pleroma.Emoji.html) * [Updating your instance](../administration/updating.md)
View file
@ -215,7 +215,9 @@
]} ]}
]}, ]},
{ 5222, ejabberd_c2s, [ %% If you want dual stack, you have to clone this entire config stanza
%% and change the bind to "::"
{ {5222, "0.0.0.0"}, ejabberd_c2s, [
%% %%
%% If TLS is compiled in and you installed a SSL %% If TLS is compiled in and you installed a SSL
@ -246,7 +248,9 @@
%% {max_stanza_size, 65536} %% {max_stanza_size, 65536}
%% ]}, %% ]},
{ 5269, ejabberd_s2s_in, [ %% If you want dual stack, you have to clone this entire config stanza
%% and change the bind to "::"
{ {5269, "0.0.0.0"}, ejabberd_s2s_in, [
{shaper, s2s_shaper}, {shaper, s2s_shaper},
{max_stanza_size, 131072}, {max_stanza_size, 131072},
{protocol_options, ["no_sslv3"]} {protocol_options, ["no_sslv3"]}
View file
@ -70,6 +70,7 @@ server {
proxy_set_header Upgrade $http_upgrade; proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade"; proxy_set_header Connection "upgrade";
proxy_set_header Host $http_host; proxy_set_header Host $http_host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
# this is explicitly IPv4 since Pleroma.Web.Endpoint binds on IPv4 only # this is explicitly IPv4 since Pleroma.Web.Endpoint binds on IPv4 only
# and `localhost.` resolves to [::0] on some systems: see issue #930 # and `localhost.` resolves to [::0] on some systems: see issue #930
View file
@ -1,5 +1,5 @@
# Pleroma: A lightweight social networking server # Pleroma: A lightweight social networking server
# Copyright © 2017-2018 Pleroma Authors <https://pleroma.social/> # Copyright © 2017-2019 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only # SPDX-License-Identifier: AGPL-3.0-only
defmodule Mix.Pleroma do defmodule Mix.Pleroma do
View file
@ -27,7 +27,7 @@ def run(["tag"]) do
}) })
end end
def run(["render_timeline", nickname]) do def run(["render_timeline", nickname | _] = args) do
start_pleroma() start_pleroma()
user = Pleroma.User.get_by_nickname(nickname) user = Pleroma.User.get_by_nickname(nickname)
@ -37,33 +37,37 @@ def run(["render_timeline", nickname]) do
|> Map.put("blocking_user", user) |> Map.put("blocking_user", user)
|> Map.put("muting_user", user) |> Map.put("muting_user", user)
|> Map.put("user", user) |> Map.put("user", user)
|> Map.put("limit", 80) |> Map.put("limit", 4096)
|> Pleroma.Web.ActivityPub.ActivityPub.fetch_public_activities() |> Pleroma.Web.ActivityPub.ActivityPub.fetch_public_activities()
|> Enum.reverse() |> Enum.reverse()
inputs = %{ inputs = %{
"One activity" => Enum.take_random(activities, 1), "1 activity" => Enum.take_random(activities, 1),
"Ten activities" => Enum.take_random(activities, 10), "10 activities" => Enum.take_random(activities, 10),
"Twenty activities" => Enum.take_random(activities, 20), "20 activities" => Enum.take_random(activities, 20),
"Forty activities" => Enum.take_random(activities, 40), "40 activities" => Enum.take_random(activities, 40),
"Eighty activities" => Enum.take_random(activities, 80) "80 activities" => Enum.take_random(activities, 80)
} }
inputs =
if Enum.at(args, 2) == "extended" do
Map.merge(inputs, %{
"200 activities" => Enum.take_random(activities, 200),
"500 activities" => Enum.take_random(activities, 500),
"2000 activities" => Enum.take_random(activities, 2000),
"4096 activities" => Enum.take_random(activities, 4096)
})
else
inputs
end
Benchee.run( Benchee.run(
%{ %{
"Parallel rendering" => fn activities ->
Pleroma.Web.MastodonAPI.StatusView.render("index.json", %{
activities: activities,
for: user,
as: :activity
})
end,
"Standart rendering" => fn activities -> "Standart rendering" => fn activities ->
Pleroma.Web.MastodonAPI.StatusView.render("index.json", %{ Pleroma.Web.MastodonAPI.StatusView.render("index.json", %{
activities: activities, activities: activities,
for: user, for: user,
as: :activity, as: :activity
parallel: false
}) })
end end
}, },
View file
@ -8,18 +8,7 @@ defmodule Mix.Tasks.Pleroma.Config do
alias Pleroma.Repo alias Pleroma.Repo
alias Pleroma.Web.AdminAPI.Config alias Pleroma.Web.AdminAPI.Config
@shortdoc "Manages the location of the config" @shortdoc "Manages the location of the config"
@moduledoc """ @moduledoc File.read!("docs/administration/CLI_tasks/config.md")
Manages the location of the config.
## Transfers config from file to DB.
mix pleroma.config migrate_to_db
## Transfers config from DB to file `config/env.exported_from_db.secret.exs`
mix pleroma.config migrate_from_db ENV
"""
def run(["migrate_to_db"]) do def run(["migrate_to_db"]) do
start_pleroma() start_pleroma()
View file
@ -1,5 +1,5 @@
# Pleroma: A lightweight social networking server # Pleroma: A lightweight social networking server
# Copyright © 2017-2018 Pleroma Authors <https://pleroma.social/> # Copyright © 2017-2019 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only # SPDX-License-Identifier: AGPL-3.0-only
defmodule Mix.Tasks.Pleroma.Database do defmodule Mix.Tasks.Pleroma.Database do
@ -13,34 +13,8 @@ defmodule Mix.Tasks.Pleroma.Database do
use Mix.Task use Mix.Task
@shortdoc "A collection of database related tasks" @shortdoc "A collection of database related tasks"
@moduledoc """ @moduledoc File.read!("docs/administration/CLI_tasks/database.md")
A collection of database related tasks
## Replace embedded objects with their references
Replaces embedded objects with references to them in the `objects` table. Only needs to be ran once. The reason why this is not a migration is because it could significantly increase the database size after being ran, however after this `VACUUM FULL` will be able to reclaim about 20% (really depends on what is in the database, your mileage may vary) of the db size before the migration.
mix pleroma.database remove_embedded_objects
Options:
- `--vacuum` - run `VACUUM FULL` after the embedded objects are replaced with their references
## Prune old objects from the database
mix pleroma.database prune_objects
## Create a conversation for all existing DMs. Can be safely re-run.
mix pleroma.database bump_all_conversations
## Remove duplicated items from following and update followers count for all users
mix pleroma.database update_users_following_followers_counts
## Fix the pre-existing "likes" collections for all objects
mix pleroma.database fix_likes_collections
"""
def run(["remove_embedded_objects" | args]) do def run(["remove_embedded_objects" | args]) do
{options, [], []} = {options, [], []} =
OptionParser.parse( OptionParser.parse(
View file
@ -2,16 +2,8 @@ defmodule Mix.Tasks.Pleroma.Digest do
use Mix.Task use Mix.Task
@shortdoc "Manages digest emails" @shortdoc "Manages digest emails"
@moduledoc """ @moduledoc File.read!("docs/administration/CLI_tasks/digest.md")
Manages digest emails
## Send digest email since given date (user registration date by default)
ignoring user activity status.
``mix pleroma.digest test <nickname> <since_date>``
Example: ``mix pleroma.digest test donaldtheduck 2019-05-20``
"""
def run(["test", nickname | opts]) do def run(["test", nickname | opts]) do
Mix.Pleroma.start_pleroma() Mix.Pleroma.start_pleroma()
View file
@ -0,0 +1,42 @@
defmodule Mix.Tasks.Pleroma.Docs do
use Mix.Task
import Mix.Pleroma
@shortdoc "Generates docs from descriptions.exs"
@moduledoc """
Generates docs from `descriptions.exs`.
Supports two formats: `markdown` and `json`.
## Generate Markdown docs
`mix pleroma.docs`
## Generate JSON docs
`mix pleroma.docs json`
"""
def run(["json"]) do
do_run(Pleroma.Docs.JSON)
end
def run(_) do
do_run(Pleroma.Docs.Markdown)
end
defp do_run(implementation) do
start_pleroma()
with {descriptions, _paths} <- Mix.Config.eval!("config/description.exs"),
{:ok, file_path} <-
Pleroma.Docs.Generator.process(
implementation,
descriptions[:pleroma][:config_description]
) do
type = if implementation == Pleroma.Docs.Markdown, do: "Markdown", else: "JSON"
Mix.shell().info([:green, "#{type} docs successfully generated to #{file_path}."])
end
end
end
View file
@ -1,5 +1,5 @@
# Pleroma: A lightweight social networking server # Pleroma: A lightweight social networking server
# Copyright © 2017-2018 Pleroma Authors <https://pleroma.social/> # Copyright © 2017-2019 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-onl # SPDX-License-Identifier: AGPL-3.0-onl
defmodule Mix.Tasks.Pleroma.Ecto do defmodule Mix.Tasks.Pleroma.Ecto do

View file
# Pleroma: A lightweight social networking server # Pleroma: A lightweight social networking server
# Copyright © 2017-2018 Pleroma Authors <https://pleroma.social/> # Copyright © 2017-2019 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-onl # SPDX-License-Identifier: AGPL-3.0-onl
defmodule Mix.Tasks.Pleroma.Ecto.Migrate do defmodule Mix.Tasks.Pleroma.Ecto.Migrate do

View file
# Pleroma: A lightweight social networking server # Pleroma: A lightweight social networking server
# Copyright © 2017-2018 Pleroma Authors <https://pleroma.social/> # Copyright © 2017-2019 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-onl # SPDX-License-Identifier: AGPL-3.0-onl
defmodule Mix.Tasks.Pleroma.Ecto.Rollback do defmodule Mix.Tasks.Pleroma.Ecto.Rollback do

View file
# Pleroma: A lightweight social networking server # Pleroma: A lightweight social networking server
# Copyright © 2017-2018 Pleroma Authors <https://pleroma.social/> # Copyright © 2017-2019 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only # SPDX-License-Identifier: AGPL-3.0-only
defmodule Mix.Tasks.Pleroma.Emoji do defmodule Mix.Tasks.Pleroma.Emoji do
use Mix.Task use Mix.Task
@shortdoc "Manages emoji packs" @shortdoc "Manages emoji packs"
@moduledoc """ @moduledoc File.read!("docs/administration/CLI_tasks/emoji.md")
Manages emoji packs
## ls-packs
mix pleroma.emoji ls-packs [OPTION...]
Lists the emoji packs and metadata specified in the manifest.
### Options
- `-m, --manifest PATH/URL` - path to a custom manifest, it can
either be an URL starting with `http`, in that case the
manifest will be fetched from that address, or a local path
## get-packs
mix pleroma.emoji get-packs [OPTION...] PACKS
Fetches, verifies and installs the specified PACKS from the
manifest into the `STATIC-DIR/emoji/PACK-NAME`
### Options
- `-m, --manifest PATH/URL` - same as ls-packs
## gen-pack
mix pleroma.emoji gen-pack PACK-URL
Creates a new manifest entry and a file list from the specified
remote pack file. Currently, only .zip archives are recognized
as remote pack files and packs are therefore assumed to be zip
archives. This command is intended to run interactively and will
first ask you some basic questions about the pack, then download
the remote file and generate an SHA256 checksum for it, then
generate an emoji file list for you.
The manifest entry will either be written to a newly created
`index.json` file or appended to the existing one, *replacing*
the old pack with the same name if it was in the file previously.
The file list will be written to the file specified previously,
*replacing* that file. You _should_ check that the file list doesn't
contain anything you don't need in the pack, that is, anything that is
not an emoji (the whole pack is downloaded, but only emoji files
are extracted).
"""
def run(["ls-packs" | args]) do def run(["ls-packs" | args]) do
Application.ensure_all_started(:hackney) Application.ensure_all_started(:hackney)
@ -235,7 +188,7 @@ def run(["gen-pack", src]) do
cwd: tmp_pack_dir cwd: tmp_pack_dir
) )
emoji_map = Pleroma.Emoji.make_shortcode_to_file_map(tmp_pack_dir, exts) emoji_map = Pleroma.Emoji.Loader.make_shortcode_to_file_map(tmp_pack_dir, exts)
File.write!(files_name, Jason.encode!(emoji_map, pretty: true)) File.write!(files_name, Jason.encode!(emoji_map, pretty: true))
@ -1,5 +1,5 @@
# Pleroma: A lightweight social networking server # Pleroma: A lightweight social networking server
# Copyright © 2017-2018 Pleroma Authors <https://pleroma.social/> # Copyright © 2017-2019 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only # SPDX-License-Identifier: AGPL-3.0-only
defmodule Mix.Tasks.Pleroma.Instance do defmodule Mix.Tasks.Pleroma.Instance do
@ -7,36 +7,7 @@ defmodule Mix.Tasks.Pleroma.Instance do
import Mix.Pleroma import Mix.Pleroma
@shortdoc "Manages Pleroma instance" @shortdoc "Manages Pleroma instance"
@moduledoc """ @moduledoc File.read!("docs/administration/CLI_tasks/instance.md")
Manages Pleroma instance.
## Generate a new instance config.
mix pleroma.instance gen [OPTION...]
If any options are left unspecified, you will be prompted interactively
## Options
- `-f`, `--force` - overwrite any output files
- `-o PATH`, `--output PATH` - the output file for the generated configuration
- `--output-psql PATH` - the output file for the generated PostgreSQL setup
- `--domain DOMAIN` - the domain of your instance
- `--instance-name INSTANCE_NAME` - the name of your instance
- `--admin-email ADMIN_EMAIL` - the email address of the instance admin
- `--notify-email NOTIFY_EMAIL` - email address for notifications
- `--dbhost HOSTNAME` - the hostname of the PostgreSQL database to use
- `--dbname DBNAME` - the name of the database to use
- `--dbuser DBUSER` - the user (aka role) to use for the database connection
- `--dbpass DBPASS` - the password to use for the database connection
- `--rum Y/N` - Whether to enable RUM indexes
- `--indexable Y/N` - Allow/disallow indexing site by search engines
- `--db-configurable Y/N` - Allow/disallow configuring instance from admin part
- `--uploads-dir` - the directory uploads go in when using a local uploader
- `--static-dir` - the directory custom public files should be read from (custom emojis, frontend bundle overrides, robots.txt, etc.)
- `--listen-ip` - the ip the app should listen to, defaults to 127.0.0.1
- `--listen-port` - the port the app should listen to, defaults to 4000
"""
def run(["gen" | rest]) do def run(["gen" | rest]) do
{options, [], []} = {options, [], []} =
@ -1,5 +1,5 @@
# Pleroma: A lightweight social networking server # Pleroma: A lightweight social networking server
# Copyright © 2017-2018 Pleroma Authors <https://pleroma.social/> # Copyright © 2017-2019 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only # SPDX-License-Identifier: AGPL-3.0-only
defmodule Mix.Tasks.Pleroma.Relay do defmodule Mix.Tasks.Pleroma.Relay do
@ -9,25 +9,8 @@ defmodule Mix.Tasks.Pleroma.Relay do
alias Pleroma.Web.ActivityPub.Relay alias Pleroma.Web.ActivityPub.Relay
@shortdoc "Manages remote relays" @shortdoc "Manages remote relays"
@moduledoc """ @moduledoc File.read!("docs/administration/CLI_tasks/relay.md")
Manages remote relays
## Follow a remote relay
``mix pleroma.relay follow <relay_url>``
Example: ``mix pleroma.relay follow https://example.org/relay``
## Unfollow a remote relay
``mix pleroma.relay unfollow <relay_url>``
Example: ``mix pleroma.relay unfollow https://example.org/relay``
## List relay subscriptions
``mix pleroma.relay list``
"""
def run(["follow", target]) do def run(["follow", target]) do
start_pleroma() start_pleroma()
@ -1,5 +1,5 @@
# Pleroma: A lightweight social networking server # Pleroma: A lightweight social networking server
# Copyright © 2017-2018 Pleroma Authors <https://pleroma.social/> # Copyright © 2017-2019 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only # SPDX-License-Identifier: AGPL-3.0-only
defmodule Mix.Tasks.Pleroma.Uploads do defmodule Mix.Tasks.Pleroma.Uploads do
@ -12,16 +12,8 @@ defmodule Mix.Tasks.Pleroma.Uploads do
@log_every 50 @log_every 50
@shortdoc "Migrates uploads from local to remote storage" @shortdoc "Migrates uploads from local to remote storage"
@moduledoc """ @moduledoc File.read!("docs/administration/CLI_tasks/uploads.md")
Manages uploads
## Migrate uploads from local to remote storage
mix pleroma.uploads migrate_local TARGET_UPLOADER [OPTIONS...]
Options:
- `--delete` - delete local uploads after migrating them to the target uploader
A list of available uploaders can be seen in config.exs
"""
def run(["migrate_local", target_uploader | args]) do def run(["migrate_local", target_uploader | args]) do
delete? = Enum.member?(args, "--delete") delete? = Enum.member?(args, "--delete")
start_pleroma() start_pleroma()
@ -1,96 +1,17 @@
# Pleroma: A lightweight social networking server # Pleroma: A lightweight social networking server
# Copyright © 2017-2018 Pleroma Authors <https://pleroma.social/> # Copyright © 2017-2019 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only # SPDX-License-Identifier: AGPL-3.0-only
defmodule Mix.Tasks.Pleroma.User do defmodule Mix.Tasks.Pleroma.User do
use Mix.Task use Mix.Task
import Ecto.Changeset
import Mix.Pleroma import Mix.Pleroma
alias Pleroma.User alias Pleroma.User
alias Pleroma.UserInviteToken alias Pleroma.UserInviteToken
alias Pleroma.Web.OAuth alias Pleroma.Web.OAuth
@shortdoc "Manages Pleroma users" @shortdoc "Manages Pleroma users"
@moduledoc """ @moduledoc File.read!("docs/administration/CLI_tasks/user.md")
Manages Pleroma users.
## Create a new user.
mix pleroma.user new NICKNAME EMAIL [OPTION...]
Options:
- `--name NAME` - the user's name (i.e., "Lain Iwakura")
- `--bio BIO` - the user's bio
- `--password PASSWORD` - the user's password
- `--moderator`/`--no-moderator` - whether the user is a moderator
- `--admin`/`--no-admin` - whether the user is an admin
- `-y`, `--assume-yes`/`--no-assume-yes` - whether to assume yes to all questions
## Generate an invite link.
mix pleroma.user invite [OPTION...]
Options:
- `--expires-at DATE` - last day on which token is active (e.g. "2019-04-05")
- `--max-use NUMBER` - maximum numbers of token uses
## List generated invites
mix pleroma.user invites
## Revoke invite
mix pleroma.user revoke_invite TOKEN OR TOKEN_ID
## Delete the user's account.
mix pleroma.user rm NICKNAME
## Delete the user's activities.
mix pleroma.user delete_activities NICKNAME
## Sign user out from all applications (delete user's OAuth tokens and authorizations).
mix pleroma.user sign_out NICKNAME
## Deactivate or activate the user's account.
mix pleroma.user toggle_activated NICKNAME
## Unsubscribe local users from user's account and deactivate it
mix pleroma.user unsubscribe NICKNAME
## Unsubscribe local users from an entire instance and deactivate all accounts
mix pleroma.user unsubscribe_all_from_instance INSTANCE
## Create a password reset link.
mix pleroma.user reset_password NICKNAME
## Set the value of the given user's settings.
mix pleroma.user set NICKNAME [OPTION...]
Options:
- `--locked`/`--no-locked` - whether the user's account is locked
- `--moderator`/`--no-moderator` - whether the user is a moderator
- `--admin`/`--no-admin` - whether the user is an admin
## Add tags to a user.
mix pleroma.user tag NICKNAME TAGS
## Delete tags from a user.
mix pleroma.user untag NICKNAME TAGS
## Toggle confirmation of the user's account.
mix pleroma.user toggle_confirmed NICKNAME
"""
def run(["new", nickname, email | rest]) do def run(["new", nickname, email | rest]) do
{options, [], []} = {options, [], []} =
OptionParser.parse( OptionParser.parse(
@ -228,9 +149,9 @@ def run(["unsubscribe", nickname]) do
shell_info("Deactivating #{user.nickname}") shell_info("Deactivating #{user.nickname}")
User.deactivate(user) User.deactivate(user)
{:ok, friends} = User.get_friends(user) user
|> User.get_friends()
Enum.each(friends, fn friend -> |> Enum.each(fn friend ->
user = User.get_cached_by_id(user.id) user = User.get_cached_by_id(user.id)
shell_info("Unsubscribing #{friend.nickname} from #{user.nickname}") shell_info("Unsubscribing #{friend.nickname} from #{user.nickname}")
@ -405,7 +326,7 @@ def run(["delete_activities", nickname]) do
start_pleroma() start_pleroma()
with %User{local: true} = user <- User.get_cached_by_nickname(nickname) do with %User{local: true} = user <- User.get_cached_by_nickname(nickname) do
{:ok, _} = User.delete_user_activities(user) User.delete_user_activities(user)
shell_info("User #{nickname} statuses deleted.") shell_info("User #{nickname} statuses deleted.")
else else
_ -> _ ->
@ -443,39 +364,21 @@ def run(["sign_out", nickname]) do
end end
defp set_moderator(user, value) do defp set_moderator(user, value) do
info_cng = User.Info.admin_api_update(user.info, %{is_moderator: value}) {:ok, user} = User.update_info(user, &User.Info.admin_api_update(&1, %{is_moderator: value}))
user_cng =
Ecto.Changeset.change(user)
|> put_embed(:info, info_cng)
{:ok, user} = User.update_and_set_cache(user_cng)
shell_info("Moderator status of #{user.nickname}: #{user.info.is_moderator}") shell_info("Moderator status of #{user.nickname}: #{user.info.is_moderator}")
user user
end end
defp set_admin(user, value) do defp set_admin(user, value) do
info_cng = User.Info.admin_api_update(user.info, %{is_admin: value}) {:ok, user} = User.update_info(user, &User.Info.admin_api_update(&1, %{is_admin: value}))
user_cng =
Ecto.Changeset.change(user)
|> put_embed(:info, info_cng)
{:ok, user} = User.update_and_set_cache(user_cng)
shell_info("Admin status of #{user.nickname}: #{user.info.is_admin}") shell_info("Admin status of #{user.nickname}: #{user.info.is_admin}")
user user
end end
defp set_locked(user, value) do defp set_locked(user, value) do
info_cng = User.Info.user_upgrade(user.info, %{locked: value}) {:ok, user} = User.update_info(user, &User.Info.user_upgrade(&1, %{locked: value}))
user_cng =
Ecto.Changeset.change(user)
|> put_embed(:info, info_cng)
{:ok, user} = User.update_and_set_cache(user_cng)
shell_info("Locked status of #{user.nickname}: #{user.info.locked}") shell_info("Locked status of #{user.nickname}: #{user.info.locked}")
user user
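The three helpers now share one shape: build an `info` changeset and hand it to `User.update_info/2`, which persists it and refreshes the cache. A condensed sketch of that pattern (the helper name is hypothetical; it assumes the same `User.Info.admin_api_update/2` used above):

```elixir
# Hypothetical condensation of set_moderator/set_admin: flip a single
# boolean flag on user.info and report the new value.
defp set_info_flag(user, flag, value) do
  {:ok, user} = User.update_info(user, &User.Info.admin_api_update(&1, %{flag => value}))
  shell_info("#{flag} of #{user.nickname}: #{Map.get(user.info, flag)}")
  user
end
```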
@ -6,6 +6,7 @@ defmodule Pleroma.Activity do
use Ecto.Schema use Ecto.Schema
alias Pleroma.Activity alias Pleroma.Activity
alias Pleroma.Activity.Queries
alias Pleroma.ActivityExpiration alias Pleroma.ActivityExpiration
alias Pleroma.Bookmark alias Pleroma.Bookmark
alias Pleroma.Notification alias Pleroma.Notification
@ -20,7 +21,7 @@ defmodule Pleroma.Activity do
@type t :: %__MODULE__{} @type t :: %__MODULE__{}
@type actor :: String.t() @type actor :: String.t()
@primary_key {:id, Pleroma.FlakeId, autogenerate: true} @primary_key {:id, FlakeId.Ecto.CompatType, autogenerate: true}
# https://github.com/tootsuite/mastodon/blob/master/app/models/notification.rb#L19 # https://github.com/tootsuite/mastodon/blob/master/app/models/notification.rb#L19
@mastodon_notification_types %{ @mastodon_notification_types %{
@ -65,8 +66,8 @@ defmodule Pleroma.Activity do
timestamps() timestamps()
end end
def with_joined_object(query) do def with_joined_object(query, join_type \\ :inner) do
join(query, :inner, [activity], o in Object, join(query, join_type, [activity], o in Object,
on: on:
fragment( fragment(
"(?->>'id') = COALESCE(?->'object'->>'id', ?->>'object')", "(?->>'id') = COALESCE(?->'object'->>'id', ?->>'object')",
@ -78,10 +79,10 @@ def with_joined_object(query) do
) )
end end
def with_preloaded_object(query) do def with_preloaded_object(query, join_type \\ :inner) do
query query
|> has_named_binding?(:object) |> has_named_binding?(:object)
|> if(do: query, else: with_joined_object(query)) |> if(do: query, else: with_joined_object(query, join_type))
|> preload([activity, object: object], object: object) |> preload([activity, object: object], object: object)
end end
@ -107,12 +108,9 @@ def with_set_thread_muted_field(query, %User{} = user) do
def with_set_thread_muted_field(query, _), do: query def with_set_thread_muted_field(query, _), do: query
def get_by_ap_id(ap_id) do def get_by_ap_id(ap_id) do
Repo.one( ap_id
from( |> Queries.by_ap_id()
activity in Activity, |> Repo.one()
where: fragment("(?)->>'id' = ?", activity.data, ^to_string(ap_id))
)
)
end end
def get_bookmark(%Activity{} = activity, %User{} = user) do def get_bookmark(%Activity{} = activity, %User{} = user) do
@ -133,91 +131,55 @@ def change(struct, params \\ %{}) do
end end
def get_by_ap_id_with_object(ap_id) do def get_by_ap_id_with_object(ap_id) do
Repo.one( ap_id
from( |> Queries.by_ap_id()
activity in Activity, |> with_preloaded_object(:left)
where: fragment("(?)->>'id' = ?", activity.data, ^to_string(ap_id)), |> Repo.one()
left_join: o in Object,
on:
fragment(
"(?->>'id') = COALESCE(?->'object'->>'id', ?->>'object')",
o.data,
activity.data,
activity.data
),
preload: [object: o]
)
)
end end
@spec get_by_id(String.t()) :: Activity.t() | nil
def get_by_id(id) do def get_by_id(id) do
case FlakeId.flake_id?(id) do
true ->
Activity Activity
|> where([a], a.id == ^id) |> where([a], a.id == ^id)
|> restrict_deactivated_users() |> restrict_deactivated_users()
|> Repo.one() |> Repo.one()
_ ->
nil
end
end end
def get_by_id_with_object(id) do def get_by_id_with_object(id) do
from(activity in Activity, Activity
where: activity.id == ^id, |> where(id: ^id)
inner_join: o in Object, |> with_preloaded_object()
on:
fragment(
"(?->>'id') = COALESCE(?->'object'->>'id', ?->>'object')",
o.data,
activity.data,
activity.data
),
preload: [object: o]
)
|> Repo.one() |> Repo.one()
end end
def by_object_ap_id(ap_id) do def all_by_ids_with_object(ids) do
from( Activity
activity in Activity, |> where([a], a.id in ^ids)
where: |> with_preloaded_object()
fragment( |> Repo.all()
"coalesce((?)->'object'->>'id', (?)->>'object') = ?",
activity.data,
activity.data,
^to_string(ap_id)
)
)
end end
def create_by_object_ap_id(ap_ids) when is_list(ap_ids) do @doc """
from( Accepts `ap_id` or list of `ap_id`.
activity in Activity, Returns a query.
where: """
fragment( @spec create_by_object_ap_id(String.t() | [String.t()]) :: Ecto.Queryable.t()
"coalesce((?)->'object'->>'id', (?)->>'object') = ANY(?)", def create_by_object_ap_id(ap_id) do
activity.data, ap_id
activity.data, |> Queries.by_object_id()
^ap_ids |> Queries.by_type("Create")
),
where: fragment("(?)->>'type' = 'Create'", activity.data)
)
end end
def create_by_object_ap_id(ap_id) when is_binary(ap_id) do
from(
activity in Activity,
where:
fragment(
"coalesce((?)->'object'->>'id', (?)->>'object') = ?",
activity.data,
activity.data,
^to_string(ap_id)
),
where: fragment("(?)->>'type' = 'Create'", activity.data)
)
end
def create_by_object_ap_id(_), do: nil
def get_all_create_by_object_ap_id(ap_id) do def get_all_create_by_object_ap_id(ap_id) do
Repo.all(create_by_object_ap_id(ap_id)) ap_id
|> create_by_object_ap_id()
|> Repo.all()
end end
def get_create_by_object_ap_id(ap_id) when is_binary(ap_id) do def get_create_by_object_ap_id(ap_id) when is_binary(ap_id) do
@ -228,54 +190,17 @@ def get_create_by_object_ap_id(ap_id) when is_binary(ap_id) do
def get_create_by_object_ap_id(_), do: nil def get_create_by_object_ap_id(_), do: nil
def create_by_object_ap_id_with_object(ap_ids) when is_list(ap_ids) do @doc """
from( Accepts `ap_id` or list of `ap_id`.
activity in Activity, Returns a query.
where: """
fragment( @spec create_by_object_ap_id_with_object(String.t() | [String.t()]) :: Ecto.Queryable.t()
"coalesce((?)->'object'->>'id', (?)->>'object') = ANY(?)", def create_by_object_ap_id_with_object(ap_id) do
activity.data, ap_id
activity.data, |> create_by_object_ap_id()
^ap_ids |> with_preloaded_object()
),
where: fragment("(?)->>'type' = 'Create'", activity.data),
inner_join: o in Object,
on:
fragment(
"(?->>'id') = COALESCE(?->'object'->>'id', ?->>'object')",
o.data,
activity.data,
activity.data
),
preload: [object: o]
)
end end
def create_by_object_ap_id_with_object(ap_id) when is_binary(ap_id) do
from(
activity in Activity,
where:
fragment(
"coalesce((?)->'object'->>'id', (?)->>'object') = ?",
activity.data,
activity.data,
^to_string(ap_id)
),
where: fragment("(?)->>'type' = 'Create'", activity.data),
inner_join: o in Object,
on:
fragment(
"(?->>'id') = COALESCE(?->'object'->>'id', ?->>'object')",
o.data,
activity.data,
activity.data
),
preload: [object: o]
)
end
def create_by_object_ap_id_with_object(_), do: nil
def get_create_by_object_ap_id_with_object(ap_id) when is_binary(ap_id) do def get_create_by_object_ap_id_with_object(ap_id) when is_binary(ap_id) do
ap_id ap_id
|> create_by_object_ap_id_with_object() |> create_by_object_ap_id_with_object()
@ -299,7 +224,8 @@ def normalize(ap_id) when is_binary(ap_id), do: get_by_ap_id_with_object(ap_id)
def normalize(_), do: nil def normalize(_), do: nil
def delete_by_ap_id(id) when is_binary(id) do def delete_by_ap_id(id) when is_binary(id) do
by_object_ap_id(id) id
|> Queries.by_object_id()
|> select([u], u) |> select([u], u)
|> Repo.delete_all() |> Repo.delete_all()
|> elem(1) |> elem(1)
@ -308,10 +234,19 @@ def delete_by_ap_id(id) when is_binary(id) do
%{data: %{"type" => "Create", "object" => %{"id" => ap_id}}} -> ap_id == id %{data: %{"type" => "Create", "object" => %{"id" => ap_id}}} -> ap_id == id
_ -> nil _ -> nil
end) end)
|> purge_web_resp_cache()
end end
def delete_by_ap_id(_), do: nil def delete_by_ap_id(_), do: nil
defp purge_web_resp_cache(%Activity{} = activity) do
%{path: path} = URI.parse(activity.data["id"])
Cachex.del(:web_resp_cache, path)
activity
end
defp purge_web_resp_cache(nil), do: nil
for {ap_type, type} <- @mastodon_notification_types do for {ap_type, type} <- @mastodon_notification_types do
def mastodon_notification_type(%Activity{data: %{"type" => unquote(ap_type)}}), def mastodon_notification_type(%Activity{data: %{"type" => unquote(ap_type)}}),
do: unquote(type) do: unquote(type)
@ -334,40 +269,19 @@ def all_by_actor_and_id(actor, status_ids) do
end end
def follow_requests_for_actor(%Pleroma.User{ap_id: ap_id}) do def follow_requests_for_actor(%Pleroma.User{ap_id: ap_id}) do
from( ap_id
a in Activity, |> Queries.by_object_id()
where: |> Queries.by_type("Follow")
fragment( |> where([a], fragment("? ->> 'state' = 'pending'", a.data))
"? ->> 'type' = 'Follow'",
a.data
),
where:
fragment(
"? ->> 'state' = 'pending'",
a.data
),
where:
fragment(
"coalesce((?)->'object'->>'id', (?)->>'object') = ?",
a.data,
a.data,
^ap_id
)
)
end
@spec query_by_actor(actor()) :: Ecto.Query.t()
def query_by_actor(actor) do
from(a in Activity, where: a.actor == ^actor)
end end
def restrict_deactivated_users(query) do def restrict_deactivated_users(query) do
deactivated_users =
from(u in User.Query.build(deactivated: true), select: u.ap_id)
|> Repo.all()
from(activity in query, from(activity in query,
where: where: activity.actor not in ^deactivated_users
fragment(
"? not in (SELECT ap_id FROM users WHERE info->'deactivated' @> 'true')",
activity.actor
)
) )
end end
@ -0,0 +1,63 @@
# Pleroma: A lightweight social networking server
# Copyright © 2017-2019 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only
defmodule Pleroma.Activity.Ir.Topics do
alias Pleroma.Object
alias Pleroma.Web.ActivityPub.Visibility
def get_activity_topics(activity) do
activity
|> Object.normalize()
|> generate_topics(activity)
|> List.flatten()
end
defp generate_topics(%{data: %{"type" => "Answer"}}, _) do
[]
end
defp generate_topics(object, activity) do
["user", "list"] ++ visibility_tags(object, activity)
end
defp visibility_tags(object, activity) do
case Visibility.get_visibility(activity) do
"public" ->
if activity.local do
["public", "public:local"]
else
["public"]
end
|> item_creation_tags(object, activity)
"direct" ->
["direct"]
_ ->
[]
end
end
defp item_creation_tags(tags, %{data: %{"type" => "Create"}} = object, activity) do
tags ++ hashtags_to_topics(object) ++ attachment_topics(object, activity)
end
defp item_creation_tags(tags, _, _) do
tags
end
defp hashtags_to_topics(%{data: %{"tag" => tags}}) do
tags
|> Enum.filter(&is_bitstring(&1))
|> Enum.map(fn tag -> "hashtag:" <> tag end)
end
defp hashtags_to_topics(_), do: []
defp attachment_topics(%{data: %{"attachment" => []}}, _act), do: []
defp attachment_topics(_object, %{local: true}), do: ["public:media", "public:local:media"]
defp attachment_topics(_object, _act), do: ["public:media"]
end
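To make the flattening above concrete, here is a sketch of the topic list this module would produce for a local, public `Create` whose object carries one hashtag and one attachment (the `activity` value is assumed):

```elixir
# Assuming `activity` is a local, public Create with a "cats" hashtag
# and at least one attachment on its object:
Pleroma.Activity.Ir.Topics.get_activity_topics(activity)
# => ["user", "list", "public", "public:local", "hashtag:cats",
#     "public:media", "public:local:media"]
```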
@ -1,5 +1,5 @@
# Pleroma: A lightweight social networking server # Pleroma: A lightweight social networking server
# Copyright © 2017-2018 Pleroma Authors <https://pleroma.social/> # Copyright © 2017-2019 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only # SPDX-License-Identifier: AGPL-3.0-only
defmodule Pleroma.Activity.Queries do defmodule Pleroma.Activity.Queries do
@ -13,6 +13,14 @@ defmodule Pleroma.Activity.Queries do
alias Pleroma.Activity alias Pleroma.Activity
@spec by_ap_id(query, String.t()) :: query
def by_ap_id(query \\ Activity, ap_id) do
from(
activity in query,
where: fragment("(?)->>'id' = ?", activity.data, ^to_string(ap_id))
)
end
@spec by_actor(query, String.t()) :: query @spec by_actor(query, String.t()) :: query
def by_actor(query \\ Activity, actor) do def by_actor(query \\ Activity, actor) do
from( from(
@ -21,8 +29,23 @@ def by_actor(query \\ Activity, actor) do
) )
end end
@spec by_object_id(query, String.t()) :: query @spec by_object_id(query, String.t() | [String.t()]) :: query
def by_object_id(query \\ Activity, object_id) do def by_object_id(query \\ Activity, object_id)
def by_object_id(query, object_ids) when is_list(object_ids) do
from(
activity in query,
where:
fragment(
"coalesce((?)->'object'->>'id', (?)->>'object') = ANY(?)",
activity.data,
activity.data,
^object_ids
)
)
end
def by_object_id(query, object_id) when is_binary(object_id) do
from(activity in query, from(activity in query,
where: where:
fragment( fragment(
@ -41,9 +64,4 @@ def by_type(query \\ Activity, activity_type) do
where: fragment("(?)->>'type' = ?", activity.data, ^activity_type) where: fragment("(?)->>'type' = ?", activity.data, ^activity_type)
) )
end end
@spec limit(query, pos_integer()) :: query
def limit(query \\ Activity, limit) do
from(activity in query, limit: ^limit)
end
end end
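Since every helper in this module returns an Ecto queryable, they compose with each other and with `Repo`; a sketch of fetching all Follow activities that target a given actor (the URL is illustrative):

```elixir
# Compose the query helpers, then run the resulting query through the Repo.
"https://example.com/users/lain"
|> Pleroma.Activity.Queries.by_object_id()
|> Pleroma.Activity.Queries.by_type("Follow")
|> Pleroma.Repo.all()
```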
@ -7,7 +7,6 @@ defmodule Pleroma.ActivityExpiration do
alias Pleroma.Activity alias Pleroma.Activity
alias Pleroma.ActivityExpiration alias Pleroma.ActivityExpiration
alias Pleroma.FlakeId
alias Pleroma.Repo alias Pleroma.Repo
import Ecto.Changeset import Ecto.Changeset
@ -17,7 +16,7 @@ defmodule Pleroma.ActivityExpiration do
@min_activity_lifetime :timer.hours(1) @min_activity_lifetime :timer.hours(1)
schema "activity_expirations" do schema "activity_expirations" do
belongs_to(:activity, Activity, type: FlakeId) belongs_to(:activity, Activity, type: FlakeId.Ecto.CompatType)
field(:scheduled_at, :naive_datetime) field(:scheduled_at, :naive_datetime)
end end
@ -31,34 +31,21 @@ def start(_type, _args) do
children = children =
[ [
Pleroma.Repo, Pleroma.Repo,
Pleroma.Scheduler,
Pleroma.Config.TransferTask, Pleroma.Config.TransferTask,
Pleroma.Emoji, Pleroma.Emoji,
Pleroma.Captcha, Pleroma.Captcha,
Pleroma.FlakeId, Pleroma.Daemons.ScheduledActivityDaemon,
Pleroma.ScheduledActivityWorker, Pleroma.Daemons.ActivityExpirationDaemon
Pleroma.ActivityExpirationWorker
] ++ ] ++
cachex_children() ++ cachex_children() ++
hackney_pool_children() ++ hackney_pool_children() ++
[ [
Pleroma.Web.Federator.RetryQueue,
Pleroma.Stats, Pleroma.Stats,
%{ Pleroma.JobQueueMonitor,
id: :web_push_init, {Oban, Pleroma.Config.get(Oban)}
start: {Task, :start_link, [&Pleroma.Web.Push.init/0]},
restart: :temporary
},
%{
id: :federator_init,
start: {Task, :start_link, [&Pleroma.Web.Federator.init/0]},
restart: :temporary
},
%{
id: :internal_fetch_init,
start: {Task, :start_link, [&Pleroma.Web.ActivityPub.InternalFetchActor.init/0]},
restart: :temporary
}
] ++ ] ++
task_children(@env) ++
oauth_cleanup_child(oauth_cleanup_enabled?()) ++ oauth_cleanup_child(oauth_cleanup_enabled?()) ++
streamer_child(@env) ++ streamer_child(@env) ++
chat_child(@env, chat_enabled?()) ++ chat_child(@env, chat_enabled?()) ++
@ -70,9 +57,7 @@ def start(_type, _args) do
# See http://elixir-lang.org/docs/stable/elixir/Supervisor.html # See http://elixir-lang.org/docs/stable/elixir/Supervisor.html
# for other strategies and supported options # for other strategies and supported options
opts = [strategy: :one_for_one, name: Pleroma.Supervisor] opts = [strategy: :one_for_one, name: Pleroma.Supervisor]
result = Supervisor.start_link(children, opts) Supervisor.start_link(children, opts)
:ok = after_supervisor_start()
result
end end
defp setup_instrumenters do defp setup_instrumenters do
@ -116,10 +101,16 @@ defp cachex_children do
build_cachex("object", default_ttl: 25_000, ttl_interval: 1000, limit: 2500), build_cachex("object", default_ttl: 25_000, ttl_interval: 1000, limit: 2500),
build_cachex("rich_media", default_ttl: :timer.minutes(120), limit: 5000), build_cachex("rich_media", default_ttl: :timer.minutes(120), limit: 5000),
build_cachex("scrubber", limit: 2500), build_cachex("scrubber", limit: 2500),
build_cachex("idempotency", expiration: idempotency_expiration(), limit: 2500) build_cachex("idempotency", expiration: idempotency_expiration(), limit: 2500),
build_cachex("web_resp", limit: 2500),
build_cachex("emoji_packs", expiration: emoji_packs_expiration(), limit: 10),
build_cachex("failed_proxy_url", limit: 2500)
] ]
end end
defp emoji_packs_expiration,
do: expiration(default: :timer.seconds(5 * 60), interval: :timer.seconds(60))
defp idempotency_expiration, defp idempotency_expiration,
do: expiration(default: :timer.seconds(6 * 60 * 60), interval: :timer.seconds(60)) do: expiration(default: :timer.seconds(6 * 60 * 60), interval: :timer.seconds(60))
@ -141,7 +132,7 @@ defp oauth_cleanup_enabled?,
defp streamer_child(:test), do: [] defp streamer_child(:test), do: []
defp streamer_child(_) do defp streamer_child(_) do
[Pleroma.Web.Streamer] [Pleroma.Web.Streamer.supervisor()]
end end
defp oauth_cleanup_child(true), defp oauth_cleanup_child(true),
@ -164,16 +155,38 @@ defp hackney_pool_children do
end end
end end
defp after_supervisor_start do defp task_children(:test) do
with digest_config <- Application.get_env(:pleroma, :email_notifications)[:digest], [
true <- digest_config[:active] do %{
PleromaJobQueue.schedule( id: :web_push_init,
digest_config[:schedule], start: {Task, :start_link, [&Pleroma.Web.Push.init/0]},
:digest_emails, restart: :temporary
Pleroma.DigestEmailWorker },
) %{
id: :federator_init,
start: {Task, :start_link, [&Pleroma.Web.Federator.init/0]},
restart: :temporary
}
]
end end
:ok defp task_children(_) do
[
%{
id: :web_push_init,
start: {Task, :start_link, [&Pleroma.Web.Push.init/0]},
restart: :temporary
},
%{
id: :federator_init,
start: {Task, :start_link, [&Pleroma.Web.Federator.init/0]},
restart: :temporary
},
%{
id: :internal_fetch_init,
start: {Task, :start_link, [&Pleroma.Web.ActivityPub.InternalFetchActor.init/0]},
restart: :temporary
}
]
end end
end end
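The supervision tree now starts Oban with whatever sits under the `Oban` config key (`{Oban, Pleroma.Config.get(Oban)}` above). A hedged sketch of what that configuration might look like in `config/*.exs`; the queue names and limits here are illustrative, see `docs/config.md` for the real defaults:

```elixir
# Illustrative Oban configuration consumed by the new child spec.
config :pleroma, Oban,
  repo: Pleroma.Repo,
  queues: [federator_outgoing: 50, mailer: 10]
```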
@ -42,7 +42,7 @@ defp loop(state) do
end end
def puts_activity(activity) do def puts_activity(activity) do
status = Pleroma.Web.MastodonAPI.StatusView.render("status.json", %{activity: activity}) status = Pleroma.Web.MastodonAPI.StatusView.render("show.json", %{activity: activity})
IO.puts("-- #{status.id} by #{status.account.display_name} (#{status.account.acct})") IO.puts("-- #{status.id} by #{status.account.display_name} (#{status.account.acct})")
IO.puts(HtmlSanitizeEx.strip_tags(status.content)) IO.puts(HtmlSanitizeEx.strip_tags(status.content))
IO.puts("") IO.puts("")
@ -10,20 +10,20 @@ defmodule Pleroma.Bookmark do
alias Pleroma.Activity alias Pleroma.Activity
alias Pleroma.Bookmark alias Pleroma.Bookmark
alias Pleroma.FlakeId
alias Pleroma.Repo alias Pleroma.Repo
alias Pleroma.User alias Pleroma.User
@type t :: %__MODULE__{} @type t :: %__MODULE__{}
schema "bookmarks" do schema "bookmarks" do
belongs_to(:user, User, type: FlakeId) belongs_to(:user, User, type: FlakeId.Ecto.CompatType)
belongs_to(:activity, Activity, type: FlakeId) belongs_to(:activity, Activity, type: FlakeId.Ecto.CompatType)
timestamps() timestamps()
end end
@spec create(FlakeId.t(), FlakeId.t()) :: {:ok, Bookmark.t()} | {:error, Changeset.t()} @spec create(FlakeId.Ecto.CompatType.t(), FlakeId.Ecto.CompatType.t()) ::
{:ok, Bookmark.t()} | {:error, Changeset.t()}
def create(user_id, activity_id) do def create(user_id, activity_id) do
attrs = %{ attrs = %{
user_id: user_id, user_id: user_id,
@ -37,7 +37,7 @@ def create(user_id, activity_id) do
|> Repo.insert() |> Repo.insert()
end end
@spec for_user_query(FlakeId.t()) :: Ecto.Query.t() @spec for_user_query(FlakeId.Ecto.CompatType.t()) :: Ecto.Query.t()
def for_user_query(user_id) do def for_user_query(user_id) do
Bookmark Bookmark
|> where(user_id: ^user_id) |> where(user_id: ^user_id)
@ -52,7 +52,8 @@ def get(user_id, activity_id) do
|> Repo.one() |> Repo.one()
end end
@spec destroy(FlakeId.t(), FlakeId.t()) :: {:ok, Bookmark.t()} | {:error, Changeset.t()} @spec destroy(FlakeId.Ecto.CompatType.t(), FlakeId.Ecto.CompatType.t()) ::
{:ok, Bookmark.t()} | {:error, Changeset.t()}
def destroy(user_id, activity_id) do def destroy(user_id, activity_id) do
from(b in Bookmark, from(b in Bookmark,
where: b.user_id == ^user_id, where: b.user_id == ^user_id,
@ -6,4 +6,16 @@ defmodule Pleroma.Constants do
use Const use Const
const(as_public, do: "https://www.w3.org/ns/activitystreams#Public") const(as_public, do: "https://www.w3.org/ns/activitystreams#Public")
const(object_internal_fields,
do: [
"likes",
"like_count",
"announcements",
"announcement_count",
"emoji",
"context_id",
"deleted_activity_id"
]
)
end end
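The new constant is presumably used to strip these server-managed keys from an object's data before it is handed elsewhere; a one-line illustration of that assumption:

```elixir
# Illustration only: drop the internal bookkeeping fields from an object map.
Map.drop(object.data, Pleroma.Constants.object_internal_fields())
```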
@ -67,6 +67,8 @@ def create_or_bump_for(activity, opts \\ []) do
participations = participations =
Enum.map(users, fn user -> Enum.map(users, fn user ->
User.increment_unread_conversation_count(conversation, user)
{:ok, participation} = {:ok, participation} =
Participation.create_for_user_and_conversation(user, conversation, opts) Participation.create_for_user_and_conversation(user, conversation, opts)
@ -13,10 +13,10 @@ defmodule Pleroma.Conversation.Participation do
import Ecto.Query import Ecto.Query
schema "conversation_participations" do schema "conversation_participations" do
belongs_to(:user, User, type: Pleroma.FlakeId) belongs_to(:user, User, type: FlakeId.Ecto.CompatType)
belongs_to(:conversation, Conversation) belongs_to(:conversation, Conversation)
field(:read, :boolean, default: false) field(:read, :boolean, default: false)
field(:last_activity_id, Pleroma.FlakeId, virtual: true) field(:last_activity_id, FlakeId.Ecto.CompatType, virtual: true)
has_many(:recipient_ships, RecipientShip) has_many(:recipient_ships, RecipientShip)
has_many(:recipients, through: [:recipient_ships, :user]) has_many(:recipients, through: [:recipient_ships, :user])
@ -52,6 +52,15 @@ def mark_as_read(participation) do
participation participation
|> read_cng(%{read: true}) |> read_cng(%{read: true})
|> Repo.update() |> Repo.update()
|> case do
{:ok, participation} ->
participation = Repo.preload(participation, :user)
User.set_unread_conversation_count(participation.user)
{:ok, participation}
error ->
error
end
end end
def mark_as_unread(participation) do def mark_as_unread(participation) do
@ -135,4 +144,12 @@ def set_recipients(participation, user_ids) do
{:ok, Repo.preload(participation, :recipients, force: true)} {:ok, Repo.preload(participation, :recipients, force: true)}
end end
def unread_conversation_count_for_user(user) do
from(p in __MODULE__,
where: p.user_id == ^user.id,
where: not p.read,
select: %{count: count(p.id)}
)
end
end end
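Note that `unread_conversation_count_for_user/1` returns a query rather than a number, so a caller is expected to run it through the Repo; a small sketch:

```elixir
# Run the unread-count query; the select above yields %{count: n}.
%{count: unread} =
  user
  |> Pleroma.Conversation.Participation.unread_conversation_count_for_user()
  |> Pleroma.Repo.one()
```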
@ -12,7 +12,7 @@ defmodule Pleroma.Conversation.Participation.RecipientShip do
import Ecto.Changeset import Ecto.Changeset
schema "conversation_participation_recipient_ships" do schema "conversation_participation_recipient_ships" do
belongs_to(:user, User, type: Pleroma.FlakeId) belongs_to(:user, User, type: FlakeId.Ecto.CompatType)
belongs_to(:participation, Participation) belongs_to(:participation, Participation)
end end
@ -2,13 +2,14 @@
# Copyright © 2019 Pleroma Authors <https://pleroma.social/> # Copyright © 2019 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only # SPDX-License-Identifier: AGPL-3.0-only
defmodule Pleroma.ActivityExpirationWorker do defmodule Pleroma.Daemons.ActivityExpirationDaemon do
alias Pleroma.Activity alias Pleroma.Activity
alias Pleroma.ActivityExpiration alias Pleroma.ActivityExpiration
alias Pleroma.Config alias Pleroma.Config
alias Pleroma.Repo alias Pleroma.Repo
alias Pleroma.User alias Pleroma.User
alias Pleroma.Web.CommonAPI alias Pleroma.Web.CommonAPI
require Logger require Logger
use GenServer use GenServer
import Ecto.Query import Ecto.Query
@ -49,7 +50,10 @@ def perform(:execute, expiration_id) do
def handle_info(:perform, state) do def handle_info(:perform, state) do
ActivityExpiration.due_expirations(@schedule_interval) ActivityExpiration.due_expirations(@schedule_interval)
|> Enum.each(fn expiration -> |> Enum.each(fn expiration ->
PleromaJobQueue.enqueue(:activity_expiration, __MODULE__, [:execute, expiration.id]) Pleroma.Workers.ActivityExpirationWorker.enqueue(
"activity_expiration",
%{"activity_expiration_id" => expiration.id}
)
end) end)
schedule_next() schedule_next()
@ -2,10 +2,11 @@
# Copyright © 2017-2019 Pleroma Authors <https://pleroma.social/> # Copyright © 2017-2019 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only # SPDX-License-Identifier: AGPL-3.0-only
defmodule Pleroma.DigestEmailWorker do defmodule Pleroma.Daemons.DigestEmailDaemon do
import Ecto.Query alias Pleroma.Repo
alias Pleroma.Workers.DigestEmailsWorker
@queue_name :digest_emails import Ecto.Query
def perform do def perform do
config = Pleroma.Config.get([:email_notifications, :digest]) config = Pleroma.Config.get([:email_notifications, :digest])
@ -20,8 +21,10 @@ def perform do
where: u.last_digest_emailed_at < datetime_add(^now, ^negative_interval, "day"), where: u.last_digest_emailed_at < datetime_add(^now, ^negative_interval, "day"),
select: u select: u
) )
|> Pleroma.Repo.all() |> Repo.all()
|> Enum.each(&PleromaJobQueue.enqueue(@queue_name, __MODULE__, [&1])) |> Enum.each(fn user ->
DigestEmailsWorker.enqueue("digest_email", %{"user_id" => user.id})
end)
end end
@doc """ @doc """
@ -2,7 +2,7 @@
# Copyright © 2017-2019 Pleroma Authors <https://pleroma.social/> # Copyright © 2017-2019 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only # SPDX-License-Identifier: AGPL-3.0-only
defmodule Pleroma.ScheduledActivityWorker do defmodule Pleroma.Daemons.ScheduledActivityDaemon do
@moduledoc """ @moduledoc """
Sends scheduled activities to the job queue. Sends scheduled activities to the job queue.
""" """
@ -11,6 +11,7 @@ defmodule Pleroma.ScheduledActivityWorker do
alias Pleroma.ScheduledActivity alias Pleroma.ScheduledActivity
alias Pleroma.User alias Pleroma.User
alias Pleroma.Web.CommonAPI alias Pleroma.Web.CommonAPI
use GenServer use GenServer
require Logger require Logger
@ -45,7 +46,10 @@ def perform(:execute, scheduled_activity_id) do
def handle_info(:perform, state) do def handle_info(:perform, state) do
ScheduledActivity.due_activities(@schedule_interval) ScheduledActivity.due_activities(@schedule_interval)
|> Enum.each(fn scheduled_activity -> |> Enum.each(fn scheduled_activity ->
PleromaJobQueue.enqueue(:scheduled_activities, __MODULE__, [:execute, scheduled_activity.id]) Pleroma.Workers.ScheduledActivityWorker.enqueue(
"execute",
%{"activity_id" => scheduled_activity.id}
)
end) end)
schedule_next() schedule_next()
lib/pleroma/delivery.ex (new file, 50 lines)
@ -0,0 +1,50 @@
# Pleroma: A lightweight social networking server
# Copyright © 2017-2019 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only
defmodule Pleroma.Delivery do
use Ecto.Schema
alias Pleroma.Delivery
alias Pleroma.Object
alias Pleroma.Repo
alias Pleroma.User
alias Pleroma.User
import Ecto.Changeset
import Ecto.Query
schema "deliveries" do
belongs_to(:user, User, type: FlakeId.Ecto.CompatType)
belongs_to(:object, Object)
end
def changeset(delivery, params \\ %{}) do
delivery
|> cast(params, [:user_id, :object_id])
|> validate_required([:user_id, :object_id])
|> foreign_key_constraint(:object_id)
|> foreign_key_constraint(:user_id)
|> unique_constraint(:user_id, name: :deliveries_user_id_object_id_index)
end
def create(object_id, user_id) do
%Delivery{}
|> changeset(%{user_id: user_id, object_id: object_id})
|> Repo.insert(on_conflict: :nothing)
end
def get(object_id, user_id) do
from(d in Delivery, where: d.user_id == ^user_id and d.object_id == ^object_id)
|> Repo.one()
end
# A hack because user delete activities have a fake id for whatever reason
# TODO: Get rid of this
def delete_all_by_object_id("pleroma:fake_object_id"), do: {0, []}
def delete_all_by_object_id(object_id) do
from(d in Delivery, where: d.object_id == ^object_id)
|> Repo.delete_all()
end
end
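A short usage sketch for the new schema: record a delivery of an object to a user, then read it back. Because `create/2` inserts with `on_conflict: :nothing`, recording the same pair twice is harmless.

```elixir
# Record that `object` was delivered to `user`, then look the row up.
{:ok, _delivery} = Pleroma.Delivery.create(object.id, user.id)
delivery = Pleroma.Delivery.get(object.id, user.id)
```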
@ -0,0 +1,73 @@
defmodule Pleroma.Docs.Generator do
@callback process(keyword()) :: {:ok, String.t()}
@spec process(module(), keyword()) :: {:ok, String.t()}
def process(implementation, descriptions) do
implementation.process(descriptions)
end
@spec uploaders_list() :: [module()]
def uploaders_list do
{:ok, modules} = :application.get_key(:pleroma, :modules)
Enum.filter(modules, fn module ->
name_as_list = Module.split(module)
List.starts_with?(name_as_list, ["Pleroma", "Uploaders"]) and
List.last(name_as_list) != "Uploader"
end)
end
@spec filters_list() :: [module()]
def filters_list do
{:ok, modules} = :application.get_key(:pleroma, :modules)
Enum.filter(modules, fn module ->
name_as_list = Module.split(module)
List.starts_with?(name_as_list, ["Pleroma", "Upload", "Filter"])
end)
end
@spec mrf_list() :: [module()]
def mrf_list do
{:ok, modules} = :application.get_key(:pleroma, :modules)
Enum.filter(modules, fn module ->
name_as_list = Module.split(module)
List.starts_with?(name_as_list, ["Pleroma", "Web", "ActivityPub", "MRF"]) and
length(name_as_list) > 4
end)
end
@spec richmedia_parsers() :: [module()]
def richmedia_parsers do
{:ok, modules} = :application.get_key(:pleroma, :modules)
Enum.filter(modules, fn module ->
name_as_list = Module.split(module)
List.starts_with?(name_as_list, ["Pleroma", "Web", "RichMedia", "Parsers"]) and
length(name_as_list) == 5
end)
end
end
defimpl Jason.Encoder, for: Tuple do
def encode(tuple, opts) do
Jason.Encode.list(Tuple.to_list(tuple), opts)
end
end
defimpl Jason.Encoder, for: [Regex, Function] do
def encode(term, opts) do
Jason.Encode.string(inspect(term), opts)
end
end
defimpl String.Chars, for: Regex do
def to_string(term) do
inspect(term)
end
end
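The protocol implementations at the end exist so that values appearing in `config/description.exs` without a native JSON form can still be encoded; a quick illustration:

```elixir
# Tuples become JSON arrays, regexes become their inspected string form.
Jason.encode!({:media, 1_000_000})
# => "[\"media\",1000000]"
Jason.encode!(~r/^https:/)
# => "\"~r/^https:/\""
```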
lib/pleroma/docs/json.ex (new file, 20 lines)
@ -0,0 +1,20 @@
defmodule Pleroma.Docs.JSON do
@behaviour Pleroma.Docs.Generator
@spec process(keyword()) :: {:ok, String.t()}
def process(descriptions) do
config_path = "docs/generate_config.json"
with {:ok, file} <- File.open(config_path, [:write]),
json <- generate_json(descriptions),
:ok <- IO.write(file, json),
:ok <- File.close(file) do
{:ok, config_path}
end
end
@spec generate_json([keyword()]) :: String.t()
def generate_json(descriptions) do
Jason.encode!(descriptions)
end
end
@ -0,0 +1,88 @@
defmodule Pleroma.Docs.Markdown do
@behaviour Pleroma.Docs.Generator
@spec process(keyword()) :: {:ok, String.t()}
def process(descriptions) do
config_path = "docs/generated_config.md"
{:ok, file} = File.open(config_path, [:utf8, :write])
IO.write(file, "# Generated configuration\n")
IO.write(file, "Date of generation: #{Date.utc_today()}\n\n")
IO.write(
file,
"This file describes the configuration; it is recommended to edit the relevant `*.secret.exs` file instead of the others found in the ``config`` directory.\n\n" <>
"If you run Pleroma with ``MIX_ENV=prod`` the file is ``prod.secret.exs``, otherwise it is ``dev.secret.exs``.\n\n"
)
for group <- descriptions do
if is_nil(group[:key]) do
IO.write(file, "## #{inspect(group[:group])}\n")
else
IO.write(file, "## #{inspect(group[:key])}\n")
end
IO.write(file, "#{group[:description]}\n")
for child <- group[:children] || [] do
print_child_header(file, child)
print_suggestions(file, child[:suggestions])
if child[:children] do
for subchild <- child[:children] do
print_child_header(file, subchild)
print_suggestions(file, subchild[:suggestions])
end
end
end
IO.write(file, "\n")
end
:ok = File.close(file)
{:ok, config_path}
end
defp print_child_header(file, %{key: key, type: type, description: description} = _child) do
IO.write(
file,
"- `#{inspect(key)}` (`#{inspect(type)}`): #{description} \n"
)
end
defp print_child_header(file, %{key: key, type: type} = _child) do
IO.write(file, "- `#{inspect(key)}` (`#{inspect(type)}`) \n")
end
defp print_suggestion(file, suggestion) when is_list(suggestion) do
IO.write(file, " `#{inspect(suggestion)}`\n")
end
defp print_suggestion(file, suggestion) when is_function(suggestion) do
IO.write(file, " `#{inspect(suggestion.())}`\n")
end
defp print_suggestion(file, suggestion, as_list \\ false) do
list_mark = if as_list, do: "- ", else: ""
IO.write(file, " #{list_mark}`#{inspect(suggestion)}`\n")
end
defp print_suggestions(_file, nil), do: nil
defp print_suggestions(_file, ""), do: nil
defp print_suggestions(file, suggestions) do
if length(suggestions) > 1 do
IO.write(file, "Suggestions:\n")
for suggestion <- suggestions do
print_suggestion(file, suggestion, true)
end
else
IO.write(file, " Suggestion: ")
print_suggestion(file, List.first(suggestions))
end
end
end
@ -17,7 +17,7 @@ defp instance_notify_email do
end end
defp user_url(user) do defp user_url(user) do
Helpers.o_status_url(Pleroma.Web.Endpoint, :feed_redirect, user.nickname) Helpers.feed_url(Pleroma.Web.Endpoint, :feed_redirect, user.id)
end end
def report(to, reporter, account, statuses, comment) do def report(to, reporter, account, statuses, comment) do
@ -9,6 +9,7 @@ defmodule Pleroma.Emails.Mailer do
The module contains functions to deliver email using Swoosh.Mailer. The module contains functions to deliver email using Swoosh.Mailer.
""" """
alias Pleroma.Workers.MailerWorker
alias Swoosh.DeliveryError alias Swoosh.DeliveryError
@otp_app :pleroma @otp_app :pleroma
@ -19,7 +20,12 @@ def enabled?, do: Pleroma.Config.get([__MODULE__, :enabled])
@doc "add email to queue" @doc "add email to queue"
def deliver_async(email, config \\ []) do def deliver_async(email, config \\ []) do
PleromaJobQueue.enqueue(:mailer, __MODULE__, [:deliver_async, email, config]) encoded_email =
email
|> :erlang.term_to_binary()
|> Base.encode64()
MailerWorker.enqueue("email", %{"encoded_email" => encoded_email, "config" => config})
end end
@doc "callback to perform send email from queue" @doc "callback to perform send email from queue"
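The queue side of this change is not shown in this hunk; presumably `Pleroma.Workers.MailerWorker` reverses the encoding before delivery, roughly like the following sketch (an assumption, not the actual worker code):

```elixir
# Assumed decode path in the worker: undo the Base64 + term_to_binary
# wrapping applied by deliver_async/2 above, then deliver the email.
email =
  encoded_email
  |> Base.decode64!()
  |> :erlang.binary_to_term()
```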
@ -4,24 +4,37 @@
defmodule Pleroma.Emoji do defmodule Pleroma.Emoji do
@moduledoc """ @moduledoc """
The emojis are loaded from: This GenServer stores in an ETS table the list of the loaded emojis,
and also allows to reload the list at runtime.
* emoji packs in INSTANCE-DIR/emoji
* the files: `config/emoji.txt` and `config/custom_emoji.txt`
* glob paths, nested folder is used as tag name for grouping e.g. priv/static/emoji/custom/nested_folder
This GenServer stores in an ETS table the list of the loaded emojis, and also allows to reload the list at runtime.
""" """
use GenServer use GenServer
alias Pleroma.Emoji.Loader
require Logger require Logger
@type pattern :: Regex.t() | module() | String.t()
@type patterns :: pattern() | [pattern()]
@type group_patterns :: keyword(patterns())
@ets __MODULE__.Ets @ets __MODULE__.Ets
@ets_options [:ordered_set, :protected, :named_table, {:read_concurrency, true}] @ets_options [
:ordered_set,
:protected,
:named_table,
{:read_concurrency, true}
]
defstruct [:code, :file, :tags, :safe_code, :safe_file]
@doc "Build emoji struct"
def build({code, file, tags}) do
%__MODULE__{
code: code,
file: file,
tags: tags,
safe_code: Pleroma.HTML.strip_tags(code),
safe_file: Pleroma.HTML.strip_tags(file)
}
end
def build({code, file}), do: build({code, file, []})
@doc false @doc false
def start_link(_) do def start_link(_) do
@ -44,11 +57,14 @@ def get(name) do
end end
@doc "Returns all the emojos!!" @doc "Returns all the emojos!!"
@spec get_all() :: [{String.t(), String.t()}, ...] @spec get_all() :: list({String.t(), String.t(), String.t()})
def get_all do def get_all do
:ets.tab2list(@ets) :ets.tab2list(@ets)
end end
@doc "Clear out old emojis"
def clear_all, do: :ets.delete_all_objects(@ets)
@doc false @doc false
def init(_) do def init(_) do
@ets = :ets.new(@ets, @ets_options) @ets = :ets.new(@ets, @ets_options)
@ -58,13 +74,13 @@ def init(_) do
@doc false @doc false
def handle_cast(:reload, state) do def handle_cast(:reload, state) do
load() update_emojis(Loader.load())
{:noreply, state} {:noreply, state}
end end
@doc false @doc false
def handle_call(:reload, _from, state) do def handle_call(:reload, _from, state) do
load() update_emojis(Loader.load())
{:reply, :ok, state} {:reply, :ok, state}
end end
@ -75,189 +91,11 @@ def terminate(_, _) do
@doc false @doc false
def code_change(_old_vsn, state, _extra) do def code_change(_old_vsn, state, _extra) do
load() update_emojis(Loader.load())
{:ok, state} {:ok, state}
end end
defp load do defp update_emojis(emojis) do
emoji_dir_path = :ets.insert(@ets, emojis)
Path.join(
Pleroma.Config.get!([:instance, :static_dir]),
"emoji"
)
emoji_groups = Pleroma.Config.get([:emoji, :groups])
case File.ls(emoji_dir_path) do
{:error, :enoent} ->
# The custom emoji directory doesn't exist,
# don't do anything
nil
{:error, e} ->
# There was some other error
Logger.error("Could not access the custom emoji directory #{emoji_dir_path}: #{e}")
{:ok, results} ->
grouped =
Enum.group_by(results, fn file -> File.dir?(Path.join(emoji_dir_path, file)) end)
packs = grouped[true] || []
files = grouped[false] || []
# Print the packs we've found
Logger.info("Found emoji packs: #{Enum.join(packs, ", ")}")
if not Enum.empty?(files) do
Logger.warn(
"Found files in the emoji folder. These will be ignored, please move them to a subdirectory\nFound files: #{
Enum.join(files, ", ")
}"
)
end
emojis =
Enum.flat_map(
packs,
fn pack -> load_pack(Path.join(emoji_dir_path, pack), emoji_groups) end
)
true = :ets.insert(@ets, emojis)
end
# Compat thing for old custom emoji handling & default emoji,
# it should run even if there are no emoji packs
shortcode_globs = Pleroma.Config.get([:emoji, :shortcode_globs], [])
emojis =
(load_from_file("config/emoji.txt", emoji_groups) ++
load_from_file("config/custom_emoji.txt", emoji_groups) ++
load_from_globs(shortcode_globs, emoji_groups))
|> Enum.reject(fn value -> value == nil end)
true = :ets.insert(@ets, emojis)
:ok
end
defp load_pack(pack_dir, emoji_groups) do
pack_name = Path.basename(pack_dir)
emoji_txt = Path.join(pack_dir, "emoji.txt")
if File.exists?(emoji_txt) do
load_from_file(emoji_txt, emoji_groups)
else
extensions = Pleroma.Config.get([:emoji, :pack_extensions])
Logger.info(
"No emoji.txt found for pack \"#{pack_name}\", assuming all #{Enum.join(extensions, ", ")} files are emoji"
)
make_shortcode_to_file_map(pack_dir, extensions)
|> Enum.map(fn {shortcode, rel_file} ->
filename = Path.join("/emoji/#{pack_name}", rel_file)
{shortcode, filename, [to_string(match_extra(emoji_groups, filename))]}
end)
end
end
def make_shortcode_to_file_map(pack_dir, exts) do
find_all_emoji(pack_dir, exts)
|> Enum.map(&Path.relative_to(&1, pack_dir))
|> Enum.map(fn f -> {f |> Path.basename() |> Path.rootname(), f} end)
|> Enum.into(%{})
end
def find_all_emoji(dir, exts) do
Enum.reduce(
File.ls!(dir),
[],
fn f, acc ->
filepath = Path.join(dir, f)
if File.dir?(filepath) do
acc ++ find_all_emoji(filepath, exts)
else
acc ++ [filepath]
end
end
)
|> Enum.filter(fn f -> Path.extname(f) in exts end)
end
defp load_from_file(file, emoji_groups) do
if File.exists?(file) do
load_from_file_stream(File.stream!(file), emoji_groups)
else
[]
end
end
defp load_from_file_stream(stream, emoji_groups) do
stream
|> Stream.map(&String.trim/1)
|> Stream.map(fn line ->
case String.split(line, ~r/,\s*/) do
[name, file] ->
{name, file, [to_string(match_extra(emoji_groups, file))]}
[name, file | tags] ->
{name, file, tags}
_ ->
nil
end
end)
|> Enum.to_list()
end
defp load_from_globs(globs, emoji_groups) do
static_path = Path.join(:code.priv_dir(:pleroma), "static")
paths =
Enum.map(globs, fn glob ->
Path.join(static_path, glob)
|> Path.wildcard()
end)
|> Enum.concat()
Enum.map(paths, fn path ->
tag = match_extra(emoji_groups, Path.join("/", Path.relative_to(path, static_path)))
shortcode = Path.basename(path, Path.extname(path))
external_path = Path.join("/", Path.relative_to(path, static_path))
{shortcode, external_path, [to_string(tag)]}
end)
end
@doc """
Finds a matching group for the given emoji filename
"""
@spec match_extra(group_patterns(), String.t()) :: atom() | nil
def match_extra(group_patterns, filename) do
match_group_patterns(group_patterns, fn pattern ->
case pattern do
%Regex{} = regex -> Regex.match?(regex, filename)
string when is_binary(string) -> filename == string
end
end)
end
defp match_group_patterns(group_patterns, matcher) do
Enum.find_value(group_patterns, fn {group, patterns} ->
patterns =
patterns
|> List.wrap()
|> Enum.map(fn pattern ->
if String.contains?(pattern, "*") do
~r(#{String.replace(pattern, "*", ".*")})
else
pattern
end
end)
Enum.any?(patterns, matcher) && group
end)
end end
end end
@ -0,0 +1,59 @@
# Pleroma: A lightweight social networking server
# Copyright © 2017-2019 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only
defmodule Pleroma.Emoji.Formatter do
alias Pleroma.Emoji
alias Pleroma.HTML
alias Pleroma.Web.MediaProxy
def emojify(text) do
emojify(text, Emoji.get_all())
end
def emojify(text, nil), do: text
def emojify(text, emoji, strip \\ false) do
Enum.reduce(emoji, text, fn
{_, %Emoji{safe_code: emoji, safe_file: file}}, text ->
String.replace(text, ":#{emoji}:", prepare_emoji_html(emoji, file, strip))
{unsafe_emoji, unsafe_file}, text ->
emoji = HTML.strip_tags(unsafe_emoji)
file = HTML.strip_tags(unsafe_file)
String.replace(text, ":#{emoji}:", prepare_emoji_html(emoji, file, strip))
end)
|> HTML.filter_tags()
end
defp prepare_emoji_html(_emoji, _file, true), do: ""
defp prepare_emoji_html(emoji, file, _strip) do
"<img class='emoji' alt='#{emoji}' title='#{emoji}' src='#{MediaProxy.url(file)}' />"
end
def demojify(text) do
emojify(text, Emoji.get_all(), true)
end
def demojify(text, nil), do: text
@doc "Outputs a list of the emoji-shortcodes in a text"
def get_emoji(text) when is_binary(text) do
Enum.filter(Emoji.get_all(), fn {emoji, %Emoji{}} ->
String.contains?(text, ":#{emoji}:")
end)
end
def get_emoji(_), do: []
@doc "Outputs a list of the emoji-Maps in a text"
def get_emoji_map(text) when is_binary(text) do
get_emoji(text)
|> Enum.reduce(%{}, fn {name, %Emoji{file: file}}, acc ->
Map.put(acc, name, "#{Pleroma.Web.Endpoint.static_url()}#{file}")
end)
end
def get_emoji_map(_), do: []
end
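A brief sketch of how the formatter behaves, assuming a custom `:firefox:` emoji has been loaded (the exact markup after `HTML.filter_tags/1` may differ slightly):

```elixir
# Replace known shortcodes with <img> tags, or map them to their URLs.
Pleroma.Emoji.Formatter.emojify("hello :firefox:")
# => roughly: "hello <img class=\"emoji\" alt=\"firefox\" title=\"firefox\" src=\"/emoji/firefox.png\"/>"

Pleroma.Emoji.Formatter.get_emoji_map("hello :firefox:")
# => %{"firefox" => "https://your.instance/emoji/firefox.png"}
```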
lib/pleroma/emoji/loader.ex (new file, 224 lines)
@ -0,0 +1,224 @@
# Pleroma: A lightweight social networking server
# Copyright © 2017-2019 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only
defmodule Pleroma.Emoji.Loader do
@moduledoc """
Loads emoji from:
* emoji packs in INSTANCE-DIR/emoji
* the files: `config/emoji.txt` and `config/custom_emoji.txt`
* glob paths, nested folder is used as tag name for grouping e.g. priv/static/emoji/custom/nested_folder
"""
alias Pleroma.Config
alias Pleroma.Emoji
require Logger
@type pattern :: Regex.t() | module() | String.t()
@type patterns :: pattern() | [pattern()]
@type group_patterns :: keyword(patterns())
@type emoji :: {String.t(), Emoji.t()}
@doc """
Loads emojis from files/packs.
Returns a list of emojis in the format:
`{"000", "/emoji/freespeechextremist.com/000.png", ["Custom"]}`
"""
@spec load() :: list(emoji)
def load do
emoji_dir_path = Path.join(Config.get!([:instance, :static_dir]), "emoji")
emoji_groups = Config.get([:emoji, :groups])
emojis =
case File.ls(emoji_dir_path) do
{:error, :enoent} ->
# The custom emoji directory doesn't exist,
# don't do anything
[]
{:error, e} ->
# There was some other error
Logger.error("Could not access the custom emoji directory #{emoji_dir_path}: #{e}")
[]
{:ok, results} ->
grouped =
Enum.group_by(results, fn file ->
File.dir?(Path.join(emoji_dir_path, file))
end)
packs = grouped[true] || []
files = grouped[false] || []
# Print the packs we've found
Logger.info("Found emoji packs: #{Enum.join(packs, ", ")}")
if not Enum.empty?(files) do
Logger.warn(
"Found files in the emoji folder. These will be ignored, please move them to a subdirectory\nFound files: #{
Enum.join(files, ", ")
}"
)
end
emojis =
Enum.flat_map(packs, fn pack ->
load_pack(Path.join(emoji_dir_path, pack), emoji_groups)
end)
Emoji.clear_all()
emojis
end
# Compat thing for old custom emoji handling & default emoji,
# it should run even if there are no emoji packs
shortcode_globs = Config.get([:emoji, :shortcode_globs], [])
emojis_txt =
(load_from_file("config/emoji.txt", emoji_groups) ++
load_from_file("config/custom_emoji.txt", emoji_groups) ++
load_from_globs(shortcode_globs, emoji_groups))
|> Enum.reject(fn value -> value == nil end)
Enum.map(emojis ++ emojis_txt, &prepare_emoji/1)
end
defp prepare_emoji({code, _, _} = emoji), do: {code, Emoji.build(emoji)}
defp load_pack(pack_dir, emoji_groups) do
pack_name = Path.basename(pack_dir)
pack_file = Path.join(pack_dir, "pack.json")
if File.exists?(pack_file) do
contents = Jason.decode!(File.read!(pack_file))
contents["files"]
|> Enum.map(fn {name, rel_file} ->
filename = Path.join("/emoji/#{pack_name}", rel_file)
{name, filename, ["pack:#{pack_name}"]}
end)
else
# Load from emoji.txt / all files
emoji_txt = Path.join(pack_dir, "emoji.txt")
if File.exists?(emoji_txt) do
load_from_file(emoji_txt, emoji_groups)
else
extensions = Pleroma.Config.get([:emoji, :pack_extensions])
Logger.info(
"No emoji.txt found for pack \"#{pack_name}\", assuming all #{
Enum.join(extensions, ", ")
} files are emoji"
)
make_shortcode_to_file_map(pack_dir, extensions)
|> Enum.map(fn {shortcode, rel_file} ->
filename = Path.join("/emoji/#{pack_name}", rel_file)
{shortcode, filename, [to_string(match_extra(emoji_groups, filename))]}
end)
end
end
end
def make_shortcode_to_file_map(pack_dir, exts) do
find_all_emoji(pack_dir, exts)
|> Enum.map(&Path.relative_to(&1, pack_dir))
|> Enum.map(fn f -> {f |> Path.basename() |> Path.rootname(), f} end)
|> Enum.into(%{})
end
def find_all_emoji(dir, exts) do
dir
|> File.ls!()
|> Enum.flat_map(fn f ->
filepath = Path.join(dir, f)
if File.dir?(filepath) do
find_all_emoji(filepath, exts)
else
[filepath]
end
end)
|> Enum.filter(fn f -> Path.extname(f) in exts end)
end
defp load_from_file(file, emoji_groups) do
if File.exists?(file) do
load_from_file_stream(File.stream!(file), emoji_groups)
else
[]
end
end
defp load_from_file_stream(stream, emoji_groups) do
stream
|> Stream.map(&String.trim/1)
|> Stream.map(fn line ->
case String.split(line, ~r/,\s*/) do
[name, file] ->
{name, file, [to_string(match_extra(emoji_groups, file))]}
[name, file | tags] ->
{name, file, tags}
_ ->
nil
end
end)
|> Enum.to_list()
end
defp load_from_globs(globs, emoji_groups) do
static_path = Path.join(:code.priv_dir(:pleroma), "static")
paths =
Enum.map(globs, fn glob ->
Path.join(static_path, glob)
|> Path.wildcard()
end)
|> Enum.concat()
Enum.map(paths, fn path ->
tag = match_extra(emoji_groups, Path.join("/", Path.relative_to(path, static_path)))
shortcode = Path.basename(path, Path.extname(path))
external_path = Path.join("/", Path.relative_to(path, static_path))
{shortcode, external_path, [to_string(tag)]}
end)
end
@doc """
Finds a matching group for the given emoji filename
"""
@spec match_extra(group_patterns(), String.t()) :: atom() | nil
def match_extra(group_patterns, filename) do
match_group_patterns(group_patterns, fn pattern ->
case pattern do
%Regex{} = regex -> Regex.match?(regex, filename)
string when is_binary(string) -> filename == string
end
end)
end
defp match_group_patterns(group_patterns, matcher) do
Enum.find_value(group_patterns, fn {group, patterns} ->
patterns =
patterns
|> List.wrap()
|> Enum.map(fn pattern ->
if String.contains?(pattern, "*") do
~r(#{String.replace(pattern, "*", ".*")})
else
pattern
end
end)
Enum.any?(patterns, matcher) && group
end)
end
end
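Editorial illustration, not part of this diff: the text-file loaders above split each line on commas, so a hypothetical `config/custom_emoji.txt` line and the `{shortcode, file, tags}` tuples it produces look roughly like the sketch below (shortcodes and paths are made up).
# Hypothetical config/custom_emoji.txt:
#
#   firefox, /emoji/Firefox.gif
#   blobcat, /emoji/blobcat.png, Cute, Cats
#
"blobcat, /emoji/blobcat.png, Cute, Cats"
|> String.trim()
|> String.split(~r/,\s*/)
#=> ["blobcat", "/emoji/blobcat.png", "Cute", "Cats"]
#
# load_from_file_stream/2 then yields:
#   {"firefox", "/emoji/Firefox.gif", [""]}              # tag comes from the matching [:emoji, :groups] pattern, "" if none matches
#   {"blobcat", "/emoji/blobcat.png", ["Cute", "Cats"]}  # explicit tags are used as-is
# and prepare_emoji/1 wraps each tuple with Emoji.build/1 before registration.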

View file

@ -12,7 +12,7 @@ defmodule Pleroma.Filter do
alias Pleroma.User alias Pleroma.User
schema "filters" do schema "filters" do
belongs_to(:user, User, type: Pleroma.FlakeId) belongs_to(:user, User, type: FlakeId.Ecto.CompatType)
field(:filter_id, :integer) field(:filter_id, :integer)
field(:hide, :boolean, default: false) field(:hide, :boolean, default: false)
field(:whole_word, :boolean, default: true) field(:whole_word, :boolean, default: true)

View file

@ -1,182 +0,0 @@
# Pleroma: A lightweight social networking server
# Copyright © 2017-2019 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only
defmodule Pleroma.FlakeId do
@moduledoc """
Flake is a decentralized, k-ordered id generation service.
Adapted from:
* [flaky](https://github.com/nirvana/flaky), released under the terms of the Truly Free License,
* [Flake](https://github.com/boundary/flake), Copyright 2012, Boundary, Apache License, Version 2.0
"""
@type t :: binary
@behaviour Ecto.Type
use GenServer
require Logger
alias __MODULE__
import Kernel, except: [to_string: 1]
defstruct node: nil, time: 0, sq: 0
@doc "Converts a binary Flake to a String"
def to_string(<<0::integer-size(64), id::integer-size(64)>>) do
Kernel.to_string(id)
end
def to_string(<<_::integer-size(64), _::integer-size(48), _::integer-size(16)>> = flake) do
encode_base62(flake)
end
def to_string(s), do: s
def from_string(int) when is_integer(int) do
from_string(Kernel.to_string(int))
end
for i <- [-1, 0] do
def from_string(unquote(i)), do: <<0::integer-size(128)>>
def from_string(unquote(Kernel.to_string(i))), do: <<0::integer-size(128)>>
end
def from_string(<<_::integer-size(128)>> = flake), do: flake
def from_string(string) when is_binary(string) and byte_size(string) < 18 do
case Integer.parse(string) do
{id, ""} -> <<0::integer-size(64), id::integer-size(64)>>
_ -> nil
end
end
def from_string(string) do
string |> decode_base62 |> from_integer
end
def to_integer(<<integer::integer-size(128)>>), do: integer
def from_integer(integer) do
<<_time::integer-size(64), _node::integer-size(48), _seq::integer-size(16)>> =
<<integer::integer-size(128)>>
end
@doc "Generates a Flake"
@spec get :: binary
def get, do: to_string(:gen_server.call(:flake, :get))
# Checks that an ID is a valid FlakeId
#
@spec is_flake_id?(String.t()) :: boolean
def is_flake_id?(id), do: is_flake_id?(String.to_charlist(id), true)
defp is_flake_id?([c | cs], true) when c >= ?0 and c <= ?9, do: is_flake_id?(cs, true)
defp is_flake_id?([c | cs], true) when c >= ?A and c <= ?Z, do: is_flake_id?(cs, true)
defp is_flake_id?([c | cs], true) when c >= ?a and c <= ?z, do: is_flake_id?(cs, true)
defp is_flake_id?([], true), do: true
defp is_flake_id?(_, _), do: false
# -- Ecto.Type API
@impl Ecto.Type
def type, do: :uuid
@impl Ecto.Type
def cast(value) do
{:ok, FlakeId.to_string(value)}
end
@impl Ecto.Type
def load(value) do
{:ok, FlakeId.to_string(value)}
end
@impl Ecto.Type
def dump(value) do
{:ok, FlakeId.from_string(value)}
end
def autogenerate, do: get()
# -- GenServer API
def start_link(_) do
:gen_server.start_link({:local, :flake}, __MODULE__, [], [])
end
@impl GenServer
def init([]) do
{:ok, %FlakeId{node: worker_id(), time: time()}}
end
@impl GenServer
def handle_call(:get, _from, state) do
{flake, new_state} = get(time(), state)
{:reply, flake, new_state}
end
# Matches when the calling time is the same as the state time. Incr. sq
defp get(time, %FlakeId{time: time, node: node, sq: seq}) do
new_state = %FlakeId{time: time, node: node, sq: seq + 1}
{gen_flake(new_state), new_state}
end
# Matches when the times are different, reset sq
defp get(newtime, %FlakeId{time: time, node: node}) when newtime > time do
new_state = %FlakeId{time: newtime, node: node, sq: 0}
{gen_flake(new_state), new_state}
end
# Error when clock is running backwards
defp get(newtime, %FlakeId{time: time}) when newtime < time do
{:error, :clock_running_backwards}
end
defp gen_flake(%FlakeId{time: time, node: node, sq: seq}) do
<<time::integer-size(64), node::integer-size(48), seq::integer-size(16)>>
end
defp nthchar_base62(n) when n <= 9, do: ?0 + n
defp nthchar_base62(n) when n <= 35, do: ?A + n - 10
defp nthchar_base62(n), do: ?a + n - 36
defp encode_base62(<<integer::integer-size(128)>>) do
integer
|> encode_base62([])
|> List.to_string()
end
defp encode_base62(int, acc) when int < 0, do: encode_base62(-int, acc)
defp encode_base62(int, []) when int == 0, do: '0'
defp encode_base62(int, acc) when int == 0, do: acc
defp encode_base62(int, acc) do
r = rem(int, 62)
id = div(int, 62)
acc = [nthchar_base62(r) | acc]
encode_base62(id, acc)
end
defp decode_base62(s) do
decode_base62(String.to_charlist(s), 0)
end
defp decode_base62([c | cs], acc) when c >= ?0 and c <= ?9,
do: decode_base62(cs, 62 * acc + (c - ?0))
defp decode_base62([c | cs], acc) when c >= ?A and c <= ?Z,
do: decode_base62(cs, 62 * acc + (c - ?A + 10))
defp decode_base62([c | cs], acc) when c >= ?a and c <= ?z,
do: decode_base62(cs, 62 * acc + (c - ?a + 36))
defp decode_base62([], acc), do: acc
defp time do
{mega_seconds, seconds, micro_seconds} = :erlang.timestamp()
1_000_000_000 * mega_seconds + seconds * 1000 + :erlang.trunc(micro_seconds / 1000)
end
defp worker_id do
<<worker::integer-size(48)>> = :crypto.strong_rand_bytes(6)
worker
end
end
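Editorial illustration (this module is being removed in favour of the external flake_id library): a flake is a 128-bit binary laid out as time, then worker node, then sequence counter, which is exactly what gen_flake/1 above packs and what keeps ids k-ordered by time. A minimal decomposition sketch, using random bytes as a stand-in for a real flake:
# stand-in for a real 16-byte flake binary
flake_binary = :crypto.strong_rand_bytes(16)
# same layout gen_flake/1 produces: 64-bit time | 48-bit node | 16-bit sequence
<<time::integer-size(64), worker::integer-size(48), seq::integer-size(16)>> = flake_binary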

View file

@ -3,10 +3,8 @@
# SPDX-License-Identifier: AGPL-3.0-only # SPDX-License-Identifier: AGPL-3.0-only
defmodule Pleroma.Formatter do defmodule Pleroma.Formatter do
alias Pleroma.Emoji
alias Pleroma.HTML alias Pleroma.HTML
alias Pleroma.User alias Pleroma.User
alias Pleroma.Web.MediaProxy
@safe_mention_regex ~r/^(\s*(?<mentions>(@.+?\s+){1,})+)(?<rest>.*)/s @safe_mention_regex ~r/^(\s*(?<mentions>(@.+?\s+){1,})+)(?<rest>.*)/s
@link_regex ~r"((?:http(s)?:\/\/)?[\w.-]+(?:\.[\w\.-]+)+[\w\-\._~%:/?#[\]@!\$&'\(\)\*\+,;=.]+)|[0-9a-z+\-\.]+:[0-9a-z$-_.+!*'(),]+"ui @link_regex ~r"((?:http(s)?:\/\/)?[\w.-]+(?:\.[\w\.-]+)+[\w\-\._~%:/?#[\]@!\$&'\(\)\*\+,;=.]+)|[0-9a-z+\-\.]+:[0-9a-z$-_.+!*'(),]+"ui
@ -36,9 +34,9 @@ def mention_handler("@" <> nickname, buffer, opts, acc) do
nickname_text = get_nickname_text(nickname, opts) nickname_text = get_nickname_text(nickname, opts)
link = link =
"<span class='h-card'><a data-user='#{id}' class='u-url mention' href='#{ap_id}'>@<span>#{ ~s(<span class="h-card"><a data-user="#{id}" class="u-url mention" href="#{ap_id}" rel="ugc">@<span>#{
nickname_text nickname_text
}</span></a></span>" }</span></a></span>)
{link, %{acc | mentions: MapSet.put(acc.mentions, {"@" <> nickname, user})}} {link, %{acc | mentions: MapSet.put(acc.mentions, {"@" <> nickname, user})}}
@ -50,7 +48,7 @@ def mention_handler("@" <> nickname, buffer, opts, acc) do
def hashtag_handler("#" <> tag = tag_text, _buffer, _opts, acc) do def hashtag_handler("#" <> tag = tag_text, _buffer, _opts, acc) do
tag = String.downcase(tag) tag = String.downcase(tag)
url = "#{Pleroma.Web.base_url()}/tag/#{tag}" url = "#{Pleroma.Web.base_url()}/tag/#{tag}"
link = "<a class='hashtag' data-tag='#{tag}' href='#{url}' rel='tag'>#{tag_text}</a>" link = ~s(<a class="hashtag" data-tag="#{tag}" href="#{url}" rel="tag ugc">#{tag_text}</a>)
{link, %{acc | tags: MapSet.put(acc.tags, {tag_text, tag})}} {link, %{acc | tags: MapSet.put(acc.tags, {tag_text, tag})}}
end end
@ -100,51 +98,6 @@ def mentions_escape(text, options \\ []) do
end end
end end
def emojify(text) do
emojify(text, Emoji.get_all())
end
def emojify(text, nil), do: text
def emojify(text, emoji, strip \\ false) do
Enum.reduce(emoji, text, fn emoji_data, text ->
emoji = HTML.strip_tags(elem(emoji_data, 0))
file = HTML.strip_tags(elem(emoji_data, 1))
html =
if not strip do
"<img class='emoji' alt='#{emoji}' title='#{emoji}' src='#{MediaProxy.url(file)}' />"
else
""
end
String.replace(text, ":#{emoji}:", html) |> HTML.filter_tags()
end)
end
def demojify(text) do
emojify(text, Emoji.get_all(), true)
end
def demojify(text, nil), do: text
@doc "Outputs a list of the emoji-shortcodes in a text"
def get_emoji(text) when is_binary(text) do
Enum.filter(Emoji.get_all(), fn {emoji, _, _} -> String.contains?(text, ":#{emoji}:") end)
end
def get_emoji(_), do: []
@doc "Outputs a list of the emoji-Maps in a text"
def get_emoji_map(text) when is_binary(text) do
get_emoji(text)
|> Enum.reduce(%{}, fn {name, file, _group}, acc ->
Map.put(acc, name, "#{Pleroma.Web.Endpoint.static_url()}#{file}")
end)
end
def get_emoji_map(_), do: []
def html_escape({text, mentions, hashtags}, type) do def html_escape({text, mentions, hashtags}, type) do
{html_escape(text, type), mentions, hashtags} {html_escape(text, type), mentions, hashtags}
end end
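Editorial illustration (values made up): with the quoting change above, hashtag_handler/4 now emits double-quoted attributes and adds the `ugc` rel hint, so for the tag "#pleroma" on an instance at https://example.tld the generated link is roughly:
# <a class="hashtag" data-tag="pleroma" href="https://example.tld/tag/pleroma" rel="tag ugc">#pleroma</a>
Mention links change the same way, gaining rel="ugc" alongside the existing mention classes.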

View file

@ -9,10 +9,12 @@ defmodule Pleroma.Healthcheck do
alias Pleroma.Healthcheck alias Pleroma.Healthcheck
alias Pleroma.Repo alias Pleroma.Repo
@derive Jason.Encoder
defstruct pool_size: 0, defstruct pool_size: 0,
active: 0, active: 0,
idle: 0, idle: 0,
memory_used: 0, memory_used: 0,
job_queue_stats: nil,
healthy: true healthy: true
@type t :: %__MODULE__{ @type t :: %__MODULE__{
@ -20,6 +22,7 @@ defmodule Pleroma.Healthcheck do
active: non_neg_integer(), active: non_neg_integer(),
idle: non_neg_integer(), idle: non_neg_integer(),
memory_used: number(), memory_used: number(),
job_queue_stats: map(),
healthy: boolean() healthy: boolean()
} }
@ -29,6 +32,7 @@ def system_info do
memory_used: Float.round(:erlang.memory(:total) / 1024 / 1024, 2) memory_used: Float.round(:erlang.memory(:total) / 1024 / 1024, 2)
} }
|> assign_db_info() |> assign_db_info()
|> assign_job_queue_stats()
|> check_health() |> check_health()
end end
@ -54,6 +58,11 @@ defp assign_db_info(healthcheck) do
Map.merge(healthcheck, db_info) Map.merge(healthcheck, db_info)
end end
defp assign_job_queue_stats(healthcheck) do
stats = Pleroma.JobQueueMonitor.stats()
Map.put(healthcheck, :job_queue_stats, stats)
end
@spec check_health(Healthcheck.t()) :: Healthcheck.t() @spec check_health(Healthcheck.t()) :: Healthcheck.t()
def check_health(%{pool_size: pool_size, active: active} = check) def check_health(%{pool_size: pool_size, active: active} = check)
when active >= pool_size do when active >= pool_size do
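Editorial sketch (numbers invented) of what system_info/0 returns once the job queue stats are attached; job_queue_stats starts out as the JobQueueMonitor's initial state until Oban jobs have been processed:
Pleroma.Healthcheck.system_info()
#=> %Pleroma.Healthcheck{
#     pool_size: 10,
#     active: 3,
#     idle: 7,
#     memory_used: 245.3,
#     job_queue_stats: %{workers: %{}, queues: %{}, processed_jobs: 0},
#     healthy: true
#   }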

View file

@ -184,7 +184,8 @@ defmodule Pleroma.HTML.Scrubber.Default do
"tag", "tag",
"nofollow", "nofollow",
"noopener", "noopener",
"noreferrer" "noreferrer",
"ugc"
]) ])
Meta.allow_tag_with_these_attributes("a", ["name", "title"]) Meta.allow_tag_with_these_attributes("a", ["name", "title"])
@ -304,7 +305,8 @@ defmodule Pleroma.HTML.Scrubber.LinksOnly do
"nofollow", "nofollow",
"noopener", "noopener",
"noreferrer", "noreferrer",
"me" "me",
"ugc"
]) ])
Meta.allow_tag_with_these_attributes("a", ["name", "title"]) Meta.allow_tag_with_these_attributes("a", ["name", "title"])

View file

@ -90,7 +90,7 @@ def set_reachable(_), do: {:error, nil}
def set_unreachable(url_or_host, unreachable_since \\ nil) def set_unreachable(url_or_host, unreachable_since \\ nil)
def set_unreachable(url_or_host, unreachable_since) when is_binary(url_or_host) do def set_unreachable(url_or_host, unreachable_since) when is_binary(url_or_host) do
unreachable_since = unreachable_since || DateTime.utc_now() unreachable_since = parse_datetime(unreachable_since) || NaiveDateTime.utc_now()
host = host(url_or_host) host = host(url_or_host)
existing_record = Repo.get_by(Instance, %{host: host}) existing_record = Repo.get_by(Instance, %{host: host})
@ -114,4 +114,10 @@ def set_unreachable(url_or_host, unreachable_since) when is_binary(url_or_host)
end end
def set_unreachable(_, _), do: {:error, nil} def set_unreachable(_, _), do: {:error, nil}
defp parse_datetime(datetime) when is_binary(datetime) do
NaiveDateTime.from_iso8601(datetime)
end
defp parse_datetime(datetime), do: datetime
end end

View file

@ -0,0 +1,78 @@
# Pleroma: A lightweight social networking server
# Copyright © 2017-2019 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only
defmodule Pleroma.JobQueueMonitor do
use GenServer
@initial_state %{workers: %{}, queues: %{}, processed_jobs: 0}
@queue %{processed_jobs: 0, success: 0, failure: 0}
@operation %{processed_jobs: 0, success: 0, failure: 0}
def start_link(_) do
GenServer.start_link(__MODULE__, @initial_state, name: __MODULE__)
end
@impl true
def init(state) do
:telemetry.attach("oban-monitor-failure", [:oban, :failure], &handle_event/4, nil)
:telemetry.attach("oban-monitor-success", [:oban, :success], &handle_event/4, nil)
{:ok, state}
end
def stats do
GenServer.call(__MODULE__, :stats)
end
def handle_event([:oban, status], %{duration: duration}, meta, _) do
GenServer.cast(__MODULE__, {:process_event, status, duration, meta})
end
@impl true
def handle_call(:stats, _from, state) do
{:reply, state, state}
end
@impl true
def handle_cast({:process_event, status, duration, meta}, state) do
state =
state
|> Map.update!(:workers, fn workers ->
workers
|> Map.put_new(meta.worker, %{})
|> Map.update!(meta.worker, &update_worker(&1, status, meta, duration))
end)
|> Map.update!(:queues, fn workers ->
workers
|> Map.put_new(meta.queue, @queue)
|> Map.update!(meta.queue, &update_queue(&1, status, meta, duration))
end)
|> Map.update!(:processed_jobs, &(&1 + 1))
{:noreply, state}
end
defp update_worker(worker, status, meta, duration) do
worker
|> Map.put_new(meta.args["op"], @operation)
|> Map.update!(meta.args["op"], &update_op(&1, status, meta, duration))
end
defp update_op(op, :enqueue, _meta, _duration) do
op
|> Map.update!(:enqueued, &(&1 + 1))
end
defp update_op(op, status, _meta, _duration) do
op
|> Map.update!(:processed_jobs, &(&1 + 1))
|> Map.update!(status, &(&1 + 1))
end
defp update_queue(queue, status, _meta, _duration) do
queue
|> Map.update!(:processed_jobs, &(&1 + 1))
|> Map.update!(status, &(&1 + 1))
end
end
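Editorial illustration (worker, queue and operation names are made up): once a few Oban jobs have fired the success/failure telemetry events attached in init/1, stats/0 returns a map shaped like:
Pleroma.JobQueueMonitor.stats()
#=> %{
#     processed_jobs: 3,
#     queues: %{
#       "federator_outgoing" => %{processed_jobs: 3, success: 2, failure: 1}
#     },
#     workers: %{
#       "Pleroma.Workers.PublisherWorker" => %{
#         "publish_one" => %{processed_jobs: 3, success: 2, failure: 1}
#       }
#     }
#   }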

View file

@ -13,7 +13,7 @@ defmodule Pleroma.List do
alias Pleroma.User alias Pleroma.User
schema "lists" do schema "lists" do
belongs_to(:user, User, type: Pleroma.FlakeId) belongs_to(:user, User, type: FlakeId.Ecto.CompatType)
field(:title, :string) field(:title, :string)
field(:following, {:array, :string}, default: []) field(:following, {:array, :string}, default: [])
field(:ap_id, :string) field(:ap_id, :string)
@ -84,22 +84,11 @@ def get_lists_from_activity(%Activity{actor: ap_id}) do
end end
# Get lists to which the account belongs. # Get lists to which the account belongs.
def get_lists_account_belongs(%User{} = owner, account_id) do def get_lists_account_belongs(%User{} = owner, user) do
user = User.get_cached_by_id(account_id) Pleroma.List
|> where([l], l.user_id == ^owner.id)
query = |> where([l], fragment("? = ANY(?)", ^user.follower_address, l.following))
from( |> Repo.all()
l in Pleroma.List,
where:
l.user_id == ^owner.id and
fragment(
"? = ANY(?)",
^user.follower_address,
l.following
)
)
Repo.all(query)
end end
def rename(%Pleroma.List{} = list, title) do def rename(%Pleroma.List{} = list, title) do
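Editorial illustration (nicknames are placeholders): get_lists_account_belongs/2 now expects the account's %User{} struct instead of an account id, so a caller looks roughly like:
owner = Pleroma.User.get_cached_by_nickname("alice")
account = Pleroma.User.get_cached_by_nickname("bob")
lists = Pleroma.List.get_lists_account_belongs(owner, account)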

View file

@ -14,61 +14,143 @@ defmodule Pleroma.ModerationLog do
timestamps() timestamps()
end end
def get_all(page, page_size) do def get_all(params) do
from(q in __MODULE__, base_query =
order_by: [desc: q.inserted_at], get_all_query()
|> maybe_filter_by_date(params)
|> maybe_filter_by_user(params)
|> maybe_filter_by_search(params)
query_with_pagination = base_query |> paginate_query(params)
%{
items: Repo.all(query_with_pagination),
count: Repo.aggregate(base_query, :count, :id)
}
end
defp maybe_filter_by_date(query, %{start_date: nil, end_date: nil}), do: query
defp maybe_filter_by_date(query, %{start_date: start_date, end_date: nil}) do
from(q in query,
where: q.inserted_at >= ^parse_datetime(start_date)
)
end
defp maybe_filter_by_date(query, %{start_date: nil, end_date: end_date}) do
from(q in query,
where: q.inserted_at <= ^parse_datetime(end_date)
)
end
defp maybe_filter_by_date(query, %{start_date: start_date, end_date: end_date}) do
from(q in query,
where: q.inserted_at >= ^parse_datetime(start_date),
where: q.inserted_at <= ^parse_datetime(end_date)
)
end
defp maybe_filter_by_user(query, %{user_id: nil}), do: query
defp maybe_filter_by_user(query, %{user_id: user_id}) do
from(q in query,
where: fragment("(?)->'actor'->>'id' = ?", q.data, ^user_id)
)
end
defp maybe_filter_by_search(query, %{search: search}) when is_nil(search) or search == "",
do: query
defp maybe_filter_by_search(query, %{search: search}) do
from(q in query,
where: fragment("(?)->>'message' ILIKE ?", q.data, ^"%#{search}%")
)
end
defp paginate_query(query, %{page: page, page_size: page_size}) do
from(q in query,
limit: ^page_size, limit: ^page_size,
offset: ^((page - 1) * page_size) offset: ^((page - 1) * page_size)
) )
|> Repo.all()
end end
defp get_all_query do
from(q in __MODULE__,
order_by: [desc: q.inserted_at]
)
end
defp parse_datetime(datetime) do
{:ok, parsed_datetime, _} = DateTime.from_iso8601(datetime)
parsed_datetime
end
@spec insert_log(%{actor: User, subject: User, action: String.t(), permission: String.t()}) ::
{:ok, ModerationLog} | {:error, any}
def insert_log(%{ def insert_log(%{
actor: %User{} = actor, actor: %User{} = actor,
subject: %User{} = subject, subject: %User{} = subject,
action: action, action: action,
permission: permission permission: permission
}) do }) do
Repo.insert(%ModerationLog{ %ModerationLog{
data: %{ data: %{
actor: user_to_map(actor), "actor" => user_to_map(actor),
subject: user_to_map(subject), "subject" => user_to_map(subject),
action: action, "action" => action,
permission: permission "permission" => permission,
"message" => ""
} }
}) }
|> insert_log_entry_with_message()
end end
@spec insert_log(%{actor: User, subject: User, action: String.t()}) ::
{:ok, ModerationLog} | {:error, any}
def insert_log(%{ def insert_log(%{
actor: %User{} = actor, actor: %User{} = actor,
action: "report_update", action: "report_update",
subject: %Activity{data: %{"type" => "Flag"}} = subject subject: %Activity{data: %{"type" => "Flag"}} = subject
}) do }) do
Repo.insert(%ModerationLog{ %ModerationLog{
data: %{ data: %{
actor: user_to_map(actor), "actor" => user_to_map(actor),
action: "report_update", "action" => "report_update",
subject: report_to_map(subject) "subject" => report_to_map(subject),
"message" => ""
} }
}) }
|> insert_log_entry_with_message()
end end
@spec insert_log(%{actor: User, subject: Activity, action: String.t(), text: String.t()}) ::
{:ok, ModerationLog} | {:error, any}
def insert_log(%{ def insert_log(%{
actor: %User{} = actor, actor: %User{} = actor,
action: "report_response", action: "report_response",
subject: %Activity{} = subject, subject: %Activity{} = subject,
text: text text: text
}) do }) do
Repo.insert(%ModerationLog{ %ModerationLog{
data: %{ data: %{
actor: user_to_map(actor), "actor" => user_to_map(actor),
action: "report_response", "action" => "report_response",
subject: report_to_map(subject), "subject" => report_to_map(subject),
text: text "text" => text,
"message" => ""
} }
}) }
|> insert_log_entry_with_message()
end end
@spec insert_log(%{
actor: User,
subject: Activity,
action: String.t(),
sensitive: String.t(),
visibility: String.t()
}) :: {:ok, ModerationLog} | {:error, any}
def insert_log(%{ def insert_log(%{
actor: %User{} = actor, actor: %User{} = actor,
action: "status_update", action: "status_update",
@ -76,41 +158,49 @@ def insert_log(%{
sensitive: sensitive, sensitive: sensitive,
visibility: visibility visibility: visibility
}) do }) do
Repo.insert(%ModerationLog{ %ModerationLog{
data: %{ data: %{
actor: user_to_map(actor), "actor" => user_to_map(actor),
action: "status_update", "action" => "status_update",
subject: status_to_map(subject), "subject" => status_to_map(subject),
sensitive: sensitive, "sensitive" => sensitive,
visibility: visibility "visibility" => visibility,
"message" => ""
} }
}) }
|> insert_log_entry_with_message()
end end
@spec insert_log(%{actor: User, action: String.t(), subject_id: String.t()}) ::
{:ok, ModerationLog} | {:error, any}
def insert_log(%{ def insert_log(%{
actor: %User{} = actor, actor: %User{} = actor,
action: "status_delete", action: "status_delete",
subject_id: subject_id subject_id: subject_id
}) do }) do
Repo.insert(%ModerationLog{ %ModerationLog{
data: %{ data: %{
actor: user_to_map(actor), "actor" => user_to_map(actor),
action: "status_delete", "action" => "status_delete",
subject_id: subject_id "subject_id" => subject_id,
"message" => ""
} }
}) }
|> insert_log_entry_with_message()
end end
@spec insert_log(%{actor: User, subject: User, action: String.t()}) :: @spec insert_log(%{actor: User, subject: User, action: String.t()}) ::
{:ok, ModerationLog} | {:error, any} {:ok, ModerationLog} | {:error, any}
def insert_log(%{actor: %User{} = actor, subject: subject, action: action}) do def insert_log(%{actor: %User{} = actor, subject: subject, action: action}) do
Repo.insert(%ModerationLog{ %ModerationLog{
data: %{ data: %{
actor: user_to_map(actor), "actor" => user_to_map(actor),
action: action, "action" => action,
subject: user_to_map(subject) "subject" => user_to_map(subject),
"message" => ""
} }
}) }
|> insert_log_entry_with_message()
end end
@spec insert_log(%{actor: User, subjects: [User], action: String.t()}) :: @spec insert_log(%{actor: User, subjects: [User], action: String.t()}) ::
@ -118,97 +208,128 @@ def insert_log(%{actor: %User{} = actor, subject: subject, action: action}) do
def insert_log(%{actor: %User{} = actor, subjects: subjects, action: action}) do def insert_log(%{actor: %User{} = actor, subjects: subjects, action: action}) do
subjects = Enum.map(subjects, &user_to_map/1) subjects = Enum.map(subjects, &user_to_map/1)
Repo.insert(%ModerationLog{ %ModerationLog{
data: %{ data: %{
actor: user_to_map(actor), "actor" => user_to_map(actor),
action: action, "action" => action,
subjects: subjects "subjects" => subjects,
"message" => ""
} }
}) }
|> insert_log_entry_with_message()
end end
@spec insert_log(%{actor: User, action: String.t(), followed: User, follower: User}) ::
{:ok, ModerationLog} | {:error, any}
def insert_log(%{ def insert_log(%{
actor: %User{} = actor, actor: %User{} = actor,
followed: %User{} = followed, followed: %User{} = followed,
follower: %User{} = follower, follower: %User{} = follower,
action: "follow" action: "follow"
}) do }) do
Repo.insert(%ModerationLog{ %ModerationLog{
data: %{ data: %{
actor: user_to_map(actor), "actor" => user_to_map(actor),
action: "follow", "action" => "follow",
followed: user_to_map(followed), "followed" => user_to_map(followed),
follower: user_to_map(follower) "follower" => user_to_map(follower),
"message" => ""
} }
}) }
|> insert_log_entry_with_message()
end end
@spec insert_log(%{actor: User, action: String.t(), followed: User, follower: User}) ::
{:ok, ModerationLog} | {:error, any}
def insert_log(%{ def insert_log(%{
actor: %User{} = actor, actor: %User{} = actor,
followed: %User{} = followed, followed: %User{} = followed,
follower: %User{} = follower, follower: %User{} = follower,
action: "unfollow" action: "unfollow"
}) do }) do
Repo.insert(%ModerationLog{ %ModerationLog{
data: %{ data: %{
actor: user_to_map(actor), "actor" => user_to_map(actor),
action: "unfollow", "action" => "unfollow",
followed: user_to_map(followed), "followed" => user_to_map(followed),
follower: user_to_map(follower) "follower" => user_to_map(follower),
"message" => ""
} }
}) }
|> insert_log_entry_with_message()
end end
@spec insert_log(%{
actor: User,
action: String.t(),
nicknames: [String.t()],
tags: [String.t()]
}) :: {:ok, ModerationLog} | {:error, any}
def insert_log(%{ def insert_log(%{
actor: %User{} = actor, actor: %User{} = actor,
nicknames: nicknames, nicknames: nicknames,
tags: tags, tags: tags,
action: action action: action
}) do }) do
Repo.insert(%ModerationLog{ %ModerationLog{
data: %{ data: %{
actor: user_to_map(actor), "actor" => user_to_map(actor),
nicknames: nicknames, "nicknames" => nicknames,
tags: tags, "tags" => tags,
action: action "action" => action,
"message" => ""
} }
}) }
|> insert_log_entry_with_message()
end end
@spec insert_log(%{actor: User, action: String.t(), target: String.t()}) ::
{:ok, ModerationLog} | {:error, any}
def insert_log(%{ def insert_log(%{
actor: %User{} = actor, actor: %User{} = actor,
action: action, action: action,
target: target target: target
}) })
when action in ["relay_follow", "relay_unfollow"] do when action in ["relay_follow", "relay_unfollow"] do
Repo.insert(%ModerationLog{ %ModerationLog{
data: %{ data: %{
actor: user_to_map(actor), "actor" => user_to_map(actor),
action: action, "action" => action,
target: target "target" => target,
"message" => ""
} }
}) }
|> insert_log_entry_with_message()
end
@spec insert_log_entry_with_message(ModerationLog) :: {:ok, ModerationLog} | {:error, any}
defp insert_log_entry_with_message(entry) do
entry.data["message"]
|> put_in(get_log_entry_message(entry))
|> Repo.insert()
end end
defp user_to_map(%User{} = user) do defp user_to_map(%User{} = user) do
user user
|> Map.from_struct() |> Map.from_struct()
|> Map.take([:id, :nickname]) |> Map.take([:id, :nickname])
|> Map.put(:type, "user") |> Map.new(fn {k, v} -> {Atom.to_string(k), v} end)
|> Map.put("type", "user")
end end
defp report_to_map(%Activity{} = report) do defp report_to_map(%Activity{} = report) do
%{ %{
type: "report", "type" => "report",
id: report.id, "id" => report.id,
state: report.data["state"] "state" => report.data["state"]
} }
end end
defp status_to_map(%Activity{} = status) do defp status_to_map(%Activity{} = status) do
%{ %{
type: "status", "type" => "status",
id: status.id "id" => status.id
} }
end end
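Editorial sketch of the new get_all/1 interface (values invented): callers pass a single params map with paging and optional date/user/search filters and get back both the page of entries and the unpaginated count:
%{items: entries, count: total} =
  Pleroma.ModerationLog.get_all(%{
    page: 1,
    page_size: 50,
    start_date: "2019-10-01T00:00:00Z",
    end_date: nil,
    user_id: nil,
    search: nil
  })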

View file

@ -22,8 +22,8 @@ defmodule Pleroma.Notification do
schema "notifications" do schema "notifications" do
field(:seen, :boolean, default: false) field(:seen, :boolean, default: false)
belongs_to(:user, User, type: Pleroma.FlakeId) belongs_to(:user, User, type: FlakeId.Ecto.CompatType)
belongs_to(:activity, Activity, type: Pleroma.FlakeId) belongs_to(:activity, Activity, type: FlakeId.Ecto.CompatType)
timestamps() timestamps()
end end
@ -210,8 +210,10 @@ def create_notification(%Activity{} = activity, %User{} = user) do
unless skip?(activity, user) do unless skip?(activity, user) do
notification = %Notification{user_id: user.id, activity: activity} notification = %Notification{user_id: user.id, activity: activity}
{:ok, notification} = Repo.insert(notification) {:ok, notification} = Repo.insert(notification)
Streamer.stream("user", notification)
Streamer.stream("user:notification", notification) ["user", "user:notification"]
|> Streamer.stream(notification)
Push.send(notification) Push.send(notification)
notification notification
end end

View file

@ -38,6 +38,24 @@ def change(struct, params \\ %{}) do
def get_by_id(nil), do: nil def get_by_id(nil), do: nil
def get_by_id(id), do: Repo.get(Object, id) def get_by_id(id), do: Repo.get(Object, id)
def get_by_id_and_maybe_refetch(id, opts \\ []) do
%{updated_at: updated_at} = object = get_by_id(id)
if opts[:interval] &&
NaiveDateTime.diff(NaiveDateTime.utc_now(), updated_at) > opts[:interval] do
case Fetcher.refetch_object(object) do
{:ok, %Object{} = object} ->
object
e ->
Logger.error("Couldn't refresh #{object.data["id"]}:\n#{inspect(e)}")
object
end
else
object
end
end
def get_by_ap_id(nil), do: nil def get_by_ap_id(nil), do: nil
def get_by_ap_id(ap_id) do def get_by_ap_id(ap_id) do
@ -130,14 +148,16 @@ def swap_object_with_tombstone(object) do
def delete(%Object{data: %{"id" => id}} = object) do def delete(%Object{data: %{"id" => id}} = object) do
with {:ok, _obj} = swap_object_with_tombstone(object), with {:ok, _obj} = swap_object_with_tombstone(object),
deleted_activity = Activity.delete_by_ap_id(id), deleted_activity = Activity.delete_by_ap_id(id),
{:ok, true} <- Cachex.del(:object_cache, "object:#{id}") do {:ok, true} <- Cachex.del(:object_cache, "object:#{id}"),
{:ok, _} <- Cachex.del(:web_resp_cache, URI.parse(id).path) do
{:ok, object, deleted_activity} {:ok, object, deleted_activity}
end end
end end
def prune(%Object{data: %{"id" => id}} = object) do def prune(%Object{data: %{"id" => id}} = object) do
with {:ok, object} <- Repo.delete(object), with {:ok, object} <- Repo.delete(object),
{:ok, true} <- Cachex.del(:object_cache, "object:#{id}") do {:ok, true} <- Cachex.del(:object_cache, "object:#{id}"),
{:ok, _} <- Cachex.del(:web_resp_cache, URI.parse(id).path) do
{:ok, object} {:ok, object}
end end
end end
@ -228,4 +248,11 @@ def increase_vote_count(ap_id, name) do
_ -> :noop _ -> :noop
end end
end end
@doc "Updates data field of an object"
def update_data(%Object{data: data} = object, attrs \\ %{}) do
object
|> Object.change(%{data: Map.merge(data || %{}, attrs)})
|> Repo.update()
end
end end
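Editorial illustration (the id is a placeholder): the interval passed to get_by_id_and_maybe_refetch/2 is compared in seconds against the object's updated_at, so refreshing a remote poll at most once per minute looks roughly like:
object_id = "9hEkA5JsvAdlSrocam"  # placeholder flake id
object = Pleroma.Object.get_by_id_and_maybe_refetch(object_id, interval: 60)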

View file

@ -6,18 +6,40 @@ defmodule Pleroma.Object.Fetcher do
alias Pleroma.HTTP alias Pleroma.HTTP
alias Pleroma.Object alias Pleroma.Object
alias Pleroma.Object.Containment alias Pleroma.Object.Containment
alias Pleroma.Repo
alias Pleroma.Signature alias Pleroma.Signature
alias Pleroma.Web.ActivityPub.InternalFetchActor alias Pleroma.Web.ActivityPub.InternalFetchActor
alias Pleroma.Web.ActivityPub.Transmogrifier alias Pleroma.Web.ActivityPub.Transmogrifier
alias Pleroma.Web.OStatus alias Pleroma.Web.OStatus
require Logger require Logger
require Pleroma.Constants
defp reinject_object(data) do defp touch_changeset(changeset) do
updated_at =
NaiveDateTime.utc_now()
|> NaiveDateTime.truncate(:second)
Ecto.Changeset.put_change(changeset, :updated_at, updated_at)
end
defp maybe_reinject_internal_fields(data, %{data: %{} = old_data}) do
internal_fields = Map.take(old_data, Pleroma.Constants.object_internal_fields())
Map.merge(data, internal_fields)
end
defp maybe_reinject_internal_fields(data, _), do: data
@spec reinject_object(struct(), map()) :: {:ok, Object.t()} | {:error, any()}
defp reinject_object(struct, data) do
Logger.debug("Reinjecting object #{data["id"]}") Logger.debug("Reinjecting object #{data["id"]}")
with data <- Transmogrifier.fix_object(data), with data <- Transmogrifier.fix_object(data),
{:ok, object} <- Object.create(data) do data <- maybe_reinject_internal_fields(data, struct),
changeset <- Object.change(struct, %{data: data}),
changeset <- touch_changeset(changeset),
{:ok, object} <- Repo.insert_or_update(changeset) do
{:ok, object} {:ok, object}
else else
e -> e ->
@ -26,24 +48,24 @@ defp reinject_object(data) do
end end
end end
def refetch_object(%Object{data: %{"id" => id}} = object) do
with {:local, false} <- {:local, String.starts_with?(id, Pleroma.Web.base_url() <> "/")},
{:ok, data} <- fetch_and_contain_remote_object_from_id(id),
{:ok, object} <- reinject_object(object, data) do
{:ok, object}
else
{:local, true} -> object
e -> {:error, e}
end
end
# TODO: # TODO:
# This will create a Create activity, which we need internally at the moment. # This will create a Create activity, which we need internally at the moment.
def fetch_object_from_id(id, options \\ []) do def fetch_object_from_id(id, options \\ []) do
if object = Object.get_cached_by_ap_id(id) do with {:fetch_object, nil} <- {:fetch_object, Object.get_cached_by_ap_id(id)},
{:ok, object} {:fetch, {:ok, data}} <- {:fetch, fetch_and_contain_remote_object_from_id(id)},
else
Logger.info("Fetching #{id} via AP")
with {:fetch, {:ok, data}} <- {:fetch, fetch_and_contain_remote_object_from_id(id)},
{:normalize, nil} <- {:normalize, Object.normalize(data, false)}, {:normalize, nil} <- {:normalize, Object.normalize(data, false)},
params <- %{ params <- prepare_activity_params(data),
"type" => "Create",
"to" => data["to"],
"cc" => data["cc"],
# Should we seriously keep this attributedTo thing?
"actor" => data["actor"] || data["attributedTo"],
"object" => data
},
{:containment, :ok} <- {:containment, Containment.contain_origin(id, params)}, {:containment, :ok} <- {:containment, Containment.contain_origin(id, params)},
{:ok, activity} <- Transmogrifier.handle_incoming(params, options), {:ok, activity} <- Transmogrifier.handle_incoming(params, options),
{:object, _data, %Object{} = object} <- {:object, _data, %Object{} = object} <-
@ -57,11 +79,14 @@ def fetch_object_from_id(id, options \\ []) do
{:reject, nil} {:reject, nil}
{:object, data, nil} -> {:object, data, nil} ->
reinject_object(data) reinject_object(%Object{}, data)
{:normalize, object = %Object{}} -> {:normalize, object = %Object{}} ->
{:ok, object} {:ok, object}
{:fetch_object, %Object{} = object} ->
{:ok, object}
_e -> _e ->
# Only fallback when receiving a fetch/normalization error with ActivityPub # Only fallback when receiving a fetch/normalization error with ActivityPub
Logger.info("Couldn't get object via AP, trying out OStatus fetching...") Logger.info("Couldn't get object via AP, trying out OStatus fetching...")
@ -73,6 +98,16 @@ def fetch_object_from_id(id, options \\ []) do
end end
end end
end end
defp prepare_activity_params(data) do
%{
"type" => "Create",
"to" => data["to"],
"cc" => data["cc"],
# Should we seriously keep this attributedTo thing?
"actor" => data["actor"] || data["attributedTo"],
"object" => data
}
end end
def fetch_object_from_id!(id, options \\ []) do def fetch_object_from_id!(id, options \\ []) do
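Editorial illustration: refetch_object/1 refuses to refetch local objects and, via maybe_reinject_internal_fields/2, keeps the old record's internal fields when reinjecting. Assuming the caller already holds a remote %Object{} bound to `object`, a usage sketch is:
case Pleroma.Object.Fetcher.refetch_object(object) do
  {:ok, %Pleroma.Object{} = refreshed} -> refreshed
  {:error, _reason} -> object
end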

View file

@ -64,6 +64,7 @@ def paginate(query, options, :keyset) do
def paginate(query, options, :offset) do def paginate(query, options, :offset) do
query query
|> restrict(:order, options)
|> restrict(:offset, options) |> restrict(:offset, options)
|> restrict(:limit, options) |> restrict(:limit, options)
end end

View file

@ -12,7 +12,7 @@ defmodule Pleroma.PasswordResetToken do
alias Pleroma.User alias Pleroma.User
schema "password_reset_tokens" do schema "password_reset_tokens" do
belongs_to(:user, User, type: Pleroma.FlakeId) belongs_to(:user, User, type: FlakeId.Ecto.CompatType)
field(:token, :string) field(:token, :string)
field(:used, :boolean, default: false) field(:used, :boolean, default: false)

136
lib/pleroma/plugs/cache.ex Normal file
View file

@ -0,0 +1,136 @@
# Pleroma: A lightweight social networking server
# Copyright © 2017-2019 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only
defmodule Pleroma.Plugs.Cache do
@moduledoc """
Caches successful GET responses.
To enable the cache add the plug to a router pipeline or controller:
plug(Pleroma.Plugs.Cache)
## Configuration
To configure the plug you need to pass settings as the second argument to the `plug/2` macro:
plug(Pleroma.Plugs.Cache, [ttl: nil, query_params: true])
Available options:
- `ttl`: An expiration time (time-to-live). This value should be in milliseconds or `nil` to disable expiration. Defaults to `nil`.
- `query_params`: Take URL query string into account (`true`), ignore it (`false`) or limit to specific params only (list). Defaults to `true`.
- `tracking_fun`: A function that is called on successful responses, whether or not the response is served from the cache. It should accept a conn as the first argument and the value assigned to `tracking_fun_data` as the second.
Additionally, you can overwrite the TTL inside a controller action by assigning `cache_ttl` to the connection struct:
def index(conn, _params) do
ttl = 60_000 # one minute
conn
|> assign(:cache_ttl, ttl)
|> render("index.html")
end
"""
import Phoenix.Controller, only: [current_path: 1, json: 2]
import Plug.Conn
@behaviour Plug
@defaults %{ttl: nil, query_params: true}
@impl true
def init([]), do: @defaults
def init(opts) do
opts = Map.new(opts)
Map.merge(@defaults, opts)
end
@impl true
def call(%{method: "GET"} = conn, opts) do
key = cache_key(conn, opts)
case Cachex.get(:web_resp_cache, key) do
{:ok, nil} ->
cache_resp(conn, opts)
{:ok, {content_type, body, tracking_fun_data}} ->
conn = opts.tracking_fun.(conn, tracking_fun_data)
send_cached(conn, {content_type, body})
{:ok, record} ->
send_cached(conn, record)
{atom, message} when atom in [:ignore, :error] ->
render_error(conn, message)
end
end
def call(conn, _), do: conn
# full path including query params
defp cache_key(conn, %{query_params: true}), do: current_path(conn)
# request path without query params
defp cache_key(conn, %{query_params: false}), do: conn.request_path
# request path with specific query params
defp cache_key(conn, %{query_params: query_params}) when is_list(query_params) do
query_string =
conn.params
|> Map.take(query_params)
|> URI.encode_query()
conn.request_path <> "?" <> query_string
end
defp cache_resp(conn, opts) do
register_before_send(conn, fn
%{status: 200, resp_body: body} = conn ->
ttl = Map.get(conn.assigns, :cache_ttl, opts.ttl)
key = cache_key(conn, opts)
content_type = content_type(conn)
conn =
unless opts[:tracking_fun] do
Cachex.put(:web_resp_cache, key, {content_type, body}, ttl: ttl)
conn
else
tracking_fun_data = Map.get(conn.assigns, :tracking_fun_data, nil)
Cachex.put(:web_resp_cache, key, {content_type, body, tracking_fun_data}, ttl: ttl)
opts.tracking_fun.(conn, tracking_fun_data)
end
put_resp_header(conn, "x-cache", "MISS from Pleroma")
conn ->
conn
end)
end
defp content_type(conn) do
conn
|> Plug.Conn.get_resp_header("content-type")
|> hd()
end
defp send_cached(conn, {content_type, body}) do
conn
|> put_resp_content_type(content_type, nil)
|> put_resp_header("x-cache", "HIT from Pleroma")
|> send_resp(:ok, body)
|> halt()
end
defp render_error(conn, message) do
conn
|> put_status(:internal_server_error)
|> json(%{error: message})
|> halt()
end
end
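Editorial sketch of the tracking_fun option described in the moduledoc (MyApp.Stats.bump/1 and the assign value are invented): the plug hands the cached tracking_fun_data back to the fun on every hit, so a controller can keep counting views even when the body is served from the cache.
# in a controller or router pipeline:
plug(Pleroma.Plugs.Cache,
  ttl: 60_000,
  query_params: false,
  tracking_fun: fn conn, object_id ->
    MyApp.Stats.bump(object_id)
    conn
  end
)

# inside the controller action, before the response is sent:
conn
|> assign(:tracking_fun_data, object.id)
|> render("show.json", object: object)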

View file

@ -15,7 +15,8 @@ def call(%{assigns: %{valid_signature: true}} = conn, _opts) do
end end
def call(conn, _opts) do def call(conn, _opts) do
[signature | _] = get_req_header(conn, "signature") headers = get_req_header(conn, "signature")
signature = Enum.at(headers, 0)
if signature do if signature do
# set (request-target) header to the appropriate value # set (request-target) header to the appropriate value

Some files were not shown because too many files have changed in this diff.