Compare commits

6 commits

SHA1 Message Date
f3b532c49f include git 2022-07-17 20:20:39 +01:00
e414f96728 allow failure of deps clean 2022-07-17 20:15:25 +01:00
8cb3a5216b release on docker-build 2022-07-17 20:12:08 +01:00
1d5c6aeaf2 fix anchor syntax 2022-07-17 20:11:27 +01:00
78ec1d5b3a fix anchor usage 2022-07-17 20:09:17 +01:00
1fd756ed2a enable docker builds 2022-07-17 20:04:06 +01:00
537 changed files with 10837 additions and 47070 deletions

@@ -6,12 +6,12 @@ COPYING
*file
elixir_buildpack.config
test/
instance/
_build
deps
test
benchmarks
docs/site
docker-db
uploads
instance
# Required to get version
!.git

.gitattributes vendored

@@ -1,11 +1,10 @@
*.ex diff=elixir
*.exs diff=elixir
priv/static/instance/static.css diff=css
# Most of js/css files included in the repo are minified bundles,
# and we don't want to search/diff those as text files.
*.js binary
*.js.map binary
*.css binary
priv/static/instance/static.css diff=css
priv/static/static-fe/static-fe.css diff=css

@@ -1,85 +0,0 @@
name: "Bug report"
about: "Something isn't working as expected"
title: "[bug] "
body:
- type: markdown
attributes:
value: |
Thanks for taking the time to file this bug report! Please try to be as specific and detailed as you can, so we can track down the issue and fix it as soon as possible.
# General information
- type: dropdown
id: installation
attributes:
label: "Your setup"
description: "What sort of installation are you using?"
options:
- "OTP"
- "From source"
- "Docker"
validations:
required: true
- type: input
id: setup-details
attributes:
label: "Extra details"
description: "If installing from source or docker, please specify your distro or docker setup."
placeholder: "e.g. Alpine Linux edge"
- type: input
id: version
attributes:
label: "Version"
description: "Which version of Akkoma are you running? If running develop, specify the commit hash."
placeholder: "e.g. 2022.11, 4e4bd248"
- type: input
id: postgres
attributes:
label: "PostgreSQL version"
placeholder: "14"
validations:
required: true
- type: markdown
attributes:
value: "# The issue"
- type: textarea
id: attempt
attributes:
label: "What were you trying to do?"
validations:
required: true
- type: textarea
id: expectation
attributes:
label: "What did you expect to happen?"
validations:
required: true
- type: textarea
id: reality
attributes:
label: "What actually happened?"
validations:
required: true
- type: textarea
id: logs
attributes:
label: "Logs"
description: "Please copy and paste any relevant log output, if applicable."
render: shell
- type: dropdown
id: severity
attributes:
label: "Severity"
description: "Does this issue prevent you from using the software as normal?"
options:
- "I cannot use the software"
- "I cannot use it as easily as I'd like"
- "I can manage"
validations:
required: true
- type: checkboxes
id: searched
attributes:
label: "Have you searched for this issue?"
description: "Please double-check that your issue is not already being tracked on [the forums](https://meta.akkoma.dev) or [the issue tracker](https://akkoma.dev/AkkomaGang/akkoma/issues)."
options:
- label: "I have double-checked and have not found this issue mentioned anywhere."

@@ -1,29 +0,0 @@
name: "Feature request"
about: "I'd like something to be added to Akkoma"
title: "[feat] "
body:
- type: markdown
attributes:
value: "Thanks for taking the time to request a new feature! Please be as concise and clear as you can in your proposal, so we could understand what you're going for."
- type: textarea
id: idea
attributes:
label: "The idea"
description: "What do you think you should be able to do in Akkoma?"
validations:
required: true
- type: textarea
id: reason
attributes:
label: "The reasoning"
description: "Why would this be a worthwhile feature? Does it solve any problems? Have people talked about wanting it?"
validations:
required: true
- type: checkboxes
id: searched
attributes:
label: "Have you searched for this feature request?"
description: "Please double-check that your issue is not already being tracked on [the forums](https://meta.akkoma.dev), [the issue tracker](https://akkoma.dev/AkkomaGang/akkoma/issues), or the one for [pleroma-fe](https://akkoma.dev/AkkomaGang/pleroma-fe/issues)."
options:
- label: "I have double-checked and have not found this feature request mentioned anywhere."
- label: "This feature is related to the Akkoma backend specifically, and not pleroma-fe."

.gitignore vendored

@@ -1,6 +1,5 @@
# App artifacts
docs/site
*.zip
*.sw*
secret
/_build
@@ -18,13 +17,6 @@ secret
/instance
/priv/ssh_keys
vm.args
.cache/
.hex/
.mix/
.psql_history
docker-resources/Dockerfile
docker-resources/Caddyfile
pgdata
# Prevent committing custom emojis
/priv/static/emoji/custom/*
@@ -73,6 +65,3 @@ pleroma.iml
# Generated documentation
docs/site
# docker stuff
docker-db

@@ -0,0 +1,18 @@
<!--
### Precheck
* For support use https://git.pleroma.social/pleroma/pleroma-support or [community channels](https://git.pleroma.social/pleroma/pleroma#community-channels).
* Please do a quick search to ensure no similar bug has been reported before. If the bug has not been addressed after 2 weeks, it's fine to bump it.
* Try to ensure that the bug is actually related to the Pleroma backend. For example, if a bug happens in Pleroma-FE but not in Mastodon-FE or mobile clients, it's likely that the bug should be filed in [Pleroma-FE](https://git.pleroma.social/pleroma/pleroma-fe/issues/new) repository.
-->
### Environment
* Installation type (OTP or From Source):
* Pleroma version (could be found in the "Version" tab of settings in Pleroma-FE):
* Elixir version (`elixir -v` for from source installations, N/A for OTP):
* Operating system:
* PostgreSQL version (`psql -V`):
### Bug description

@@ -0,0 +1,6 @@
### Release checklist
* [ ] Bump version in `mix.exs`
* [ ] Compile a changelog
* [ ] Create an MR with an announcement to pleroma.social
* [ ] Tag the release
* [ ] Merge `stable` into `develop` (in case the fixes are already in develop, use `git merge -s ours --no-commit` and manually merge the changelogs)
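
For illustration only, the `-s ours` step in that last item might look like the following sketch (branch names are taken from the checklist; `CHANGELOG.md` as the changelog file is an assumption):

```sh
# Merge stable into develop while keeping develop's versions of fixes that
# were already backported: the "ours" strategy ignores stable's tree.
git checkout develop
git merge -s ours --no-commit stable

# Reconcile the changelog entries by hand at this point, then record the merge.
git add CHANGELOG.md
git commit -m "Merge branch 'stable' into develop"
```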

@@ -1,197 +0,0 @@
variables:
- &scw-secrets
- SCW_ACCESS_KEY
- SCW_SECRET_KEY
- SCW_DEFAULT_ORGANIZATION_ID
- &setup-hex "mix local.hex --force && mix local.rebar --force"
- &on-release
when:
event:
- push
- tag
branch:
- develop
- stable
- refs/tags/v*
- refs/tags/stable-*
- &on-stable
when:
event:
- push
- tag
branch:
- stable
- refs/tags/stable-*
- &on-point-release
when:
event:
- push
branch:
- develop
- stable
- &on-pr-open
when:
event:
- pull_request
- &tag-build "export BUILD_TAG=$${CI_COMMIT_TAG:-\"$CI_COMMIT_BRANCH\"} && export PLEROMA_BUILD_BRANCH=$BUILD_TAG"
- &clean "(rm -rf release || true) && (rm -rf _build || true) && (rm -rf /root/.mix)"
- &mix-clean "mix deps.clean --all && mix clean"
services:
postgres:
image: postgres:13
when:
event:
- pull_request
environment:
POSTGRES_DB: pleroma_test
POSTGRES_USER: postgres
POSTGRES_PASSWORD: postgres
pipeline:
lint:
<<: *on-pr-open
image: akkoma/ci-base:1.14
commands:
- mix local.hex --force
- mix local.rebar --force
- mix format --check-formatted
build:
image: akkoma/ci-base:1.14
<<: *on-pr-open
environment:
MIX_ENV: test
POSTGRES_DB: pleroma_test
POSTGRES_USER: postgres
POSTGRES_PASSWORD: postgres
DB_HOST: postgres
commands:
- mix local.hex --force
- mix local.rebar --force
- mix deps.get
- mix compile
test:
image: akkoma/ci-base:1.14
<<: *on-pr-open
environment:
MIX_ENV: test
POSTGRES_DB: pleroma_test
POSTGRES_USER: postgres
POSTGRES_PASSWORD: postgres
DB_HOST: postgres
commands:
- mix local.hex --force
- mix local.rebar --force
- mix deps.get
- mix compile
- mix ecto.drop -f -q
- mix ecto.create
- mix ecto.migrate
- mix test --preload-modules --exclude erratic --exclude federated --max-cases 4
# Canonical amd64
ubuntu22:
image: hexpm/elixir:1.14.2-erlang-25.1.2-ubuntu-jammy-20220428
<<: *on-release
environment:
MIX_ENV: prod
DEBIAN_FRONTEND: noninteractive
commands:
- apt-get update && apt-get install -y cmake libmagic-dev rclone zip imagemagick libmagic-dev git build-essential g++ wget
- *clean
- echo "import Config" > config/prod.secret.exs
- *setup-hex
- *tag-build
- mix deps.get --only prod
- mix release --path release
- zip akkoma-ubuntu-jammy.zip -r release
release-ubuntu22:
image: akkoma/releaser
<<: *on-release
secrets: *scw-secrets
commands:
- export SOURCE=akkoma-ubuntu-jammy.zip
- export DEST=scaleway:akkoma-updates/$${CI_COMMIT_TAG:-"$CI_COMMIT_BRANCH"}/akkoma-ubuntu-jammy.zip
- /bin/sh /entrypoint.sh
- export DEST=scaleway:akkoma-updates/$${CI_COMMIT_TAG:-"$CI_COMMIT_BRANCH"}/akkoma-amd64-ubuntu-jammy.zip
- /bin/sh /entrypoint.sh
debian-bullseye:
image: hexpm/elixir:1.14.2-erlang-25.1.2-debian-bullseye-20221004
<<: *on-release
environment:
MIX_ENV: prod
DEBIAN_FRONTEND: noninteractive
commands:
- apt-get update && apt-get install -y cmake libmagic-dev rclone zip imagemagick libmagic-dev git build-essential gcc make g++ wget
- *clean
- echo "import Config" > config/prod.secret.exs
- *setup-hex
- *tag-build
- *mix-clean
- mix deps.get --only prod
- mix release --path release
- zip akkoma-amd64.zip -r release
release-debian:
image: akkoma/releaser
<<: *on-release
secrets: *scw-secrets
commands:
- export SOURCE=akkoma-amd64.zip
- export DEST=scaleway:akkoma-updates/$${CI_COMMIT_TAG:-"$CI_COMMIT_BRANCH"}/akkoma-amd64.zip
- /bin/sh /entrypoint.sh
- export DEST=scaleway:akkoma-updates/$${CI_COMMIT_TAG:-"$CI_COMMIT_BRANCH"}/akkoma-debian-stable.zip
- /bin/sh /entrypoint.sh
# Canonical amd64-musl
musl:
image: hexpm/elixir:1.14.2-erlang-25.1.2-alpine-3.16.2
<<: *on-stable
environment:
MIX_ENV: prod
commands:
- apk add git gcc g++ musl-dev make cmake file-dev rclone wget zip imagemagick
- *clean
- *setup-hex
- *mix-clean
- *tag-build
- mix deps.get --only prod
- mix release --path release
- zip akkoma-amd64-musl.zip -r release
release-musl:
image: akkoma/releaser
<<: *on-stable
secrets: *scw-secrets
commands:
- export SOURCE=akkoma-amd64-musl.zip
- export DEST=scaleway:akkoma-updates/$${CI_COMMIT_TAG:-"$CI_COMMIT_BRANCH"}/akkoma-amd64-musl.zip
- /bin/sh /entrypoint.sh
docs:
<<: *on-point-release
secrets:
- SCW_ACCESS_KEY
- SCW_SECRET_KEY
- SCW_DEFAULT_ORGANIZATION_ID
environment:
CI: "true"
image: python:3.10-slim
commands:
- apt-get update && apt-get install -y rclone wget git zip
- wget https://github.com/scaleway/scaleway-cli/releases/download/v2.5.1/scaleway-cli_2.5.1_linux_amd64
- mv scaleway-cli_2.5.1_linux_amd64 scaleway-cli
- chmod +x scaleway-cli
- ./scaleway-cli object config install type=rclone
- cd docs
- pip install -r requirements.txt
- mkdocs build
- zip -r docs.zip site/*
- cd site
- rclone copy . scaleway:akkoma-docs/$CI_COMMIT_BRANCH/

.woodpecker/.docs.yml Normal file

@@ -0,0 +1,27 @@
pipeline:
build:
when:
event:
- push
branch:
- develop
- stable
secrets:
- SCW_ACCESS_KEY
- SCW_SECRET_KEY
- SCW_DEFAULT_ORGANIZATION_ID
environment:
CI: "true"
image: python:3.10-slim
commands:
- apt-get update && apt-get install -y rclone wget git zip
- wget https://github.com/scaleway/scaleway-cli/releases/download/v2.5.1/scaleway-cli_2.5.1_linux_amd64
- mv scaleway-cli_2.5.1_linux_amd64 scaleway-cli
- chmod +x scaleway-cli
- ./scaleway-cli object config install type=rclone
- cd docs
- pip install -r requirements.txt
- mkdocs build
- zip -r docs.zip site/*
- cd site
- rclone copy . scaleway:akkoma-docs/$CI_COMMIT_BRANCH/

.woodpecker/.release.yml Normal file

@@ -0,0 +1,69 @@
variables:
- &scw-secrets
- SCW_ACCESS_KEY
- SCW_SECRET_KEY
- SCW_DEFAULT_ORGANIZATION_ID
- &setup-scw-s3 "wget https://github.com/scaleway/scaleway-cli/releases/download/v2.5.1/scaleway-cli_2.5.1_linux_amd64 && mv scaleway-cli_2.5.1_linux_amd64 scaleway-cli && chmod +x scaleway-cli && ./scaleway-cli object config install type=rclone"
- &setup-hex "mix local.hex --force && mix local.rebar --force"
- &build-on
when:
event:
- push
- tag
branch:
- develop
- stable
- refs/tags/v*
- refs/tags/stable-*
- docker-build
- &tag-build 'export BUILD_TAG=$${CI_COMMIT_TAG:-"$CI_COMMIT_BRANCH"} && export PLEROMA_BUILD_BRANCH=$BUILD_TAG'
- &clean "(rm -rf release || true) && (rm -rf _build || true) && (rm -rf /root/.mix) && (rm scaleway-cli || true) && (mix deps.clean --all || true)"
pipeline:
glibc:
image: hexpm/elixir:1.13.4-erlang-24.3.4.2-ubuntu-focal-20211006
<<: *build-on
secrets: *scw-secrets
environment:
MIX_ENV: prod
commands:
- apt-get update && apt-get install -y cmake libmagic-dev rclone zip imagemagick libmagic-dev git
- *clean
- *setup-scw-s3
- echo "import Mix.Config" > config/prod.secret.exs
- *setup-hex
- *tag-build
- mix deps.get --only prod
- mix release --path release
- zip akkoma-${tag}.zip -r release
- rclone copyto akkoma-${tag}.zip scaleway:akkoma-updates/$BUILD_TAG/akkoma-${tag}.zip
musl:
image: hexpm/elixir:1.13.4-erlang-24.3.4.2-alpine-3.16.0
<<: *build-on
secrets: *scw-secrets
environment:
MIX_ENV: prod
commands:
- apk add git gcc g++ musl-dev make cmake file-dev rclone wget zip imagemagick
- *clean
- *setup-scw-s3
- *setup-hex
- *tag-build
- mix deps.get --only prod
- mix release --path release
- zip akkoma-${tag}.zip -r release
- rclone copyto akkoma-${tag}.zip scaleway:akkoma-updates/$BUILD_TAG/akkoma-${tag}-musl.zip
docker:
image: woodpeckerci/plugin-docker-buildx
<<: *build-on
secrets: [docker_username, docker_password]
settings:
repo: akkoma/akkoma
dockerfile: Dockerfile
platforms: linux/arm/v7,linux/arm64/v8,linux/amd64
tag: [latest, alpine]

.woodpecker/.test.yml Normal file

@@ -0,0 +1,59 @@
matrix:
ELIXIR_VERSION:
- 1.13
pipeline:
lint:
when:
event:
- pull_request
image: pleromaforkci/ci-base:1.13
commands:
- mix local.hex --force
- mix local.rebar --force
- mix format --check-formatted
build:
image: pleromaforkci/ci-base:${ELIXIR_VERSION}
when:
event:
- pull_request
environment:
MIX_ENV: test
commands:
- mix local.hex --force
- mix local.rebar --force
- mix deps.get
- mix compile
test:
group: test
image: pleromaforkci/ci-base:${ELIXIR_VERSION}
when:
event:
- pull_request
environment:
MIX_ENV: test
POSTGRES_DB: pleroma_test
POSTGRES_USER: postgres
POSTGRES_PASSWORD: postgres
DB_HOST: postgres
commands:
- mix local.hex --force
- mix local.rebar --force
- mix deps.get
- mix ecto.drop -f -q
- mix ecto.create
- mix ecto.migrate
- mix test --preload-modules --exclude erratic --exclude federated --max-cases 4
services:
postgres:
image: postgres:13
when:
event:
- pull_request
environment:
POSTGRES_DB: pleroma_test
POSTGRES_USER: postgres
POSTGRES_PASSWORD: postgres

@@ -4,121 +4,6 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
## Unreleased
## Added
- Config: HTTP timeout options, :pool\_timeout and :receive\_timeout
- Added statistic gathering about instances which do/don't have signed fetches when they request from us
- Ability to set a default post expiry time, after which the post will be deleted. If used in concert with ActivityExpiration MRF, the expiry which comes _sooner_ will be applied.
- Regular task to prune local transient activities
- Task to manually run the transient prune job (pleroma.database prune\_task)
- Ability to follow hashtags
## Changed
- MastoAPI: Accept BooleanLike input on `/api/v1/accounts/:id/follow` (fixes follows with mastodon.py)
- Relays from akkoma are now off by default
- NormalizeMarkup MRF is now on by default
- Follow/Block/Mute imports now spin off into *n* tasks to avoid the oban timeout
- Transient activities received from remote servers are no longer persisted in the database
- Overhauled static-fe view for logged-out users
## Upgrade Notes
- If you have an old instance, you will probably want to run `mix pleroma.database prune_task` in the foreground to catch it up with the history of your instance.
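
For reference, a minimal way to run that task on a from-source install might be the sketch below (`MIX_ENV=prod` and running from the Akkoma checkout are assumptions; an OTP-release install would go through its `pleroma_ctl` wrapper rather than `mix`):

```sh
# Run the transient-activity prune once, in the foreground,
# against the production environment.
MIX_ENV=prod mix pleroma.database prune_task
```
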
## 2022.11
## Added
- Officially supported docker release
- Ability to remove followers unilaterally without a block
- Scraping of nodeinfo from remote instances to display instance info
- `requested_by` in relationships when the user has requested to follow you
## Changed
- Follows no longer override domain blocks, a domain block is final
- Deletes are now the lowest priority to publish and will be handled after creates
- Domain blocks are now subdomain-matches by default
## Fixed
- Registrations via ldap are now compatible with the latest OTP24
## Update notes
- If you use LDAP and run from source, please update your elixir/erlang
to the latest. The changes in OTP24.3 are breaking.
- You can now remove the leading `*.` from domain blocks, but you do not have to.
## 2022.10
### Added
- Ability to sync frontend profiles between clients, with a name attached
- Status card generation will now use the media summary if it is available
### Changed
- Emoji updated to latest 15.0 draft
- **Breaking**: `/api/v1/pleroma/backups` endpoints now require `read:backups` scope instead of `read:accounts`
- Verify that the signature on posts is not domain blocked, and belongs to the correct user
### Fixed
- OAuthPlug no longer joins with the database every call and uses the user cache
- Undo activities no longer try to look up by ID, and render correctly
- prevent false-errors from meilisearch
## 2022.09
### Added
- support for fedibird-fe, and non-breaking API parity for it to function
- support for setting instance languages in metadata
- support for reusing oauth tokens, and not requiring new authorizations
- the ability to obfuscate domains in your MRF descriptions
- automatic translation of statuses via DeepL or LibreTranslate
- ability to edit posts
- ability to react with remote emoji
### Changed
- MFM parsing is now done on the backend by a modified version of ilja's parser -> https://akkoma.dev/AkkomaGang/mfm-parser
- InlineQuotePolicy is now on by default
- Enable remote users to interact with posts
### Fixed
- Compatibility with latest meilisearch
- Resolution of nested mix tasks (i.e search.meilisearch) in OTP releases
- Elasticsearch returning likes and repeats, displaying as posts
- Ensure key generation happens at registration-time to prevent potential race-conditions
- Ensured websockets get closed on logout
- Allowed GoToSocial-style `?query_string` signatures
### Removed
- Non-finch HTTP adapters. `:tesla, :adapter` is now highly recommended to be set to the default.
## 2022.08
### Added
- extended runtime module support, see config cheatsheet
- quote posting; quotes are limited to public posts
### Changed
- quarantining is now considered absolutely; public activities are no longer
an exception.
- also merged quarantine and mrf reject - quarantine is now deprecated
- flavours:
- amd64 is built for debian stable. Compatible with ubuntu 20.
- ubuntu-jammy is built for... well, ubuntu 22 (LTS)
- amd64-musl is built for alpine 3.16
### Fixed
- Updated mastoFE path, for the newer version
### Removed
- Scrobbling support
- `/api/v1/pleroma/scrobble`
- `/api/v1/pleroma/accounts/{id}/scrobbles`
- Deprecated endpoints
- `/api/v1/pleroma/chats`
- `/api/v1/notifications/dismiss`
- `/api/v1/search`
- `/api/v1/statuses/{id}/card`
- Chats, they were half-baked. Just use PMs.
- Prometheus, it causes massive slowdown
## 2022.07
### Added
@@ -259,7 +144,6 @@ you might end up in a situation where you don't have an ability to get it.
- Attachment dimensions and blurhashes are federated when available.
- Mastodon API: support `poll` notification.
- Pinned posts federation
- Possibility to discover users like `user@example.org`, while Akkoma is working on `akkoma.example.org`. Additional configuration required.
### Fixed
- Don't crash so hard when email settings are invalid.

@@ -1,24 +0,0 @@
# Akkoma Code of Conduct
The Akkoma project aims to be **enjoyable** for anyone to participate in, regardless of their identity or level of expertise. To achieve this, the community must create an environment which is **safe** and **equitable**; the following guidelines have been created with these goals in mind.
1. **Treat individuals with respect.** Differing experiences and viewpoints deserve to be respected, and bigotry and harassment are not tolerated under any circumstances.
- Individuals should at all times be treated as equals, regardless of their age, gender, sexuality, race, ethnicity, _or any other characteristic_, intrinsic or otherwise.
- Behaviour that is harmful in nature should be addressed and corrected *regardless of intent*.
- Respect personal boundaries and ask for clarification whenever they are unclear.
- (Obviously, hate does not count as merely a "differing viewpoint", because it is harmful in nature.)
2. **Be understanding of differences in communication.** Not everyone is aware of unspoken social cues, and speech that is not intended to be offensive should not be treated as such simply due to an atypical manner of communication.
- Somebody who speaks bluntly is not necessarily rude, and somebody who swears a lot is not necessarily volatile.
- Try to confirm your interpretation of their intent rather than assuming bad faith.
- Someone may not communicate as, or come across as a picture of "professionalism", but this should not be seen as a reason to dismiss them. This is a **casual** space, and communication styles can reflect that.
3. **"Uncomfortable" does not mean "unsafe".** In an ideal world, the community would be safe, equitable, enjoyable, *and* comfortable for all members at all times. Unfortunately, this is not always possible in reality.
- Safety and equity will be prioritized over comfort whenever it is necessary to do so.
- Weaponizing one's own discomfort to deflect accountability or censor an individual (e.g. "white fragility") is a form of discriminatory conduct.
4. **Let people grow from their mistakes.** Nobody is perfect; even the most well-meaning individual can do something hurtful. Everyone should be given a fair opportunity to explain themselves and correct their behaviour. Portraying someone as inherently malicious prevents improvement and shifts focus away from the *action* that was problematic.
- Avoid bringing up past events that do not accurately reflect an individual's current actions or beliefs. (This is, of course, different from providing evidence of a recurring pattern of behaviour.)
---
This document was adapted from one created by ~keith as part of punks default repository template, and is licensed under CC-BY-SA 4.0. The original template is here: <https://bytes.keithhacks.cyou/keith/default-template>

COPYING

@@ -1,15 +1,12 @@
Unless otherwise stated this repository is
Copyright © 2017-2022 Pleroma Authors <https://pleroma.social/>
Copyright © 2022 Akkoma Authors <https://akkoma.social/>
and is distributed under The GNU Affero General Public License Version 3, you
should have received a copy of the license file as AGPL-3.
Unless otherwise stated this repository is copyright © 2017-2021
Pleroma Authors <https://pleroma.social/>, and is distributed under
The GNU Affero General Public License Version 3, you should have received a
copy of the license file as AGPL-3.
---
Files inside docs directory are
Copyright © 2021-2022 Pleroma Authors <https://pleroma.social/>
Copyright © 2022 Akkoma Authors <https://akkoma.social/>
and are distributed under the Creative Commons
Files inside docs directory are copyright © 2021 Pleroma Authors
<https://pleroma.social/>, and are distributed under the Creative Commons
Attribution 4.0 International license, you should have received
a copy of the license file as CC-BY-4.0.
@@ -19,7 +16,17 @@ The following files are copyright © 2019 shitposter.club, and are distributed
under the Creative Commons Attribution-ShareAlike 4.0 International license,
you should have received a copy of the license file as CC-BY-SA-4.0.
priv/static/images/pleroma-fox-tan.png
priv/static/images/pleroma-fox-tan-smol.png
priv/static/images/pleroma-tan.png
---
The following files are copyright © 2019 shitposter.club, and are distributed
under the Creative Commons Attribution 4.0 International license, you should
have received a copy of the license file as CC-BY-4.0.
priv/static/images/pleroma-fox-tan-shy.png
---
@@ -28,4 +35,22 @@ The following files are copyright © 2017-2020 Pleroma Authors
Attribution-ShareAlike 4.0 International license, you should have received
a copy of the license file as CC-BY-SA-4.0.
priv/static/images/avi.png
priv/static/images/banner.png
priv/static/instance/thumbnail.jpeg
---
All photos published on Unsplash can be used for free. You can use them for
commercial and noncommercial purposes. You do not need to ask permission from
or provide credit to the photographer or Unsplash, although it is appreciated
when possible.
More precisely, Unsplash grants you an irrevocable, nonexclusive, worldwide
copyright license to download, copy, modify, distribute, perform, and use
photos from Unsplash for free, including for commercial purposes, without
permission from or attributing the photographer or Unsplash. This license
does not include the right to compile photos from Unsplash to replicate
a similar or competing service.
priv/static/images/city.jpg

@@ -1,8 +1,21 @@
FROM hexpm/elixir:1.13.4-erlang-24.3.4.5-alpine-3.15.6
FROM hexpm/elixir:1.13.4-erlang-24.3.4.2-alpine-3.16.0 as build
COPY . .
ENV MIX_ENV=prod
ARG HOME=/opt/akkoma
RUN apk add git gcc g++ musl-dev make cmake file-dev &&\
echo "import Config" > config/prod.secret.exs &&\
mix local.hex --force &&\
mix local.rebar --force &&\
mix deps.get --only prod &&\
mkdir release &&\
mix release --path release
FROM alpine:3.16
ARG BUILD_DATE
ARG VCS_REF
LABEL org.opencontainers.image.title="akkoma" \
org.opencontainers.image.description="Akkoma for Docker" \
@@ -13,21 +26,25 @@ LABEL org.opencontainers.image.title="akkoma" \
org.opencontainers.image.revision=$VCS_REF \
org.opencontainers.image.created=$BUILD_DATE
RUN apk add git gcc g++ musl-dev make cmake file-dev exiftool ffmpeg imagemagick libmagic ncurses postgresql-client
ARG HOME=/opt/akkoma
ARG DATA=/var/lib/akkoma
RUN apk update &&\
apk add exiftool ffmpeg imagemagick libmagic ncurses postgresql-client &&\
adduser --system --shell /bin/false --home ${HOME} akkoma &&\
mkdir -p ${DATA}/uploads &&\
mkdir -p ${DATA}/static &&\
chown -R akkoma ${DATA} &&\
mkdir -p /etc/akkoma &&\
chown -R akkoma /etc/akkoma
USER akkoma
COPY --from=build --chown=akkoma:0 /release ${HOME}
COPY ./config/docker.exs /etc/akkoma/config.exs
COPY ./docker-entrypoint.sh ${HOME}
EXPOSE 4000
ARG UID=1000
ARG GID=1000
ARG UNAME=akkoma
RUN addgroup -g $GID $UNAME
RUN adduser -u $UID -G $UNAME -D -h $HOME $UNAME
WORKDIR /opt/akkoma
USER $UNAME
RUN mix local.hex --force &&\
mix local.rebar --force
CMD ["/opt/akkoma/docker-entrypoint.sh"]
ENTRYPOINT ["/opt/akkoma/docker-entrypoint.sh"]

@@ -2,60 +2,39 @@
*a smallish microblogging platform, aka the cooler pleroma*
![English OK](https://img.shields.io/badge/English-OK-blueviolet) ![日本語OK](https://img.shields.io/badge/%E6%97%A5%E6%9C%AC%E8%AA%9E-OK-blueviolet)
## About
This is a fork of Pleroma, which is a microblogging server software that can federate (= exchange messages with) other servers that support ActivityPub. What that means is that you can host a server for yourself or your friends and stay in control of your online identity, but still exchange messages with people on larger servers. Akkoma will federate with all servers that implement ActivityPub, like Friendica, GNU Social, Hubzilla, Mastodon, Misskey, Peertube, and Pixelfed.
This is a fork of Pleroma, which is a microblogging server software that can federate (= exchange messages with) other servers that support ActivityPub. What that means is that you can host a server for yourself or your friends and stay in control of your online identity, but still exchange messages with people on larger servers. Pleroma will federate with all servers that implement ActivityPub, like Friendica, GNU Social, Hubzilla, Mastodon, Misskey, Peertube, and Pixelfed.
Akkoma is written in Elixir and uses PostgreSQL for data storage.
Akkoma is written in Elixir and uses PostgresSQL for data storage.
For clients it supports the [Mastodon client API](https://docs.joinmastodon.org/api/guidelines/) with Pleroma extensions (see the API section on <https://docs.akkoma.dev/stable/>).
- [Client Applications for Akkoma](https://docs.akkoma.dev/stable/clients/)
## Differences with Pleroma
Akkoma is a faster-paced fork; it has a varied and potentially experimental feature set tailored specifically to the corner of the fediverse inhabited by the project
creator and contributors.
This should not be considered a one-for-one match with pleroma; it is more opinionated in many ways, and has a smaller community (which is good or bad depending on your view).
For example, Akkoma has:
- Custom Emoji reactions (compatible with misskey)
- Misskey-flavoured markdown support
- Elasticsearch and Meilisearch support for search
- Mastodon frontend (Glitch-Soc and Fedibird flavours) support
- Automatic post translation via DeepL or LibreTranslate
- A multitude of heavy modifications to the Pleroma Frontend (Pleroma-FE)
- The "bubble" concept, in which instance administrators can choose closely-related instances to make a "community of communities", so to say
Akkoma also takes a more opinionated stance on issues like domain blocks, which are enforced far more strictly.
Take a look at the Changelog if you want a full list of recent changes; everything since 3.0 has been Akkoma.
- [Client Applications for Pleroma](https://docs.akkoma.dev/stable/clients/)
## Installation
### OTP releases (Recommended)
If you are running Linux (glibc or musl) on x86, the recommended way to install Akkoma is by using OTP releases. OTP releases are as close as you can get to binary releases with Erlang/Elixir. The release is self-contained, and provides everything needed to boot it. The installation instructions are available [here](https://docs.akkoma.dev/stable/installation/otp_en/).
If you are running Linux (glibc or musl) on x86, the recommended way to install Pleroma is by using OTP releases. OTP releases are as close as you can get to binary releases with Erlang/Elixir. The release is self-contained, and provides everything needed to boot it. The installation instructions are available [here](https://docs.akkoma.dev/stable/installation/otp_en/).
### From Source
If your platform is not supported, or you just want to be able to edit the source code easily, you may install Akkoma from source.
If your platform is not supported, or you just want to be able to edit the source code easily, you may install Pleroma from source.
- [Alpine Linux](https://docs.akkoma.dev/stable/installation/alpine_linux_en/)
- [Arch Linux](https://docs.akkoma.dev/stable/installation/arch_linux_en/)
- [Debian-based](https://docs.akkoma.dev/stable/installation/debian_based_en/)
- [Debian-based (jp)](https://docs.akkoma.dev/stable/installation/debian_based_jp/)
- [FreeBSD](https://docs.akkoma.dev/stable/installation/freebsd_en/)
- [Gentoo Linux](https://docs.akkoma.dev/stable/installation/gentoo_en/)
- [NetBSD](https://docs.akkoma.dev/stable/installation/netbsd_en/)
- [OpenBSD](https://docs.akkoma.dev/stable/installation/openbsd_en/)
- [OpenBSD (fi)](https://docs.akkoma.dev/stable/installation/openbsd_fi/)
### Docker
Docker installation is supported via [this setup](https://docs.akkoma.dev/stable/installation/docker_en/)
While we don't provide docker files, other people have written very good ones. Take a look at <https://github.com/angristan/docker-pleroma> or <https://glitch.sh/sn0w/pleroma-docker>.
### Compilation Troubleshooting
If you ever encounter compilation issues during the updating of Akkoma, you can try these commands and see if they fix things:
If you ever encounter compilation issues during the updating of Pleroma, you can try these commands and see if they fix things:
- `mix deps.clean --all`
- `mix local.rebar`
@@ -64,4 +43,3 @@ If you ever encounter compilation issues during the updating of Akkoma, you can
## Documentation
- https://docs.akkoma.dev/stable
- https://docs.akkoma.dev/develop
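
To make the "Compilation Troubleshooting" advice above concrete, a full clean rebuild for a from-source install could look like this sketch (the exact sequence and `MIX_ENV=prod` are assumptions, not project guidance):

```sh
# Drop fetched dependencies and the local build tooling, then rebuild.
mix deps.clean --all
mix local.hex --force
mix local.rebar --force
mix deps.get
MIX_ENV=prod mix compile
```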

@@ -1,2 +0,0 @@
untrusted comment: Akkoma Signing Key public key
RWQRlw8Ex/uTbvo1wB1yK75tQ5nXKilB/vrKdkL41bgZHL9aKP+7fSS5

@@ -48,7 +48,6 @@
config :pleroma, Pleroma.Repo,
telemetry_event: [Pleroma.Repo.Instrumenter],
queue_target: 20_000,
migration_lock: nil
config :pleroma, Pleroma.Captcha,
@@ -180,14 +179,12 @@
# Configures http settings, upstream proxy etc.
config :pleroma, :http,
pool_timeout: :timer.seconds(5),
receive_timeout: :timer.seconds(15),
proxy_url: nil,
user_agent: :default,
adapter: []
config :pleroma, :instance,
name: "Akkoma",
name: "Pleroma",
email: "example@example.com",
notify_email: "noreply@example.com",
description: "Akkoma: The cooler fediverse server",
@@ -200,7 +197,6 @@
avatar_upload_limit: 2_000_000,
background_upload_limit: 4_000_000,
banner_upload_limit: 4_000_000,
languages: ["en"],
poll_limits: %{
max_options: 20,
max_option_chars: 200,
@@ -217,8 +213,9 @@
federation_publisher_modules: [
Pleroma.Web.ActivityPub.Publisher
],
allow_relay: false,
allow_relay: true,
public: true,
quarantined_instances: [],
static_dir: "instance/static/",
allowed_post_formats: [
"text/plain",
@@ -262,9 +259,7 @@
show_reactions: true,
password_reset_token_validity: 60 * 60 * 24,
profile_directory: true,
privileged_staff: false,
local_bubble: [],
max_frontend_settings_json_chars: 100_000
privileged_staff: false
config :pleroma, :welcome,
direct_message: [
@@ -272,6 +267,11 @@
sender_nickname: nil,
message: nil
],
chat_message: [
enabled: false,
sender_nickname: nil,
message: nil
],
email: [
enabled: false,
sender: nil,
@@ -314,19 +314,19 @@
logo: "/static/logo.svg",
logoMargin: ".1em",
logoMask: true,
minimalScopesMode: false,
noAttachmentLinks: false,
nsfwCensorImage: "",
postContentType: "text/plain",
redirectRootLogin: "/main/friends",
redirectRootNoLogin: "/main/public",
redirectRootNoLogin: "/main/all",
scopeCopy: true,
sidebarRight: false,
showFeaturesPanel: true,
showInstanceSpecificPanel: false,
subjectLineBehavior: "email",
theme: "pleroma-dark",
webPushNotifications: false,
conversationDisplay: "linear"
webPushNotifications: false
},
masto_fe: %{
showInstanceSpecificPanel: true
@@ -411,8 +411,6 @@
accept: [],
reject: []
config :pleroma, :mrf_inline_quote, prefix: "RE"
# threshold of 7 days
config :pleroma, :mrf_object_age,
threshold: 604_800,
@@ -489,7 +487,8 @@
config :pleroma, :http_security,
enabled: true,
sts: false,
sts_max_age: 63_072_000,
sts_max_age: 31_536_000,
ct_max_age: 2_592_000,
referrer_policy: "same-origin"
config :cors_plug,
@@ -568,18 +567,12 @@
attachments_cleanup: 1,
new_users_digest: 1,
mute_expire: 5,
search_indexing: 10,
nodeinfo_fetcher: 1,
database_prune: 1
],
plugins: [
Oban.Plugins.Pruner,
{Oban.Plugins.Reindexer, schedule: "@weekly"}
search_indexing: 10
],
plugins: [Oban.Plugins.Pruner],
crontab: [
{"0 0 * * 0", Pleroma.Workers.Cron.DigestEmailsWorker},
{"0 0 * * *", Pleroma.Workers.Cron.NewUsersDigestWorker},
{"0 3 * * *", Pleroma.Workers.Cron.PruneDatabaseWorker}
{"0 0 * * *", Pleroma.Workers.Cron.NewUsersDigestWorker}
]
config :pleroma, :workers,
@@ -587,28 +580,6 @@
federator_incoming: 5,
federator_outgoing: 5,
search_indexing: 2
],
timeout: [
activity_expiration: :timer.seconds(5),
token_expiration: :timer.seconds(5),
filter_expiration: :timer.seconds(5),
backup: :timer.seconds(900),
federator_incoming: :timer.seconds(10),
federator_outgoing: :timer.seconds(10),
ingestion_queue: :timer.seconds(5),
web_push: :timer.seconds(5),
mailer: :timer.seconds(5),
transmogrifier: :timer.seconds(5),
scheduled_activities: :timer.seconds(5),
poll_notifications: :timer.seconds(5),
background: :timer.seconds(5),
remote_fetcher: :timer.seconds(10),
attachments_cleanup: :timer.seconds(900),
new_users_digest: :timer.seconds(10),
mute_expire: :timer.seconds(5),
search_indexing: :timer.seconds(5),
nodeinfo_fetcher: :timer.seconds(10),
database_prune: :timer.minutes(10)
]
config :pleroma, Pleroma.Formatter,
@@ -667,6 +638,13 @@
config :pleroma, Pleroma.Emails.NewUsersDigestEmail, enabled: false
config :prometheus, Pleroma.Web.Endpoint.MetricsExporter,
enabled: false,
auth: false,
ip_whitelist: [],
path: "/api/pleroma/app_metrics",
format: :text
config :pleroma, Pleroma.ScheduledActivity,
daily_user_limit: 25,
total_user_limit: 300,
@@ -742,7 +720,6 @@
config :pleroma, :frontends,
primary: %{"name" => "pleroma-fe", "ref" => "stable"},
admin: %{"name" => "admin-fe", "ref" => "stable"},
mastodon: %{"name" => "mastodon-fe", "ref" => "akkoma"},
swagger: %{
"name" => "swagger-ui",
"ref" => "stable",
@@ -761,18 +738,9 @@
"mastodon-fe" => %{
"name" => "mastodon-fe",
"git" => "https://akkoma.dev/AkkomaGang/masto-fe",
"build_url" =>
"https://akkoma-updates.s3-website.fr-par.scw.cloud/frontend/${ref}/masto-fe.zip",
"build_url" => "https://akkoma-updates.s3-website.fr-par.scw.cloud/frontend/masto-fe.zip",
"build_dir" => "distribution",
"ref" => "akkoma"
},
"fedibird-fe" => %{
"name" => "fedibird-fe",
"git" => "https://akkoma.dev/AkkomaGang/fedibird-fe",
"build_url" =>
"https://akkoma-updates.s3-website.fr-par.scw.cloud/frontend/${ref}/fedibird-fe.zip",
"build_dir" => "distribution",
"ref" => "akkoma"
"ref" => "develop"
},
"admin-fe" => %{
"name" => "admin-fe",
@@ -783,10 +751,10 @@
},
"soapbox-fe" => %{
"name" => "soapbox-fe",
"git" => "https://gitlab.com/soapbox-pub/soapbox",
"git" => "https://gitlab.com/soapbox-pub/soapbox-fe",
"build_url" =>
"https://gitlab.com/soapbox-pub/soapbox/-/jobs/artifacts/${ref}/download?job=build-production",
"ref" => "v2.0.0",
"https://gitlab.com/soapbox-pub/soapbox-fe/-/jobs/artifacts/${ref}/download?job=build-production",
"ref" => "v1.0.0",
"build_dir" => "static"
},
# For developers - enables a swagger frontend to view the openapi spec
@@ -825,15 +793,13 @@
config :pleroma, :mrf,
policies: [Pleroma.Web.ActivityPub.MRF.ObjectAgePolicy, Pleroma.Web.ActivityPub.MRF.TagPolicy],
transparency: true,
transparency_exclusions: [],
transparency_obfuscate_domains: []
transparency_exclusions: []
config :ex_aws, http_client: Pleroma.HTTP.ExAws
config :web_push_encryption, http_client: Pleroma.HTTP.WebPush
config :pleroma, :instances_favicons, enabled: true
config :pleroma, :instances_nodeinfo, enabled: true
config :pleroma, :instances_favicons, enabled: false
config :floki, :html_parser, Floki.HTMLParser.FastHtml
@@ -850,8 +816,6 @@
{Pleroma.Search, [max_running: 30, max_waiting: 50]}
]
config :pleroma, Pleroma.Web.WebFinger, domain: nil, update_nickname_on_user_fetch: true
config :pleroma, Pleroma.Search, module: Pleroma.Search.DatabaseSearch
config :pleroma, Pleroma.Search.Meilisearch,
@@ -875,19 +839,6 @@
}
}
config :pleroma, :translator,
enabled: false,
module: Pleroma.Akkoma.Translators.DeepL
config :pleroma, :deepl,
# either :free or :pro
tier: :free,
api_key: ""
config :pleroma, :libre_translate,
url: "http://127.0.0.1:5000",
api_key: nil
# Import environment specific config. This must remain at the bottom
# of this file so it overrides the configuration defined above.
import_config "#{Mix.env()}.exs"

@@ -509,16 +509,6 @@
"Pleroma"
]
},
%{
key: :languages,
type: {:list, :string},
description: "Languages the instance uses",
suggestions: [
"en",
"ja",
"fr"
]
},
%{
key: :email,
label: "Admin Email Address",
@@ -691,8 +681,8 @@
key: :public,
type: :boolean,
description:
"Switching this on will allow unauthenticated users access to all public resources on your instance" <>
" Switching it off is useful for disabling the Local Timeline and The Whole Known Network. " <>
"Makes the client API in authenticated mode-only except for user-profiles." <>
" Useful for disabling the Local Timeline and The Whole Known Network. " <>
" Note: when setting to `false`, please also check `:restrict_unauthenticated` setting."
},
%{
@@ -701,7 +691,7 @@
key_placeholder: "instance",
value_placeholder: "reason",
description:
"(Deprecated, will be removed in next release) List of ActivityPub instances where activities will not be sent, and the reason for doing so",
"List of ActivityPub instances where private (DMs, followers-only) activities will not be sent and the reason for doing so",
suggestions: [
{"quarantined.com", "Reason"},
{"*.quarantined.com", "Reason"}
@@ -723,8 +713,7 @@
"text/plain",
"text/html",
"text/markdown",
"text/bbcode",
"text/x.misskeymarkdown"
"text/bbcode"
]
},
%{
@@ -957,13 +946,7 @@
key: :privileged_staff,
type: :boolean,
description:
"Let moderators access sensitive data (e.g. updating user credentials, get password reset token, delete users, index and read private statuses)"
},
%{
key: :local_bubble,
type: {:list, :string},
description:
"List of instances that make up your local bubble (closely-related instances). Used to populate the 'bubble' timeline (domain only)."
"Let moderators access sensitive data (e.g. updating user credentials, get password reset token, delete users, index and read private statuses and chats)"
}
]
},
@@ -1001,6 +984,35 @@
}
]
},
%{
key: :chat_message,
type: :keyword,
description: "Chat message settings",
children: [
%{
key: :enabled,
type: :boolean,
description: "Enables sending a chat message to newly registered users"
},
%{
key: :message,
type: :string,
description:
"A message that will be sent to newly registered users as a chat message",
suggestions: [
"Hello, welcome on board!"
]
},
%{
key: :sender_nickname,
type: :string,
description: "The nickname of the local user that sends a welcome chat message",
suggestions: [
"lain"
]
}
]
},
%{
key: :email,
type: :keyword,
@@ -1180,6 +1192,7 @@
hideFilteredStatuses: false,
hideMutedPosts: false,
hidePostStats: false,
hideSitename: false,
hideUserStats: false,
loginMethod: "password",
logo: "/static/logo.svg",
@@ -1227,13 +1240,6 @@
type: :boolean,
description: "Enables green text on lines prefixed with the > character"
},
%{
key: :conversationDisplay,
label: "Conversation display style",
type: :string,
description: "How to display conversations (linear or tree)",
suggestions: ["linear", "tree"]
},
%{
key: :hideFilteredStatuses,
label: "Hide Filtered Statuses",
@@ -1252,6 +1258,12 @@
type: :boolean,
description: "Hide notices statistics (repeats, favorites, ...)"
},
%{
key: :hideSitename,
label: "Hide Sitename",
type: :boolean,
description: "Hides instance name from PleromaFE banner"
},
%{
key: :hideUserStats,
label: "Hide user stats",
@@ -1282,6 +1294,14 @@
"By default it assumes logo used will be monochrome with alpha channel to be compatible with both light and dark themes. " <>
"If you want a colorful logo you must disable logoMask."
},
%{
key: :minimalScopesMode,
label: "Minimal scopes mode",
type: :boolean,
description:
"Limit scope selection to Direct, User default, and Scope of post replying to. " <>
"Also prevents replying to a DM with a public post from PleromaFE."
},
%{
key: :nsfwCensorImage,
label: "NSFW Censor Image",
@@ -1295,13 +1315,7 @@
label: "Post Content Type",
type: {:dropdown, :atom},
description: "Default post formatting option",
suggestions: [
"text/plain",
"text/html",
"text/markdown",
"text/bbcode",
"text/x.misskeymarkdown"
]
suggestions: ["text/plain", "text/html", "text/markdown", "text/bbcode"]
},
%{
key: :redirectRootNoLogin,
@@ -1359,48 +1373,6 @@
type: :string,
description: "Which theme to use. Available themes are defined in styles.json",
suggestions: ["pleroma-dark"]
},
%{
key: :showPanelNavShortcuts,
label: "Show timeline panel nav shortcuts",
type: :boolean,
description: "Whether to put timeline nav tabs on the top of the panel"
},
%{
key: :showNavShortcuts,
label: "Show navbar shortcuts",
type: :boolean,
description: "Whether to put extra navigation options on the navbar"
},
%{
key: :showWiderShortcuts,
label: "Increase navbar shortcut spacing",
type: :boolean,
description: "Whether to add extra space between navbar icons"
},
%{
key: :hideSiteFavicon,
label: "Hide site favicon",
type: :boolean,
description: "Whether to hide the instance favicon from the navbar"
},
%{
key: :hideSiteName,
label: "Hide site name",
type: :boolean,
description: "Whether to hide the site name from the navbar"
},
%{
key: :renderMisskeyMarkdown,
label: "Render misskey markdown",
type: :boolean,
description: "Whether to render Misskey-flavoured markdown"
},
%{
key: :stopGifs,
label: "Stop Gifs",
type: :boolean,
description: "Whether to pause animated images until they're hovered on"
}
]
},
@@ -1493,14 +1465,13 @@
%{
key: :theme_color,
type: :string,
description: "Describe the theme color of the app - this is only used for mastodon-fe",
description: "Describe the theme color of the app",
suggestions: ["#282c37", "mediumpurple"]
},
%{
key: :background_color,
type: :string,
description:
"Describe the background color of the app - this is only used for mastodon-fe",
description: "Describe the background color of the app",
suggestions: ["#191b22", "aliceblue"]
}
]
@@ -1707,11 +1678,6 @@
type: :boolean,
description: "Sign object fetches with HTTP signatures"
},
%{
key: :authorized_fetch_mode,
type: :boolean,
description: "Require HTTP signatures on AP fetches"
},
%{
key: :note_replies_output_limit,
type: :integer,
@@ -1756,7 +1722,14 @@
label: "STS max age",
type: :integer,
description: "The maximum age for the Strict-Transport-Security header if sent",
suggestions: [63_072_000]
suggestions: [31_536_000]
},
%{
key: :ct_max_age,
label: "CT max age",
type: :integer,
description: "The maximum age for the Expect-CT header if sent",
suggestions: [2_592_000]
},
%{
key: :referrer_policy,
@@ -1978,32 +1951,6 @@
federator_incoming: 5,
federator_outgoing: 5
]
},
%{
key: :timeout,
type: {:keyword, :integer},
description: "Timeout for jobs, per `Oban` queue, in ms",
suggestions: [
activity_expiration: :timer.seconds(5),
token_expiration: :timer.seconds(5),
filter_expiration: :timer.seconds(5),
backup: :timer.seconds(900),
federator_incoming: :timer.seconds(10),
federator_outgoing: :timer.seconds(10),
ingestion_queue: :timer.seconds(5),
web_push: :timer.seconds(5),
mailer: :timer.seconds(5),
transmogrifier: :timer.seconds(5),
scheduled_activities: :timer.seconds(5),
poll_notifications: :timer.seconds(5),
background: :timer.seconds(5),
remote_fetcher: :timer.seconds(10),
attachments_cleanup: :timer.seconds(900),
new_users_digest: :timer.seconds(10),
mute_expire: :timer.seconds(5),
search_indexing: :timer.seconds(5),
nodeinfo_fetcher: :timer.seconds(10)
]
}
]
},
@@ -2658,6 +2605,27 @@
}
]
},
%{
group: :pleroma,
key: :shout,
type: :group,
description: "Pleroma shout settings",
children: [
%{
key: :enabled,
type: :boolean,
description: "Enables the backend Shoutbox chat feature."
},
%{
key: :limit,
type: :integer,
description: "Shout message character limit.",
suggestions: [
5_000
]
}
]
},
%{
group: :pleroma,
key: :http,
@@ -2665,28 +2633,12 @@
type: :group,
description: "HTTP settings",
children: [
%{
key: :pool_timeout,
label: "HTTP Pool Request Timeout",
type: :integer,
description: "Timeout for initiating HTTP requests (in ms, default 5000)",
suggestions: [5000]
},
%{
key: :receive_timeout,
label: "HTTP Receive Timeout",
type: :integer,
description:
"Timeout for waiting on remote servers to respond to HTTP requests (in ms, default 15000)",
suggestions: [15000]
},
%{
key: :proxy_url,
label: "Proxy URL",
type: :string,
description:
"Proxy URL - of the format http://host:port. Advise setting in .exs instead of admin-fe due to this being set at boot-time.",
suggestions: ["http://localhost:3128"]
type: [:string, :tuple],
description: "Proxy URL",
suggestions: ["localhost:9020", {:socks5, :localhost, 3090}]
},
%{
key: :user_agent,
@@ -3005,7 +2957,8 @@
key: :restrict_unauthenticated,
label: "Restrict Unauthenticated",
type: :group,
description: "Disallow unauthenticated viewing of timelines, user profiles and statuses.",
description:
"Disallow viewing timelines, user profiles and statuses for unauthenticated users.",
children: [
%{
key: :timelines,
@@ -3015,12 +2968,12 @@
%{
key: :local,
type: :boolean,
description: "Disallow viewing the public timeline."
description: "Disallow view public timeline."
},
%{
key: :federated,
type: :boolean,
description: "Disallow viewing the whole known network timeline."
description: "Disallow view federated timeline."
}
]
},
@@ -3032,29 +2985,29 @@
%{
key: :local,
type: :boolean,
description: "Disallow viewing local user profiles."
description: "Disallow view local user profiles."
},
%{
key: :remote,
type: :boolean,
description: "Disallow viewing remote user profiles."
description: "Disallow view remote user profiles."
}
]
},
%{
key: :activities,
type: :map,
description: "Settings for posts.",
description: "Settings for statuses.",
children: [
%{
key: :local,
type: :boolean,
description: "Disallow viewing local posts."
description: "Disallow view local statuses."
},
%{
key: :remote,
type: :boolean,
description: "Disallow viewing remote posts."
description: "Disallow view remote statuses."
}
]
}
@@ -3086,19 +3039,6 @@
}
]
},
%{
group: :pleroma,
key: :instances_nodeinfo,
type: :group,
description: "Control favicons for instances",
children: [
%{
key: :enabled,
type: :boolean,
description: "Allow/disallow getting instance nodeinfo"
}
]
},
%{
group: :ex_aws,
key: :s3,
@@ -3149,12 +3089,6 @@
description: "Admin frontend",
children: installed_frontend_options
},
%{
key: :mastodon,
type: :map,
description: "Mastodon frontend",
children: installed_frontend_options
},
%{
key: :swagger,
type: :map,
@@ -3232,6 +3166,43 @@
}
]
},
%{
group: :prometheus,
key: Pleroma.Web.Endpoint.MetricsExporter,
type: :group,
description: "Prometheus app metrics endpoint configuration",
children: [
%{
key: :enabled,
type: :boolean,
description: "[Pleroma extension] Enables app metrics endpoint."
},
%{
key: :ip_whitelist,
label: "IP Whitelist",
type: [{:list, :string}, {:list, :charlist}, {:list, :tuple}],
description: "Restrict access of app metrics endpoint to the specified IP addresses."
},
%{
key: :auth,
type: [:boolean, :tuple],
description: "Enables HTTP Basic Auth for app metrics endpoint.",
suggestion: [false, {:basic, "myusername", "mypassword"}]
},
%{
key: :path,
type: :string,
description: "App metrics endpoint URI path.",
suggestions: ["/api/pleroma/app_metrics"]
},
%{
key: :format,
type: :atom,
description: "App metrics endpoint output format.",
suggestions: [:text, :protobuf]
}
]
},
%{
group: :pleroma,
key: ConcurrentLimiter,
@@ -3284,14 +3255,13 @@
group: :pleroma,
key: Pleroma.Search,
type: :group,
label: "Search",
description: "General search settings.",
children: [
%{
key: :module,
type: :module,
type: :keyword,
description: "Selected search module.",
suggestions: {:list_behaviour_implementations, Pleroma.Search.SearchBackend}
suggestion: [Pleroma.Search.DatabaseSearch, Pleroma.Search.Meilisearch]
}
]
},
@@ -3316,7 +3286,7 @@
},
%{
key: :initial_indexing_chunk_size,
type: :integer,
type: :int,
description:
"Amount of posts in a batch when running the initial indexing operation. Should probably not be more than 100000" <>
" since there's a limit on maximum insert size",
@@ -3327,7 +3297,6 @@
%{
group: :pleroma,
key: Pleroma.Search.Elasticsearch.Cluster,
label: "Elasticsearch",
type: :group,
description: "Elasticsearch settings.",
children: [
@@ -3394,13 +3363,13 @@
},
%{
key: :bulk_page_size,
type: :integer,
type: :int,
description: "Size for bulk put requests, mostly used on building the index",
suggestion: [5000]
},
%{
key: :bulk_wait_interval,
type: :integer,
type: :int,
description: "Time to wait between bulk put requests (in ms)",
suggestion: [15_000]
}
@@ -3409,66 +3378,5 @@
]
}
]
},
%{
group: :pleroma,
key: :translator,
type: :group,
description: "Translation Settings",
children: [
%{
key: :enabled,
type: :boolean,
description: "Is translation enabled?",
suggestion: [true, false]
},
%{
key: :module,
type: :module,
description: "Translation module.",
suggestions: {:list_behaviour_implementations, Pleroma.Akkoma.Translator}
}
]
},
%{
group: :pleroma,
key: :deepl,
label: "DeepL",
type: :group,
description: "DeepL Settings.",
children: [
%{
key: :tier,
type: {:dropdown, :atom},
description: "API Tier",
suggestions: [:free, :pro]
},
%{
key: :api_key,
type: :string,
description: "API key for DeepL",
suggestions: [nil]
}
]
},
%{
group: :pleroma,
key: :libre_translate,
type: :group,
description: "LibreTranslate Settings.",
children: [
%{
key: :url,
type: :string,
description: "URL for libretranslate",
suggestion: [nil]
},
%{
key: :api_key,
type: :string,
description: "API key for libretranslate",
suggestion: [nil]
}
]
}
]

@@ -24,11 +24,11 @@
config :web_push_encryption, :vapid_details, subject: "mailto:#{System.get_env("NOTIFY_EMAIL")}"
config :pleroma, :database, rum_enabled: false
config :pleroma, :instance, static_dir: "/var/lib/akkoma/static"
config :pleroma, Pleroma.Uploaders.Local, uploads: "/var/lib/akkoma/uploads"
config :pleroma, :instance, static_dir: "/var/lib/pleroma/static"
config :pleroma, Pleroma.Uploaders.Local, uploads: "/var/lib/pleroma/uploads"
# We can't store the secrets in this file, since this is baked into the docker image
if not File.exists?("/var/lib/akkoma/secret.exs") do
if not File.exists?("/var/lib/pleroma/secret.exs") do
secret = :crypto.strong_rand_bytes(64) |> Base.encode64() |> binary_part(0, 64)
signing_salt = :crypto.strong_rand_bytes(8) |> Base.encode64() |> binary_part(0, 8)
{web_push_public_key, web_push_private_key} = :crypto.generate_key(:ecdh, :prime256v1)
@@ -52,16 +52,16 @@
web_push_private_key: Base.url_encode64(web_push_private_key, padding: false)
)
File.write("/var/lib/akkoma/secret.exs", secret_file)
File.write("/var/lib/pleroma/secret.exs", secret_file)
end
import_config("/var/lib/akkoma/secret.exs")
import_config("/var/lib/pleroma/secret.exs")
# For additional user config
if File.exists?("/var/lib/akkoma/config.exs"),
do: import_config("/var/lib/akkoma/config.exs"),
if File.exists?("/var/lib/pleroma/config.exs"),
do: import_config("/var/lib/pleroma/config.exs"),
else:
File.write("/var/lib/akkoma/config.exs", """
File.write("/var/lib/pleroma/config.exs", """
import Config
# For additional configuration outside of environmental variables

config/emoji.txt Normal file

@@ -0,0 +1,4 @@
firefox, /emoji/Firefox.gif, Gif,Fun
blank, /emoji/blank.png, Fun
dinosaur, /emoji/dino walking.gif, Gif
100a, /emoji/100a.png, Fun

@@ -126,8 +126,6 @@
config :pleroma, :cachex, provider: Pleroma.CachexMock
config :pleroma, Pleroma.Web.WebFinger, update_nickname_on_user_fetch: false
config :pleroma, :side_effects,
ap_streamer: Pleroma.Web.ActivityPub.ActivityPubMock,
logger: Pleroma.LoggerMock
@@ -139,8 +137,6 @@
# Reduce recompilation time
# https://dashbit.co/blog/speeding-up-re-compilation-of-elixir-projects
config :phoenix, :plug_init_mode, :runtime
config :pleroma, :instances_favicons, enabled: false
config :pleroma, :instances_nodeinfo, enabled: false
if File.exists?("./config/test.secret.exs") do
import_config "test.secret.exs"

@@ -1,61 +0,0 @@
version: "3.7"
services:
db:
image: akkoma-db:latest
build: ./docker-resources/database
restart: unless-stopped
user: ${DOCKER_USER}
environment: {
# This might seem insecure but is usually not a problem.
# You should leave this at the "akkoma" default.
# The DB is only reachable by containers in the same docker network,
# and is not exposed to the open internet.
#
# If you do change this, remember to update "config.exs".
POSTGRES_DB: akkoma,
POSTGRES_USER: akkoma,
POSTGRES_PASSWORD: akkoma,
}
env_file:
- .env
volumes:
- type: bind
source: ./pgdata
target: /var/lib/postgresql/data
akkoma:
image: akkoma:latest
build: .
restart: unless-stopped
env_file:
- .env
links:
- db
ports: [
# Uncomment/Change port mappings below as needed.
# The left side is your host machine, the right one is the akkoma container.
# You can prefix the left side with an ip.
# Webserver (for reverse-proxies outside of docker)
# If you use a dockerized proxy, you can leave this commented
# and use a container link instead.
"127.0.0.1:4000:4000",
]
volumes:
- .:/opt/akkoma
# Uncomment the following if you want to use a reverse proxy
#proxy:
# image: caddy:2-alpine
# restart: unless-stopped
# links:
# - akkoma
# ports: [
# "443:443",
# "80:80"
# ]
# volumes:
# - ./docker-resources/Caddyfile:/etc/caddy/Caddyfile
# - ./caddy-data:/data
# - ./caddy-config:/config

@@ -8,7 +8,7 @@ while ! pg_isready -U ${DB_USER:-pleroma} -d postgres://${DB_HOST:-db}:5432/${DB
done
echo "-- Running migrations..."
mix ecto.migrate
$HOME/bin/pleroma_ctl migrate
echo "-- Starting!"
mix phx.server
exec $HOME/bin/pleroma start

@@ -1,14 +0,0 @@
# default docker Caddyfile config for Akkoma
#
# Simple installation instructions:
# 1. Replace 'example.tld' with your instance's domain wherever it appears.
example.tld {
log {
output file /var/log/caddy/akkoma.log
}
encode gzip
reverse_proxy akkoma:4000
}

@@ -1,4 +0,0 @@
#!/bin/sh
docker-compose build --build-arg UID=$(id -u) --build-arg GID=$(id -g) akkoma
docker-compose build --build-arg UID=$(id -u) --build-arg GID=$(id -g) db

View file

@ -1,10 +0,0 @@
FROM postgres:14-alpine
ARG UID=1000
ARG GID=1000
ARG UNAME=akkoma
RUN addgroup -g $GID $UNAME
RUN adduser -u $UID -G $UNAME -D -h $HOME $UNAME
USER akkoma

View file

@ -1,4 +0,0 @@
MIX_ENV=prod
DB_NAME=akkoma
DB_USER=akkoma
DB_PASS=akkoma

View file

@ -1,3 +0,0 @@
#!/bin/sh
docker-compose run --rm akkoma $@

View file

@ -1,14 +1,7 @@
all: install
pipenv run mkdocs build
branch := $(shell git rev-parse --abbrev-ref HEAD)
install:
pipenv install
clean:
rm -rf site
serve:
pipenv run python3 -m http.server -d site
zip:
zip -r docs.zip site/*
deploy:
cd site && rclone copy . scaleway:akkoma-docs/$(branch)

165
docs/Pipfile.lock generated
View file

@ -14,22 +14,6 @@
]
},
"default": {
"certifi": {
"hashes": [
"sha256:0d9c601124e5a6ba9712dbc60d9c53c21e34f5f641fe83002317394311bdce14",
"sha256:90c1a32f1d68f940488354e36370f6cca89f0f106db09518524c88d6ed83f382"
],
"markers": "python_full_version >= '3.6.0'",
"version": "==2022.9.24"
},
"charset-normalizer": {
"hashes": [
"sha256:5a3d016c7c547f69d6f81fb0db9449ce888b418b5b9952cc5e6e66843e9dd845",
"sha256:83e9a75d1911279afd89352c68b45348559d1fc0506b054b346651b5e7fee29f"
],
"markers": "python_full_version >= '3.6.0'",
"version": "==2.1.1"
},
"click": {
"hashes": [
"sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e",
@ -45,13 +29,13 @@
],
"version": "==2.1.0"
},
"idna": {
"importlib-metadata": {
"hashes": [
"sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4",
"sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"
"sha256:637245b8bab2b6502fcbc752cc4b7a6f6243bb02b31c5c26156ad103d3d45670",
"sha256:7401a975809ea1fdc658c3aa4f78cc2195a0e019c5cbc4c06122884e9ae80c23"
],
"markers": "python_version >= '3.5'",
"version": "==3.4"
"markers": "python_version >= '3.7'",
"version": "==4.12.0"
},
"jinja2": {
"hashes": [
@ -66,16 +50,15 @@
"sha256:cbb516f16218e643d8e0a95b309f77eb118cb138d39a4f27851e6a63581db874",
"sha256:f5da449a6e1c989a4cea2631aa8ee67caa5a2ef855d551c88f9e309f4634c621"
],
"markers": "python_full_version >= '3.6.0'",
"markers": "python_version >= '3.6'",
"version": "==3.3.7"
},
"markdown-include": {
"hashes": [
"sha256:b8f6b6f4e8b506cbe773d7e26c74a97d1354c35f3a3452d3449140a8f578d665",
"sha256:d12fb51500c46334a53608635035c78b7d8ad7f772566f70b8a6a9b2ef2ddbf5"
"sha256:6f5d680e36f7780c7f0f61dca53ca581bd50d1b56137ddcd6353efafa0c3e4a2"
],
"index": "pypi",
"version": "==0.8.0"
"version": "==0.6.0"
},
"markupsafe": {
"hashes": [
@ -128,56 +111,56 @@
"sha256:0096d52e9dad9939c3d975a774666af186eda617e6ca84df4c94dec30004f2a8",
"sha256:70775750742b25c0d8f36c55aed03d24c3384d17c951b3175d898bd778ef0307"
],
"markers": "python_full_version >= '3.6.0'",
"markers": "python_version >= '3.6'",
"version": "==1.3.4"
},
"mkdocs": {
"hashes": [
"sha256:8947af423a6d0facf41ea1195b8e1e8c85ad94ac95ae307fe11232e0424b11c5",
"sha256:c8856a832c1e56702577023cd64cc5f84948280c1c0fcc6af4cd39006ea6aa8c"
"sha256:26bd2b03d739ac57a3e6eed0b7bcc86168703b719c27b99ad6ca91dc439aacde",
"sha256:b504405b04da38795fec9b2e5e28f6aa3a73bb0960cb6d5d27ead28952bd35ea"
],
"markers": "python_version >= '3.7'",
"version": "==1.4.2"
"markers": "python_version >= '3.6'",
"version": "==1.3.0"
},
"mkdocs-material": {
"hashes": [
"sha256:b0ea0513fd8cab323e8a825d6692ea07fa83e917bb5db042e523afecc7064ab7",
"sha256:c907b4b052240a5778074a30a78f31a1f8ff82d7012356dc26898b97559f082e"
"sha256:263f2721f3abe533b61f7c8bed435a0462620912742c919821ac2d698b4bfe67",
"sha256:dc82b667d2a83f0de581b46a6d0949732ab77e7638b87ea35b770b33bc02e75a"
],
"index": "pypi",
"version": "==8.5.11"
"version": "==8.3.9"
},
"mkdocs-material-extensions": {
"hashes": [
"sha256:9c003da71e2cc2493d910237448c672e00cefc800d3d6ae93d2fc69979e3bd93",
"sha256:e41d9f38e4798b6617ad98ca8f7f1157b1e4385ac1459ca1e4ea219b556df945"
"sha256:a82b70e533ce060b2a5d9eb2bc2e1be201cf61f901f93704b4acf6e3d5983a44",
"sha256:bfd24dfdef7b41c312ede42648f9eb83476ea168ec163b613f9abd12bbfddba2"
],
"markers": "python_version >= '3.7'",
"version": "==1.1.1"
"markers": "python_version >= '3.6'",
"version": "==1.0.3"
},
"packaging": {
"hashes": [
"sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb",
"sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522"
],
"markers": "python_full_version >= '3.6.0'",
"markers": "python_version >= '3.6'",
"version": "==21.3"
},
"pygments": {
"hashes": [
"sha256:56a8508ae95f98e2b9bdf93a6be5ae3f7d8af858b43e02c5a2ff083726be40c1",
"sha256:f643f331ab57ba3c9d89212ee4a2dabc6e94f117cf4eefde99a0574720d14c42"
"sha256:5eb116118f9612ff1ee89ac96437bb6b49e8f04d8a13b514ba26f620208e26eb",
"sha256:dc9c10fb40944260f6ed4c688ece0cd2048414940f1cea51b8b226318411c519"
],
"markers": "python_full_version >= '3.6.0'",
"version": "==2.13.0"
"markers": "python_version >= '3.6'",
"version": "==2.12.0"
},
"pymdown-extensions": {
"hashes": [
"sha256:0f8fb7b74a37a61cc34e90b2c91865458b713ec774894ffad64353a5fce85cfc",
"sha256:ac698c15265680db5eb13cd4342abfcde2079ac01e5486028f47a1b41547b859"
"sha256:3ef2d998c0d5fa7eb09291926d90d69391283561cf6306f85cd588a5eb5befa0",
"sha256:ec141c0f4983755349f0c8710416348d1a13753976c028186ed14f190c8061c4"
],
"markers": "python_version >= '3.7'",
"version": "==9.9"
"version": "==9.5"
},
"pyparsing": {
"hashes": [
@ -197,7 +180,6 @@
},
"pyyaml": {
"hashes": [
"sha256:01b45c0191e6d66c470b6cf1b9531a771a83c1c4208272ead47a3ae4f2f603bf",
"sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293",
"sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b",
"sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57",
@ -209,36 +191,30 @@
"sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287",
"sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513",
"sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0",
"sha256:432557aa2c09802be39460360ddffd48156e30721f5e8d917f01d31694216782",
"sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0",
"sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92",
"sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f",
"sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2",
"sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc",
"sha256:81957921f441d50af23654aa6c5e5eaf9b06aba7f0a19c18a538dc7ef291c5a1",
"sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c",
"sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86",
"sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4",
"sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c",
"sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34",
"sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b",
"sha256:afa17f5bc4d1b10afd4466fd3a44dc0e245382deca5b3c353d8b757f9e3ecb8d",
"sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c",
"sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb",
"sha256:bfaef573a63ba8923503d27530362590ff4f576c626d86a9fed95822a8255fd7",
"sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737",
"sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3",
"sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d",
"sha256:d4b0ba9512519522b118090257be113b9468d804b19d63c71dbcf4a48fa32358",
"sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53",
"sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78",
"sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803",
"sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a",
"sha256:dbad0e9d368bb989f4515da330b88a057617d16b6a8245084f1b05400f24609f",
"sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174",
"sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5"
],
"markers": "python_full_version >= '3.6.0'",
"markers": "python_version >= '3.6'",
"version": "==6.0"
},
"pyyaml-env-tag": {
@ -246,17 +222,9 @@
"sha256:70092675bda14fdec33b31ba77e7543de9ddc88f2e5b99160396572d11525bdb",
"sha256:af31106dec8a4d68c60207c1886031cbf839b68aa7abccdb19868200532c2069"
],
"markers": "python_full_version >= '3.6.0'",
"markers": "python_version >= '3.6'",
"version": "==0.1"
},
"requests": {
"hashes": [
"sha256:7c5599b102feddaa661c826c56ab4fee28bfd17f5abca1ebbe3e7f19d7c97983",
"sha256:8fefa2a1a1365bf5520aac41836fbee479da67864514bdb821f31ce07ce65349"
],
"markers": "python_version >= '3.7' and python_version < '4'",
"version": "==2.28.1"
},
"six": {
"hashes": [
"sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926",
@ -265,47 +233,44 @@
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
"version": "==1.16.0"
},
"urllib3": {
"hashes": [
"sha256:47cc05d99aaa09c9e72ed5809b60e7ba354e64b59c9c173ac3018642d8bb41fc",
"sha256:c083dd0dce68dbfbe1129d5271cb90f9447dea7d52097c6e0126120c521ddea8"
],
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4, 3.5'",
"version": "==1.26.13"
},
"watchdog": {
"hashes": [
"sha256:1893d425ef4fb4f129ee8ef72226836619c2950dd0559bba022b0818c63a7b60",
"sha256:1a410dd4d0adcc86b4c71d1317ba2ea2c92babaf5b83321e4bde2514525544d5",
"sha256:1f2b0665c57358ce9786f06f5475bc083fea9d81ecc0efa4733fd0c320940a37",
"sha256:1f8eca9d294a4f194ce9df0d97d19b5598f310950d3ac3dd6e8d25ae456d4c8a",
"sha256:27e49268735b3c27310883012ab3bd86ea0a96dcab90fe3feb682472e30c90f3",
"sha256:28704c71afdb79c3f215c90231e41c52b056ea880b6be6cee035c6149d658ed1",
"sha256:2ac0bd7c206bb6df78ef9e8ad27cc1346f2b41b1fef610395607319cdab89bc1",
"sha256:2af1a29fd14fc0a87fb6ed762d3e1ae5694dcde22372eebba50e9e5be47af03c",
"sha256:3a048865c828389cb06c0bebf8a883cec3ae58ad3e366bcc38c61d8455a3138f",
"sha256:441024df19253bb108d3a8a5de7a186003d68564084576fecf7333a441271ef7",
"sha256:56fb3f40fc3deecf6e518303c7533f5e2a722e377b12507f6de891583f1b48aa",
"sha256:619d63fa5be69f89ff3a93e165e602c08ed8da402ca42b99cd59a8ec115673e1",
"sha256:74535e955359d79d126885e642d3683616e6d9ab3aae0e7dcccd043bd5a3ff4f",
"sha256:76a2743402b794629a955d96ea2e240bd0e903aa26e02e93cd2d57b33900962b",
"sha256:83cf8bc60d9c613b66a4c018051873d6273d9e45d040eed06d6a96241bd8ec01",
"sha256:920a4bda7daa47545c3201a3292e99300ba81ca26b7569575bd086c865889090",
"sha256:9e99c1713e4436d2563f5828c8910e5ff25abd6ce999e75f15c15d81d41980b6",
"sha256:a5bd9e8656d07cae89ac464ee4bcb6f1b9cecbedc3bf1334683bed3d5afd39ba",
"sha256:ad0150536469fa4b693531e497ffe220d5b6cd76ad2eda474a5e641ee204bbb6",
"sha256:af4b5c7ba60206759a1d99811b5938ca666ea9562a1052b410637bb96ff97512",
"sha256:c7bd98813d34bfa9b464cf8122e7d4bec0a5a427399094d2c17dd5f70d59bc61",
"sha256:ceaa9268d81205876bedb1069f9feab3eccddd4b90d9a45d06a0df592a04cae9",
"sha256:cf05e6ff677b9655c6e9511d02e9cc55e730c4e430b7a54af9c28912294605a4",
"sha256:d0fb5f2b513556c2abb578c1066f5f467d729f2eb689bc2db0739daf81c6bb7e",
"sha256:d6ae890798a3560688b441ef086bb66e87af6b400a92749a18b856a134fc0318",
"sha256:e5aed2a700a18c194c39c266900d41f3db0c1ebe6b8a0834b9995c835d2ca66e",
"sha256:e722755d995035dd32177a9c633d158f2ec604f2a358b545bba5bed53ab25bca",
"sha256:ed91c3ccfc23398e7aa9715abf679d5c163394b8cad994f34f156d57a7c163dc"
"sha256:083171652584e1b8829581f965b9b7723ca5f9a2cd7e20271edf264cfd7c1412",
"sha256:117ffc6ec261639a0209a3252546b12800670d4bf5f84fbd355957a0595fe654",
"sha256:186f6c55abc5e03872ae14c2f294a153ec7292f807af99f57611acc8caa75306",
"sha256:195fc70c6e41237362ba720e9aaf394f8178bfc7fa68207f112d108edef1af33",
"sha256:226b3c6c468ce72051a4c15a4cc2ef317c32590d82ba0b330403cafd98a62cfd",
"sha256:247dcf1df956daa24828bfea5a138d0e7a7c98b1a47cf1fa5b0c3c16241fcbb7",
"sha256:255bb5758f7e89b1a13c05a5bceccec2219f8995a3a4c4d6968fe1de6a3b2892",
"sha256:43ce20ebb36a51f21fa376f76d1d4692452b2527ccd601950d69ed36b9e21609",
"sha256:4f4e1c4aa54fb86316a62a87b3378c025e228178d55481d30d857c6c438897d6",
"sha256:5952135968519e2447a01875a6f5fc8c03190b24d14ee52b0f4b1682259520b1",
"sha256:64a27aed691408a6abd83394b38503e8176f69031ca25d64131d8d640a307591",
"sha256:6b17d302850c8d412784d9246cfe8d7e3af6bcd45f958abb2d08a6f8bedf695d",
"sha256:70af927aa1613ded6a68089a9262a009fbdf819f46d09c1a908d4b36e1ba2b2d",
"sha256:7a833211f49143c3d336729b0020ffd1274078e94b0ae42e22f596999f50279c",
"sha256:8250546a98388cbc00c3ee3cc5cf96799b5a595270dfcfa855491a64b86ef8c3",
"sha256:97f9752208f5154e9e7b76acc8c4f5a58801b338de2af14e7e181ee3b28a5d39",
"sha256:9f05a5f7c12452f6a27203f76779ae3f46fa30f1dd833037ea8cbc2887c60213",
"sha256:a735a990a1095f75ca4f36ea2ef2752c99e6ee997c46b0de507ba40a09bf7330",
"sha256:ad576a565260d8f99d97f2e64b0f97a48228317095908568a9d5c786c829d428",
"sha256:b530ae007a5f5d50b7fbba96634c7ee21abec70dc3e7f0233339c81943848dc1",
"sha256:bfc4d351e6348d6ec51df007432e6fe80adb53fd41183716017026af03427846",
"sha256:d3dda00aca282b26194bdd0adec21e4c21e916956d972369359ba63ade616153",
"sha256:d9820fe47c20c13e3c9dd544d3706a2a26c02b2b43c993b62fcd8011bcc0adb3",
"sha256:ed80a1628cee19f5cfc6bb74e173f1b4189eb532e705e2a13e3250312a62e0c9",
"sha256:ee3e38a6cc050a8830089f79cbec8a3878ec2fe5160cdb2dc8ccb6def8552658"
],
"markers": "python_full_version >= '3.6.0'",
"version": "==2.2.0"
"markers": "python_version >= '3.6'",
"version": "==2.1.9"
},
"zipp": {
"hashes": [
"sha256:56bf8aadb83c24db6c4b577e13de374ccfb67da2078beba1d037c17980bf43ad",
"sha256:c4f6e5bbf48e74f7a38e7cc5b0480ff42b0ae5178957d564d18932525d5cf099"
],
"markers": "python_version >= '3.7'",
"version": "==3.8.0"
}
},
"develop": {}

View file

@ -159,23 +159,3 @@ Change `default_text_search_config` for database and (if necessary) text_search_
```
See [PostgreSQL documentation](https://www.postgresql.org/docs/current/textsearch-configuration.html) and `docs/configuration/howto_search_cjk.md` for more detail.
## Pruning old activities
Over time, transient `Delete` activities and `Tombstone` objects
can accumulate in your database, inflating its size. This is not ideal.
There is a periodic task to prune these transient objects,
but on first run this may take a while on older instances to catch up
to the current day.
=== "OTP"
```sh
./bin/pleroma_ctl database prune_task
```
=== "From Source"
```sh
mix pleroma.database prune_task
```

View file

@ -1,30 +0,0 @@
# Diagnostics
A few tasks to help with debugging, troubleshooting, and diagnosing problems.
They mostly relate to common postgres queries.
## Home timeline query plan
This task will print a query plan for the home timeline of a given user.
=== "OTP"
`./bin/pleroma_ctl diagnostics home_timeline <nickname>`
=== "From Source"
`mix pleroma.diagnostics home_timeline <nickname>`
## User timeline query plan
This task will print a query plan for the user timeline of a given user,
from the perspective of another given user.
=== "OTP"
`./bin/pleroma_ctl diagnostics user_timeline <nickname> <viewing_nickname>`
=== "From Source"
`mix pleroma.diagnostics user_timeline <nickname> <viewing_nickname>`

View file

@ -300,28 +300,3 @@
```sh
mix pleroma.user unconfirm_all
```
## Fix following state
Sometimes the system can get into a situation where
it thinks you're already following someone and won't send a request
to the remote instance, or won't let you unfollow someone. This
bug was fixed, but in case you encounter these weird states:
=== "OTP"
```sh
./bin/pleroma_ctl user fix_follow_state localuser remoteuser@example.com
```
=== "From Source"
```sh
mix pleroma.user fix_follow_state localuser remoteuser@example.com
```
The first argument is the local user's nickname - if you are `myuser@myinstance`, this should be `myuser`.
The second is the remote user, consisting of both nickname AND domain.
If you are in a weird follow state situation and cannot resolve it with the above, you may need to co-operate with the remote admin to clear the state on their side too - they should provide the arguments *backwards*, i.e. `fix_follow_state remote local`.

View file

@ -4,62 +4,38 @@
1. Stop the Akkoma service.
2. Go to the working directory of Akkoma (default is `/opt/akkoma`)
3. Run[¹] `sudo -Hu postgres pg_dump -d akkoma --format=custom -f </path/to/backup_location/akkoma.pgdump>` (make sure the postgres user has write access to the destination file)
4. Copy `akkoma.pgdump`, `config/prod.secret.exs`[²], `config/setup_db.psql` (if still available) and the `uploads` folder to your backup destination. If you have other modifications, copy those changes too.
3. Run `sudo -Hu postgres pg_dump -d <akkoma_db> --format=custom -f </path/to/backup_location/akkoma.pgdump>` (make sure the postgres user has write access to the destination file)
4. Copy `akkoma.pgdump`, `config/prod.secret.exs`, `config/setup_db.psql` (if still available) and the `uploads` folder to your backup destination. If you have other modifications, copy those changes too.
5. Restart the Akkoma service.
[¹]: We assume the database name is "akkoma". If not, you can find the correct name in your config files.
[²]: If you've installed using OTP, you need `config/config.exs` instead of `config/prod.secret.exs`.
## Restore/Move
1. Optionally reinstall Akkoma (either on the same server or on another server if you want to move servers).
2. Stop the Akkoma service.
3. Go to the working directory of Akkoma (default is `/opt/akkoma`)
4. Copy the above mentioned files back to their original position.
5. Drop the existing database and user if restoring in-place[¹]. `sudo -Hu postgres psql -c 'DROP DATABASE akkoma;';` `sudo -Hu postgres psql -c 'DROP USER akkoma;'`
6. Restore the database schema and akkoma role using either of the following options
* You can use the original `setup_db.psql` if you have it[²]: `sudo -Hu postgres psql -f config/setup_db.psql`.
* Or recreate the database and user yourself (replace the password with the one you find in the config file) `sudo -Hu postgres psql -c "CREATE USER akkoma WITH ENCRYPTED PASSWORD '<database-password-which-you-can-find-in-your-config-file>'; CREATE DATABASE akkoma OWNER akkoma;"`.
7. Now restore the Akkoma instance's data into the empty database schema[¹][³]: `sudo -Hu postgres pg_restore -d akkoma -v -1 </path/to/backup_location/akkoma.pgdump>`
8. If you installed a newer Akkoma version, you should run `MIX_ENV=prod mix ecto.migrate`[⁴]. This task performs database migrations, if there were any.
5. Drop the existing database and user if restoring in-place. `sudo -Hu postgres psql -c 'DROP DATABASE <akkoma_db>;';` `sudo -Hu postgres psql -c 'DROP USER <akkoma_db>;'`
6. Restore the database schema and akkoma postgres role with the original `setup_db.psql` if you have it: `sudo -Hu postgres psql -f config/setup_db.psql`.
Alternatively, run the `mix pleroma.instance gen` task again. You can ignore most of the questions, but make the database user, name, and password the same as found in your backup of `config/prod.secret.exs`. Then run the restoration of the akkoma role and schema with the generated `config/setup_db.psql` as instructed above. You may delete the `config/generated_config.exs` file as it is not needed.
7. Now restore the Akkoma instance's data into the empty database schema: `sudo -Hu postgres pg_restore -d <akkoma_db> -v -1 </path/to/backup_location/akkoma.pgdump>`
8. If you installed a newer Akkoma version, you should run `mix ecto.migrate`[^1]. This task performs database migrations, if there were any.
9. Restart the Akkoma service.
10. Run `sudo -Hu postgres vacuumdb --all --analyze-in-stages`. This will quickly generate the statistics so that postgres can properly plan queries.
11. If setting up on a new server configure Nginx by using the `installation/akkoma.nginx` config sample or reference the Akkoma installation guide for your OS which contains the Nginx configuration instructions.
[¹]: We assume the database name and user are both "akkoma". If not, you can find the correct name in your config files.
[²]: You can recreate the `config/setup_db.psql` by running the `mix pleroma.instance gen` task again. You can ignore most of the questions, but make the database user, name, and password the same as found in your backed up config file. This will also create a new `config/generated_config.exs` file which you may delete as it is not needed.
[³]: `pg_restore` will add data before adding indexes. The indexes are added in alphabetical order. There's one index, `activities_visibility_index`, which may take a long time because it can't make use of an index that's only added later. You can significantly speed up restoration by skipping this index and adding it afterwards. For that, you can do the following (we assume the akkoma.pgdump is in the directory where you're running the commands):
```sh
pg_restore -l akkoma.pgdump > db.list
# Comment out the step for creating activities_visibility_index by adding a semicolon at the start of the line
sed -i -E 's/(.*activities_visibility_index.*)/;\1/' db.list
# We restore the database using the db.list list-file
sudo -Hu postgres pg_restore -L db.list -d akkoma -v -1 akkoma.pgdump
# You can see the sql statement with which to create the index using
grep -Eao 'CREATE INDEX activities_visibility_index.*' akkoma.pgdump
# Then create the index manually
# Make sure that the command to create is correct! You never know it has changed since writing this guide
sudo -Hu postgres psql -d pleroma_ynh -c "CREATE INDEX activities_visibility_index ON public.activities USING btree (public.activity_visibility(actor, recipients, data), id DESC NULLS LAST) WHERE ((data ->> 'type'::text) = 'Create'::text);"
```
[⁴]: Prefix with `MIX_ENV=prod` to run it using the production config file.
[^1]: Prefix with `MIX_ENV=prod` to run it using the production config file.
## Remove
1. Optionally you can remove the users of your instance. This will trigger delete requests for their accounts and posts. Note that this is 'best effort' and doesn't mean that all traces of your instance will be gone from the fediverse.
* You can do this from the admin-FE where you can select all local users and delete the accounts using the *Moderate multiple users* dropdown.
* You can also list local users and delete them individually using the CLI tasks for [Managing users](./CLI_tasks/user.md).
* You can also list local users and delete them individualy using the CLI tasks for [Managing users](./CLI_tasks/user.md).
2. Stop the Akkoma service `systemctl stop akkoma`
3. Disable Akkoma from systemd `systemctl disable akkoma`
3. Disable akkoma from systemd `systemctl disable akkoma`
4. Remove the files and folders you created during installation (see installation guide). This includes the akkoma, nginx and systemd files and folders.
5. Reload nginx now that the configuration is removed `systemctl reload nginx`
6. Remove the database and database user[¹] `sudo -Hu postgres psql -c 'DROP DATABASE akkoma;';` `sudo -Hu postgres psql -c 'DROP USER akkoma;'`
6. Remove the database and database user `sudo -Hu postgres psql -c 'DROP DATABASE <akkoma_db>;';` `sudo -Hu postgres psql -c 'DROP USER <akkoma_db>;'`
7. Remove the system user `userdel akkoma`
8. Remove the dependencies that you don't need anymore (see installation guide). Make sure you don't remove packages that are still needed for other software that you have running!
[¹]: We assume the database name and user are both "akkoma". If not, you can find the correct name in your config files.

View file

@ -14,10 +14,6 @@ su akkoma -s $SHELL -lc "./bin/pleroma_ctl update"
su akkoma -s $SHELL -lc "./bin/pleroma_ctl migrate"
```
If you selected an alternate flavour on installation,
you _may_ need to specify `--flavour`, in the same way as
[when installing](../../installation/otp_en#detecting-flavour).
## For from source installations (using git)
1. Go to the working directory of Akkoma (default is `/opt/akkoma`)

View file

@ -1,8 +1,21 @@
# Akkoma Clients
Note: Additional clients may work, but these are known to work with Akkoma.
Apps listed here might not support all of Akkoma's features.
# Pleroma Clients
Note: Additional clients may be working, but these are officially supporting Pleroma.
Feel free to contact us to be added to this list!
## Desktop
### Roma for Desktop
- Homepage: <https://www.pleroma.com/#desktopApp>
- Source Code: <https://github.com/roma-apps/roma-desktop>
- Platforms: Windows, Mac, Linux
- Features: MastoAPI, Streaming Ready
### Social
- Source Code: <https://gitlab.gnome.org/World/Social>
- Contact: [@brainblasted@social.libre.fi](https://social.libre.fi/users/brainblasted)
- Platforms: Linux (GNOME)
- Note(2019-01-28): Not at a pre-alpha stage yet
- Features: MastoAPI
### Whalebird
- Homepage: <https://whalebird.social/>
- Source Code: <https://github.com/h3poteto/whalebird-desktop>
@ -17,16 +30,28 @@ Apps listed here might not support all of Akkoma's features.
- Platforms: Android
- Features: MastoAPI, ActivityPub (Client-to-Server)
### Amaroq
- Homepage: <https://itunes.apple.com/us/app/amaroq-for-mastodon/id1214116200>
- Source Code: <https://github.com/ReticentJohn/Amaroq>
- Contact: [@eurasierboy@mastodon.social](https://mastodon.social/users/eurasierboy)
- Platforms: iOS
- Features: MastoAPI, No Streaming
### Fedilab
- Homepage: <https://fedilab.app/>
- Source Code: <https://codeberg.org/tom79/Fedilab>
- Contact: [@apps@toot.felilab.app](https://toot.fedilab.app/@apps)
- Source Code: <https://framagit.org/tom79/fedilab/>
- Contact: [@fedilab@framapiaf.org](https://framapiaf.org/users/fedilab)
- Platforms: Android
- Features: MastoAPI, Streaming Ready, Moderation, Text Formatting
### Kyclos
- Source Code: <https://git.pleroma.social/pleroma/harbour-kyclos>
- Platforms: SailfishOS
- Features: MastoAPI, No Streaming
### Husky
- Source code: <https://git.sr.ht/~captainepoch/husky>
- Contact: [@captainepoch@stereophonic.space](https://stereophonic.space/captainepoch)
- Source code: <https://git.mentality.rip/FWGS/Husky>
- Contact: [@Husky@enigmatic.observer](https://enigmatic.observer/users/Husky)
- Platforms: Android
- Features: MastoAPI, No Streaming, Emoji Reactions, Text Formatting, FE Stickers
@ -43,7 +68,32 @@ Apps listed here might not support all of Akkoma's features.
- Platforms: Android
- Features: MastoAPI, No Streaming
### Twidere
- Homepage: <https://twidere.mariotaku.org/>
- Source Code: <https://github.com/TwidereProject/Twidere-Android/>
- Contact: <me@mariotaku.org>
- Platform: Android
- Features: MastoAPI, No Streaming
### Indigenous
- Homepage: <https://indigenous.realize.be/>
- Source Code: <https://github.com/swentel/indigenous-android/>
- Contact: [@swentel@realize.be](https://realize.be)
- Platforms: Android
- Features: MastoAPI, No Streaming
## Alternative Web Interfaces
### Brutaldon
- Homepage: <https://jfm.carcosa.net/projects/software/brutaldon/>
- Source Code: <https://git.carcosa.net/jmcbray/brutaldon>
- Contact: [@gcupc@glitch.social](https://glitch.social/users/gcupc)
- Features: MastoAPI, No Streaming
### Halcyon
- Source Code: <https://notabug.org/halcyon-suite/halcyon>
- Contact: [@halcyon@social.csswg.org](https://social.csswg.org/users/halcyon)
- Features: MastoAPI, Streaming Ready
### Pinafore
- Homepage: <https://pinafore.social/>
- Source Code: <https://github.com/nolanlawson/pinafore>

View file

@ -8,6 +8,11 @@ For from source installations Akkoma configuration works by first importing the
To add configuration to your config file, you can copy it from the base config. The latest version of it can be viewed [here](https://akkoma.dev/AkkomaGang/akkoma/src/branch/develop/config/config.exs). You can also use this file if you don't know how an option is supposed to be formatted.
## :shout
* `enabled` - Enables the backend Shoutbox chat feature. Defaults to `true`.
* `limit` - Shout character limit. Defaults to `5_000`
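A minimal sketch of the `:shout` options above (the values shown are illustrative defaults, not recommendations):

```elixir
config :pleroma, :shout,
  enabled: true,
  # Shout character limit
  limit: 5_000
```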
## :instance
* `name`: The instances name.
* `email`: Email used to reach an Administrator/Moderator of the instance.
@ -33,9 +38,8 @@ To add configuration to your config file, you can copy it from the base config.
* `federation_incoming_replies_max_depth`: Max. depth of reply-to activities fetching on incoming federation, to prevent out-of-memory situations while fetching very long threads. If set to `nil`, threads of any depth will be fetched. Lower this value if you experience out-of-memory crashes.
* `federation_reachability_timeout_days`: Timeout (in days) of each external federation target being unreachable prior to pausing federating to it.
* `allow_relay`: Permits remote instances to subscribe to all public posts of your instance. This may increase the visibility of your instance.
* `public`: Allows unauthenticated access to public resources on your instance. This is essentially used as the default value for `:restrict_unauthenticated`.
See `restrict_unauthenticated` for more details.
* `quarantined_instances`: *DEPRECATED* ActivityPub instances where activities will not be sent. They can still reach there via other means, we just won't send them.
* `public`: Makes the client API in authenticated mode-only except for user-profiles. Useful for disabling the Local Timeline and The Whole Known Network. Note that there is a dependent setting restricting or allowing unauthenticated access to specific resources, see `restrict_unauthenticated` for more details.
* `quarantined_instances`: ActivityPub instances where private (DMs, followers-only) activities will not be sent.
* `allowed_post_formats`: MIME-type list of formats allowed to be posted (transformed into HTML).
* `extended_nickname_format`: Set to `true` to use extended local nicknames format (allows underscores/dashes). This will break federation with
older software for these nicknames.
@ -60,8 +64,6 @@ To add configuration to your config file, you can copy it from the base config.
* `cleanup_attachments`: Remove attachments along with statuses. Does not affect duplicate files and attachments without status. Enabling this will increase load to database when deleting statuses on larger instances.
* `show_reactions`: Let favourites and emoji reactions be viewed through the API (default: `true`).
* `password_reset_token_validity`: The time after which reset tokens aren't accepted anymore, in seconds (default: one day).
* `local_bubble`: Array of domains representing instances closely related to yours. Used to populate the `bubble` timeline. e.g `["example.com"]`, (default: `[]`)
* `languages`: List of Language Codes used by the instance. This is used to try and set a default language from the frontend. It will try and find the first match between the languages set here and the user's browser languages. It will default to the first language in this setting if there is no match.. (default `["en"]`)
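A sketch of how the `local_bubble` and `languages` options above might look in the config file (the domains and language codes are placeholders):

```elixir
config :pleroma, :instance,
  # Closely related instances used to populate the `bubble` timeline
  local_bubble: ["friends.example.com"],
  # Matched against the user's browser languages; the first entry is the fallback
  languages: ["en", "ja"]
```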
## :database
* `improved_hashtag_timeline`: Setting to force toggle / force disable improved hashtags timeline. `:enabled` forces hashtags to be fetched from `hashtags` table for hashtags timeline. `:disabled` forces object-embedded hashtags to be used (slower). Keep it `:auto` for automatic behaviour (it is auto-set to `:enabled` [unless overridden] when HashtagsTableMigrator completes).
@ -75,6 +77,10 @@ To add configuration to your config file, you can copy it from the base config.
* `enabled`: Enables sending a direct message to a newly registered user. Defaults to `false`.
* `sender_nickname`: The nickname of the local user that sends the welcome message.
* `message`: A message that will be sent to newly registered users as a direct message.
* `chat_message`: - welcome message sent as a chat message.
* `enabled`: Enables sending a chat message to a newly registered user. Defaults to `false`.
* `sender_nickname`: The nickname of the local user that sends the welcome message.
* `message`: A message that will be sent to newly registered users as a chat message.
* `email`: - welcome message sent as an email.
* `enabled`: Enables sending a welcome email to a newly registered user. Defaults to `false`.
* `sender`: The email address or tuple with `{nickname, email}` that will be used as the sender of the welcome email.
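Putting the welcome options above together, a sketch could look like this (the nickname and message text are placeholders; check the base config for the exact key names in your version):

```elixir
config :pleroma, :welcome,
  direct_message: [
    enabled: true,
    sender_nickname: "admin",
    message: "Welcome to our instance!"
  ],
  email: [
    enabled: false
  ]
```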
@ -121,11 +127,8 @@ To add configuration to your config file, you can copy it from the base config.
* `Pleroma.Web.ActivityPub.MRF.FollowBotPolicy`: Automatically follows newly discovered users from the specified bot account. Local accounts, locked accounts, and users with "#nobot" in their bio are respected and excluded from being followed.
* `Pleroma.Web.ActivityPub.MRF.AntiFollowbotPolicy`: Drops follow requests from followbots. Users can still allow bots to follow them by first following the bot.
* `Pleroma.Web.ActivityPub.MRF.KeywordPolicy`: Rejects or removes from the federated timeline or replaces keywords. (See [`:mrf_keyword`](#mrf_keyword)).
* `Pleroma.Web.ActivityPub.MRF.NormalizeMarkup`: Pass inbound HTML through a scrubber to make sure it doesn't have anything unusual in it. On by default, cannot be turned off.
* `Pleroma.Web.ActivityPub.MRF.InlineQuotePolicy`: Append a link to a post that quotes another post with the link to the quoted post, to ensure that software that does not understand quotes can have full context. On by default, cannot be turned off.
* `transparency`: Make the content of your Message Rewrite Facility settings public (via nodeinfo).
* `transparency_exclusions`: Exclude specific instance names from MRF transparency. The use of the exclusions feature will be disclosed in nodeinfo as a boolean value.
* `transparency_obfuscate_domains`: Show domains with `*` in the middle, to censor them if needed. For example, `ridingho.me` will show as `rid*****.me`
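As a rough illustration of the `:mrf` options described above (the policy list and domains are placeholders; check the base config for the exact format your version expects):

```elixir
config :pleroma, :mrf,
  policies: [Pleroma.Web.ActivityPub.MRF.SimplePolicy],
  # Publish MRF settings via nodeinfo
  transparency: true,
  # Domains shown partially censored, e.g. rid*****.me
  transparency_obfuscate_domains: ["ridingho.me"]
```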
## Federation
### MRF policies
@ -137,7 +140,7 @@ To add configuration to your config file, you can copy it from the base config.
* `media_removal`: List of instances to strip media attachments from and the reason for doing so.
* `media_nsfw`: List of instances to tag all media as NSFW (sensitive) from and the reason for doing so.
* `federated_timeline_removal`: List of instances to remove from the Federated Timeline (aka The Whole Known Network) and the reason for doing so.
* `reject`: List of instances to reject activities (except deletes) from and the reason for doing so. Additionally prevents activities from being sent to that instance.
* `reject`: List of instances to reject activities (except deletes) from and the reason for doing so.
* `accept`: List of instances to only accept activities (except deletes) from and the reason for doing so.
* `followers_only`: Force posts from the given instances to be visible by followers only and the reason for doing so.
* `report_removal`: List of instances to reject reports from and the reason for doing so.
@ -289,19 +292,14 @@ config :pleroma, :frontends,
"name" => "swagger-ui",
"ref" => "stable",
"enabled" => true
},
mastodon: %{
"name" => "mastodon-fe",
"ref" => "akkoma"
}
```
* `:primary` - The frontend that will be served at `/`
* `:admin` - The frontend that will be served at `/pleroma/admin`
* `:swagger` - Config for developers to act as an API reference to be served at `/akkoma/swaggerui/` (trailing slash _needed_). Disabled by default.
* `:mastodon` - The mastodon-fe configuration. This shouldn't need to be changed. This is served at `/web` when installed.
### :static\_fe
### :static_fe
Render profiles and posts using server-generated HTML that is viewable without using JavaScript.
@ -456,6 +454,7 @@ This will make Akkoma listen on `127.0.0.1` port `8080` and generate urls starti
* ``enabled``: Whether the managed content security policy is enabled.
* ``sts``: Whether to additionally send a `Strict-Transport-Security` header.
* ``sts_max_age``: The maximum age for the `Strict-Transport-Security` header if sent.
* ``ct_max_age``: The maximum age for the `Expect-CT` header if sent.
* ``referrer_policy``: The referrer policy to use, either `"same-origin"` or `"no-referrer"`.
* ``report_uri``: Adds the specified url to `report-uri` and `report-to` group in CSP header.
@ -526,9 +525,7 @@ Available caches:
### :http
* `receive_timeout`: the amount of time, in ms, to wait for a remote server to respond to a request. (default: `15000`)
* `pool_timeout`: the amount of time, in ms, to wait to check out an HTTP connection from the pool. This likely does not need changing unless your instance is _very_ busy with outbound requests. (default `5000`)
* `proxy_url`: an upstream proxy to fetch posts and/or media with, (default: `nil`); for example `http://127.0.0.1:3192`. Does not support SOCKS5 proxy, only http(s).
* `proxy_url`: an upstream proxy to fetch posts and/or media with, (default: `nil`)
* `send_user_agent`: should we include a user agent with HTTP requests? (default: `true`)
* `user_agent`: what user agent should we use? (default: `:default`), must be string or `:default`
* `adapter`: array of adapter options
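A sketch of the `:http` settings described above (the proxy URL is an example value; leave `proxy_url` at `nil` if you don't use one):

```elixir
config :pleroma, :http,
  receive_timeout: 15_000,
  pool_timeout: 5_000,
  # http(s) proxy for outbound fetches
  proxy_url: "http://127.0.0.1:3192",
  send_user_agent: true,
  user_agent: :default
```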
@ -1046,22 +1043,7 @@ config :pleroma, Pleroma.Formatter,
## Custom Runtime Modules (`:modules`)
* `runtime_dir`: A path to custom Elixir modules, such as MRF policies or
custom authenticators. These modules will be loaded on boot, and can be
contained in subdirectories. It is advised to use version-controlled
subdirectories to make management of them a bit easier. Note that only
files with the extension `.ex` will be loaded.
```elixir
config :pleroma, :modules, runtime_dir: "instance/modules"
```
### Adding a module
```bash
cd instance/modules/
git clone <MY MODULE>
```
* `runtime_dir`: A path to custom Elixir modules (such as MRF policies).
## :configurable_from_database
@ -1095,7 +1077,7 @@ config :pleroma, :database_config_whitelist, [
### :restrict_unauthenticated
Restrict access for unauthenticated users to timelines (public and federated), user profiles and posts.
Restrict access for unauthenticated users to timelines (public and federated), user profiles and statuses.
* `timelines`: public and federated timelines
* `local`: public timeline
@ -1103,24 +1085,13 @@ Restrict access for unauthenticated users to timelines (public and federated), u
* `profiles`: user profiles
* `local`
* `remote`
* `activities`: posts
* `activities`: statuses
* `local`
* `remote`
#### When :instance, :public is `true`
Note: when `:instance, :public` is set to `false`, all `:restrict_unauthenticated` items will be effectively set to `true` by default. If you'd like to allow unauthenticated access to specific API endpoints on a private instance, please explicitly set `:restrict_unauthenticated` to a non-default value in `config/prod.secret.exs`.
When your instance is in "public" mode, all public resources (users, posts, timelines) are accessible to unauthenticated users.
Turning any of the `:restrict_unauthenticated` options to `true` will restrict access to the corresponding resources.
#### When :instance, :public is `false`
When `:instance, :public` is set to `false`, all of the `:restrict_unauthenticated` options will effectively be set to `true` by default,
meaning that only authenticated users will be able to access the corresponding resources.
If you'd like to allow unauthenticated access to specific resources, you can turn these settings to `false`.
**Note**: setting `restrict_unauthenticated/timelines/local` to `true` has no practical sense if `restrict_unauthenticated/timelines/federated` is set to `false` (since local public activities will still be delivered to unauthenticated users as part of federated timeline).
Note: setting `restrict_unauthenticated/timelines/local` to `true` has no practical sense if `restrict_unauthenticated/timelines/federated` is set to `false` (since local public activities will still be delivered to unauthenticated users as part of federated timeline).
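For instance, to keep public timelines open while hiding profiles and posts from unauthenticated users, a sketch could look like this (assuming the map-based format used in the base config):

```elixir
config :pleroma, :restrict_unauthenticated,
  timelines: %{local: false, federated: false},
  profiles: %{local: true, remote: true},
  activities: %{local: true, remote: true}
```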
## Pleroma.Web.ApiSpec.CastAndValidate
@ -1176,28 +1147,3 @@ Each job has these settings:
* `:max_running` - max concurrently runnings jobs
* `:max_waiting` - max waiting jobs
### Translation Settings
Settings to automatically translate statuses for end users. Currently supported
translation services are DeepL and LibreTranslate.
Translations are available at `/api/v1/statuses/:id/translations/:language`, where
`language` is the target language code (e.g `en`)
### `:translator`
- `:enabled` - enables translation
- `:module` - Sets module to be used
- Either `Pleroma.Akkoma.Translators.DeepL` or `Pleroma.Akkoma.Translators.LibreTranslate`
### `:deepl`
- `:api_key` - API key for DeepL
- `:tier` - API tier
- either `:free` or `:pro`
### `:libre_translate`
- `:url` - URL of LibreTranslate instance
- `:api_key` - API key for LibreTranslate
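Putting the translation settings above together, a DeepL-based sketch might look like this (the API key is a placeholder):

```elixir
config :pleroma, :translator,
  enabled: true,
  module: Pleroma.Akkoma.Translators.DeepL

config :pleroma, :deepl,
  api_key: "your-deepl-api-key",
  # either :free or :pro
  tier: :free
```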

View file

@ -19,10 +19,6 @@ config :pleroma, :frontends,
admin: %{
"name" => "admin-fe",
"ref" => "stable"
},
mastodon: %{
"name" => "mastodon-fe",
"ref" => "akkoma"
}
```
@ -30,18 +26,12 @@ This would serve the frontend from the folder at `$instance_static/frontends
Refer to [the frontend CLI task](../../administration/CLI_tasks/frontend) for how to install the frontend's files
If you wish masto-fe to also be enabled, you will also need to run the install task for `mastodon-fe`. Not doing this will lead to the frontend not working.
If you choose not to install a frontend for whatever reason, it is recommended that you enable [`:static_fe`](#static_fe) to allow remote users to click "view remote source". Don't bother with this if you've got no unauthenticated access though.
You can also replace the default "no frontend" page by placing an `index.html` file under your `instance/static/` directory.
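If you go the no-frontend route, enabling the `static_fe` option mentioned above is a one-liner (a minimal sketch):

```elixir
config :pleroma, :static_fe, enabled: true
```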
## Mastodon-FE
Akkoma supports both [glitchsoc](https://github.com/glitch-soc/mastodon)'s more "vanilla" mastodon frontend
and [fedibird](https://github.com/fedibird/mastodon)'s extended frontend, which has near-feature-parity with akkoma (with quoting and reactions).
To enable either one, you must run the `frontend.install` task for either `mastodon-fe` or `fedibird-fe` (both `--ref akkoma`), then make sure
`:pleroma, :frontends, :mastodon` references the one you want.
## Swagger (openAPI) documentation viewer
If you're a developer and you'd like a human-readable rendering of the

View file

@ -23,17 +23,18 @@ This sets the `secure` flag on Akkoma's session cookie. This makes sure that
This will send additional HTTP security headers to the clients, including:
* `X-XSS-Protection: "0"`
* `X-XSS-Protection: "1; mode=block"`
* `X-Permitted-Cross-Domain-Policies: "none"`
* `X-Frame-Options: "DENY"`
* `X-Content-Type-Options: "nosniff"`
* `X-Download-Options: "noopen"`
A content security policy (CSP) will also be set:
```csp
content-security-policy:
default-src 'none';
base-uri 'none';
base-uri 'self';
frame-ancestors 'none';
img-src 'self' data: blob: https:;
media-src 'self' https:;
@ -51,15 +52,19 @@ content-security-policy:
An additional “Strict transport security” header will be sent with the configured `sts_max_age` parameter. This tells the browser that the domain should only be accessed over a secure HTTPS connection.
#### `ct_max_age`
An additional “Expect-CT” header will be sent with the configured `ct_max_age` parameter. This enforces the use of TLS certificates that are published in the certificate transparency log. (see [Expect-CT](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Expect-CT))
#### `referrer_policy`
> Recommended value: `same-origin`
If you click on a link, your browser's request to the other site will include where it is coming from. The “Referrer policy” header tells the browser how and if it should send this information. (see [Referrer policy](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Referrer-Policy)). `no-referrer` can be used if a referrer is not needed for improved privacy.
If you click on a link, your browser's request to the other site will include where it is coming from. The “Referrer policy” header tells the browser how and if it should send this information. (see [Referrer policy](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Referrer-Policy))
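The header-related options discussed in this section live under the `:http_security` key; a sketch with illustrative values:

```elixir
config :pleroma, :http_security,
  enabled: true,
  sts: true,
  # one year, in seconds
  sts_max_age: 31_536_000,
  referrer_policy: "same-origin"
```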
## systemd
A systemd unit example is provided at `installation/akkoma.service`.
A systemd unit example is provided at `installation/pleroma.service`.
### PrivateTmp

View file

@ -1,62 +0,0 @@
# How to use a different domain name for Akkoma and the users it serves
Akkoma users are primarily identified by a `user@example.org` handle, and you might want this identifier to be the same as your email or jabber account, for instance.
However, in this case, you are almost certainly serving some web content on `https://example.org` already, and you might want to use another domain (say `akkoma.example.org`) for Akkoma itself.
Akkoma supports that, but it might be tricky to set up, and any error might prevent you from federating with other instances.
*If you are already running Akkoma on `example.org`, it is no longer possible to move it to `akkoma.example.org`.*
## Account identifiers
It is important to understand that for federation purposes, a user in Akkoma has two unique identifiers associated:
- A webfinger `acct:` URI, used for discovery and as a verifiable global name for the user across Akkoma instances. In our example, our account's acct: URI is `acct:user@example.org`
- An author/actor URI, used in every other aspect of federation. This is the way in which users are identified in ActivityPub, the underlying protocol used for federation with other Akkoma instances.
In our case, it is `https://akkoma.example.org/users/user`.
Both account identifiers are unique and required for Akkoma. An important risk if you set up your Akkoma instance incorrectly is to create two users (with different acct: URIs) with conflicting author/actor URIs.
## WebFinger
As said earlier, each Akkoma user has an `acct`: URI, which is used for discovery and authentication. When you add @user@example.org, a webfinger query is performed. This is done in two steps:
1. Querying `https://example.org/.well-known/host-meta` (where the domain of the URL matches the domain part of the `acct`: URI) to get information on how to perform the query.
This file will indeed contain a URL template of the form `https://example.org/.well-known/webfinger?resource={uri}` that will be used in the second step.
2. Fill the returned template with the `acct`: URI to be queried and perform the query: `https://example.org/.well-known/webfinger?resource=acct:user@example.org`
## Configuring your Akkoma instance
**_DO NOT ATTEMPT TO CONFIGURE YOUR INSTANCE THIS WAY IF YOU DID NOT UNDERSTAND THE ABOVE_**
### Configuring Akkoma
Akkoma has two configuration settings to enable using different domains for your users and Akkoma itself. `host` in `Pleroma.Web.Endpoint` and `domain` in `Pleroma.Web.WebFinger`. When the latter is not set, it defaults to the value of `host`.
*Be extra careful when configuring your Akkoma instance, as changing `host` may cause remote instances to register different accounts with the same author/actor URI, which will result in federation issues!*
```elixir
config :pleroma, Pleroma.Web.Endpoint,
url: [host: "pleroma.example.org"]
config :pleroma, Pleroma.Web.WebFinger, domain: "example.org"
```
- `domain` - is the domain for which your Akkoma instance has authority, it's the domain used in `acct:` URI. In our example, `domain` would be set to `example.org`.
- `host` - is the domain used for any URL generated for your instance, including the author/actor URL's. In our case, that would be `akkoma.example.org`.
### Configuring WebFinger domain
Now, you have Akkoma running at `https://akkoma.example.org` as well as a website at `https://example.org`. If you recall how webfinger queries work, the first step is to query `https://example.org/.well-known/host-meta`, which will contain a URL template.
Therefore, the easiest way to configure `example.org` is to redirect `/.well-known/host-meta` to `akkoma.example.org`.
With nginx, it would be as simple as adding:
```nginx
location = /.well-known/host-meta {
return 301 https://akkoma.example.org$request_uri;
}
```
in example.org's server block.

View file

@ -21,7 +21,7 @@ This will only save the theme for you personally. To make it available to the wh
### Upload the theme to the server
Themes can be found in the [static directory](static_dir.md). Create `STATIC-DIR/static/themes/` if needed and copy your theme there. Next you need to add an entry for your theme to `STATIC-DIR/static/styles.json`. If you use a from source installation, you'll first need to copy the file from `STATIC-DIR/frontends/pleroma-fe/REF/static/styles.json` (where `REF` is `stable` or `develop` depending on which ref you decided to install).
Themes can be found in the [static directory](static_dir.md). Create `STATIC-DIR/static/themes/` if needed and copy your theme there. Next you need to add an entry for your theme to `STATIC-DIR/static/styles.json`. If you use a from source installation, you'll first need to copy the file from `priv/static/static/styles.json`.
Example of `styles.json` where we add our own `my-awesome-theme.json`
```json
@ -71,3 +71,4 @@ config :pleroma, :frontend_configurations,
```
If you added it in the back-end configuration file, you'll need to restart your instance for the changes to take effect. If you don't see the changes, it's probably because the browser has cached the previous theme. In that case you'll want to clear browser caches. Alternatively you can use a private/incognito window just to see the changes.

View file

@ -155,11 +155,12 @@ server {
location / {
add_header X-XSS-Protection "0";
add_header X-XSS-Protection "1; mode=block";
add_header X-Permitted-Cross-Domain-Policies none;
add_header X-Frame-Options DENY;
add_header X-Content-Type-Options nosniff;
add_header Referrer-Policy same-origin;
add_header X-Download-Options noopen;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;

View file

@ -15,6 +15,18 @@ The MRF provides user-configurable policies. The default policy is `NoOpPolicy`,
It is possible to use multiple, active MRF policies at the same time.
## Quarantine Instances
You have the ability to prevent private / followers-only messages from federating with specific instances, which means they will only get the public or unlisted messages from your instance.
If, for example, you're using `MIX_ENV=prod` (i.e. production mode), you would open your configuration file located in `config/prod.secret.exs` and edit or add the option under your `:instance` config object, then specify the instances within quotes.
```elixir
config :pleroma, :instance,
[...]
quarantined_instances: ["instance.example", "other.example"]
```
## Using `SimplePolicy`
`SimplePolicy` is capable of handling most common admin tasks.
@ -29,7 +41,7 @@ config :pleroma, :mrf,
Once `SimplePolicy` is enabled, you can configure various groups in the `:mrf_simple` config object. These groups are:
* `reject`: Servers in this group will have their messages rejected. Also outbound messages will not be sent to these servers.
* `reject`: Servers in this group will have their messages rejected.
* `accept`: If not empty, only messages from these instances will be accepted (whitelist federation).
* `media_nsfw`: Servers in this group will have the #nsfw tag and sensitive setting injected into incoming messages which contain media.
* `media_removal`: Servers in this group will have media stripped from incoming messages.
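A sketch of an `:mrf_simple` block using the groups above (domains and reasons are placeholders; older releases may expect plain domain strings instead of `{domain, reason}` tuples):

```elixir
config :pleroma, :mrf_simple,
  reject: [{"spam.example", "spam"}],
  media_nsfw: [{"nsfw.example", "unmarked sensitive media"}]
```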

View file

@ -14,12 +14,11 @@ apt -yq install tor
**WARNING:** Onion instances not using a Tor version supporting V3 addresses will not be able to federate with you.
Create the hidden service for your Akkoma instance in `/etc/tor/torrc`, with an HTTP tunnel:
Create the hidden service for your Akkoma instance in `/etc/tor/torrc`:
```
HiddenServiceDir /var/lib/tor/akkoma_hidden_service/
HiddenServicePort 80 127.0.0.1:8099
HiddenServiceVersion 3 # Remove if Tor version is below 0.3 ( tor --version )
HTTPTunnelPort 9080
```
Restart Tor to generate an address:
```
@ -36,7 +35,7 @@ Next, edit your Akkoma config.
If running in prod, navigate to your Akkoma directory, edit `config/prod.secret.exs`
and append this line:
```
config :pleroma, :http, proxy_url: "http://localhost:9080"
config :pleroma, :http, proxy_url: {:socks5, :localhost, 9050}
```
In your Akkoma directory, assuming you're running prod,
run the following:
@ -99,11 +98,12 @@ server {
location / {
add_header X-XSS-Protection "0";
add_header X-XSS-Protection "1; mode=block";
add_header X-Permitted-Cross-Domain-Policies none;
add_header X-Frame-Options DENY;
add_header X-Content-Type-Options nosniff;
add_header Referrer-Policy same-origin;
add_header X-Download-Options noopen;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;

View file

@ -1,54 +0,0 @@
# Using a Varnish Cache
Varnish is a layer that sits between your web server and your backend application -
it does something similar to nginx caching, but tends to be optimised for speed over
all else.
To set up a varnish cache, first you'll need to install varnish.
This will vary by distribution, and since this is a rather advanced guide,
no copy-paste instructions are provided. It's probably in your distribution's
package manager, though. `apt-get install varnish` and so on.
Once you have varnish installed, you'll need to configure it to work with akkoma.
Copy the configuration file to the varnish configuration directory:
cp installation/akkoma.vcl /etc/varnish/akkoma.vcl
You may want to check if varnish added a `default.vcl` file to the same directory,
if so you can just remove it without issue.
Then boot up varnish, probably `systemctl start varnish` or `service varnish start`.
Now you should be able to `curl -D- localhost:6081` and see a bunch of
akkoma javascript.
Once that's out of the way, we can point our webserver at varnish. This is done as follows:
=== "Nginx"
upstream phoenix {
server 127.0.0.1:6081 max_fails=5 fail_timeout=60s;
}
=== "Caddy"
reverse_proxy 127.0.0.1:6081
Now hopefully it all works
If you get a HTTPS redirect loop, you may need to remove this part of the VCL
```vcl
if (std.port(server.ip) != 443) {
set req.http.X-Forwarded-Proto = "http";
set req.http.x-redir = "https://" + req.http.host + req.url;
return (synth(750, ""));
} else {
set req.http.X-Forwarded-Proto = "https";
}
```
This will allow your webserver alone to handle redirects.

View file

@ -141,7 +141,8 @@ You then need to set the URL and authentication credentials if relevant.
### Initial indexing
After setting up the configuration, you'll want to index all of your already existing posts. You'll only have to do it one time, but it might take a while, depending on the amount of posts your instance has seen.
After setting up the configuration, you'll want to index all of your already existing posts. Only public posts are indexed. You'll only
have to do it one time, but it might take a while, depending on the amount of posts your instance has seen.
The sequence of actions is as follows:

View file

@ -89,23 +89,7 @@ config :pleroma, :frontend_configurations,
Terms of Service will be shown to all users on the registration page. It's the best place to write down the rules for your instance. You can modify the rules by adding and changing `$static_dir/static/terms-of-service.html`.
## Favicon
The favicon will display on the frontend, and in the browser tab.
Place a PNG file at `$static_dir/favicon.png` to change the favicon. Note that this
is _one level above_ where the logo is placed; it should be on the same level as
the `frontends` directory.
## Styling rendered pages
To overwrite the CSS stylesheet of the OAuth form and other static pages, you can upload your own CSS file to `instance/static/static.css`. This will completely replace the CSS used by those pages, so it might be a good idea to copy the one from `priv/static/instance/static.css` and make your changes.
## Overriding pleroma-fe styles
To overwrite the CSS stylesheet of pleroma-fe, you can put a file at
`$static_dir/static/custom.css` containing your styles. These will be loaded
with the rest of the CSS.
You will probably have to put `!important` on most/all your styles to override the
default ones, due to the specificity precedence of CSS.

View file

@ -1031,6 +1031,7 @@ Most of the settings will be applied in `runtime`, this means that you don't nee
- `:hackney_pools`
- `:connections_pool`
- `:pools`
- `:chat`
- partially settings inside these keys:
- `:seconds_valid` in `Pleroma.Captcha`
- `:proxy_remote` in `Pleroma.Upload`
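Whether the Admin API may write these settings to the database at all is controlled by a single flag (a sketch; it is disabled by default):

```elixir
config :pleroma, configurable_from_database: true
```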
@ -1410,6 +1411,127 @@ Loads json generated from `config/descriptions.exs`.
```
## GET /api/v1/pleroma/admin/users/:nickname/chats
### List a user's chats
- Params: None
- Response:
```json
[
{
"sender": {
"id": "someflakeid",
"username": "somenick",
...
},
"receiver": {
"id": "someflakeid",
"username": "somenick",
...
},
"id" : "1",
"unread" : 2,
"last_message" : {...}, // The last message in that chat
"updated_at": "2020-04-21T15:11:46.000Z"
}
]
```
## GET /api/v1/pleroma/admin/chats/:chat_id
### View a single chat
- Params: None
- Response:
```json
{
"sender": {
"id": "someflakeid",
"username": "somenick",
...
},
"receiver": {
"id": "someflakeid",
"username": "somenick",
...
},
"id" : "1",
"unread" : 2,
"last_message" : {...}, // The last message in that chat
"updated_at": "2020-04-21T15:11:46.000Z"
}
```
## GET /api/v1/pleroma/admin/chats/:chat_id/messages
### List the messages in a chat
- Params: `max_id`, `min_id`
- Response:
```json
[
{
"account_id": "someflakeid",
"chat_id": "1",
"content": "Check this out :firefox:",
"created_at": "2020-04-21T15:11:46.000Z",
"emojis": [
{
"shortcode": "firefox",
"static_url": "https://dontbulling.me/emoji/Firefox.gif",
"url": "https://dontbulling.me/emoji/Firefox.gif",
"visible_in_picker": false
}
],
"id": "13",
"unread": true
},
{
"account_id": "someflakeid",
"chat_id": "1",
"content": "Whats' up?",
"created_at": "2020-04-21T15:06:45.000Z",
"emojis": [],
"id": "12",
"unread": false
}
]
```
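For example, paging backwards from a known message id could be done roughly like this (all values are placeholders):

```shell
curl -H "Authorization: Bearer $ADMIN_TOKEN" \
  "https://yourinstance.tld/api/v1/pleroma/admin/chats/1/messages?max_id=13"
```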
## DELETE /api/v1/pleroma/admin/chats/:chat_id/messages/:message_id
### Delete a single message
- Params: None
- Response:
```json
{
"account_id": "someflakeid",
"chat_id": "1",
"content": "Check this out :firefox:",
"created_at": "2020-04-21T15:11:46.000Z",
"emojis": [
{
"shortcode": "firefox",
"static_url": "https://dontbulling.me/emoji/Firefox.gif",
"url": "https://dontbulling.me/emoji/Firefox.gif",
"visible_in_picker": false
}
],
"id": "13",
"unread": false
}
```
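A deletion request might be issued like this; the chat and message ids are the placeholder values from the examples above:

```shell
curl -X DELETE -H "Authorization: Bearer $ADMIN_TOKEN" \
  "https://yourinstance.tld/api/v1/pleroma/admin/chats/1/messages/13"
```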
## `GET /api/v1/pleroma/admin/instance_document/:document_name`
### Get an instance document
@ -1514,117 +1636,3 @@ Returns the content of the document
"error": "Could not install frontend"
}
```
## `GET /api/v1/pleroma/admin/announcements`
### List announcements
- Params: `offset`, `limit`
- Response: JSON, list of announcements
```json
[
{
"id": "AHDp0GBdRn1EPN5HN2",
"content": "some content",
"starts_at": null,
"ends_at": null,
"all_day": false,
"published_at": "2022-03-09T02:13:05",
"reactions": [],
"statuses": [],
"tags": [],
"emojis": [],
"updated_at": "2022-03-09T02:13:05"
}
]
```
Note that this differs from the Mastodon API variant: Mastodon API only returns *active* announcements, while this returns all.
## `GET /api/v1/pleroma/admin/announcements/:id`
### Display one announcement
- Response: JSON, one announcement
```json
{
"id": "AHDp0GBdRn1EPN5HN2",
"content": "some content",
"starts_at": null,
"ends_at": null,
"all_day": false,
"published_at": "2022-03-09T02:13:05",
"reactions": [],
"statuses": [],
"tags": [],
"emojis": [],
"updated_at": "2022-03-09T02:13:05"
}
```
## `POST /api/v1/pleroma/admin/announcements`
### Create an announcement
- Params:
- `content`: string, required, announcement content
- `starts_at`: datetime, optional, default to null, the time when the announcement will become active (displayed to users); if it is null, the announcement will be active immediately
- `ends_at`: datetime, optional, default to null, the time when the announcement will become inactive (no longer displayed to users); if it is null, the announcement will be active until an admin deletes it
- `all_day`: boolean, optional, default to false, tells the client whether to only display dates for `starts_at` and `ends_at`
- Response: JSON, created announcement
```json
{
"id": "AHDp0GBdRn1EPN5HN2",
"content": "some content",
"starts_at": null,
"ends_at": null,
"all_day": false,
"published_at": "2022-03-09T02:13:05",
"reactions": [],
"statuses": [],
"tags": [],
"emojis": [],
"updated_at": "2022-03-09T02:13:05"
}
```
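As a sketch, an announcement could be created with a JSON body like this; the domain and token are placeholders, and leaving out `starts_at`/`ends_at` makes the announcement active immediately and indefinitely:

```shell
curl -X POST \
  -H "Authorization: Bearer $ADMIN_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"content": "some content", "all_day": false}' \
  "https://yourinstance.tld/api/v1/pleroma/admin/announcements"
```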
## `PATCH /api/v1/pleroma/admin/announcements/:id`
### Change an announcement
- Params: same as `POST /api/v1/pleroma/admin/announcements`, except no param is required.
- Updates the announcement according to params. Missing params are kept as-is.
- Response: JSON, updated announcement
```json
{
"id": "AHDp0GBdRn1EPN5HN2",
"content": "some content",
"starts_at": null,
"ends_at": null,
"all_day": false,
"published_at": "2022-03-09T02:13:05",
"reactions": [],
"statuses": [],
"tags": [],
"emojis": [],
"updated_at": "2022-03-09T02:13:05"
}
```
## `DELETE /api/v1/pleroma/admin/announcements/:id`
### Delete an announcement
- Response: JSON, empty object
```json
{}
```

View file

@ -0,0 +1,255 @@
# Chats
Chats are a way to represent an IM-style conversation between two actors. They are not the same as direct messages and they are not `Status`es, even though they have a lot in common.
## Why Chats?
There are no 'visibility levels' in ActivityPub, their definition is purely a Mastodon convention. Direct Messaging between users on the fediverse has mostly been modeled by using ActivityPub addressing following Mastodon conventions on normal `Note` objects. In this case, a 'direct message' would be a message that has no followers addressed and also does not address the special public actor, but just the recipients in the `to` field. It would still be a `Note` and is presented with other `Note`s as a `Status` in the API.
This is an awkward setup for a few reasons:
- As DMs generally still follow the usual `Status` conventions, it is easy to accidentally pull somebody into a DM thread by mentioning them. (e.g. "I hate @badguy so much")
- It is possible to go from a publicly addressed `Status` to a DM reply, back to public, then to a 'followers only' reply, and so on. This can become very confusing, as it is unclear which user can see which part of the conversation.
- The standard `Status` format of implicit addressing also leads to rather ugly results if you try to display the messages as a chat, because all the recipients are always mentioned by name in the message.
- As direct messages are posted with the same api call (and usually same frontend component) as public messages, accidentally making a public message private or vice versa can happen easily. Client bugs can also lead to this, accidentally making private messages public.
As a measure to improve this situation, the `Conversation` concept and related Akkoma extensions were introduced. While it made it possible to work around a few of the issues, many of the problems remained and it didn't see much adoption because it was too complicated to use correctly.
## Chats explained
For these reasons, Chats are a new and different entity, both in the API as well as in ActivityPub. A quick overview:
- Chats are meant to represent an instant message conversation between two actors. For now these are only 1-on-1 conversations, but the other actor can be a group in the future.
- Chat messages have the ActivityPub type `ChatMessage`. They are not `Note`s. Servers that don't understand them will just drop them.
- The only addressing allowed in `ChatMessage`s is one single ActivityPub actor in the `to` field.
- There's always only one Chat between two actors. If you start chatting with someone and later start a 'new' Chat, the old Chat will be continued.
- `ChatMessage`s are posted with a different api, making it very hard to accidentally send a message to the wrong person.
- `ChatMessage`s don't show up in the existing timelines.
- Chats can never go from private to public. They are always private between the two actors.
## Caveats
- Chats are NOT E2E encrypted (yet). Security is still the same as email.
## API
In general, the way to send a `ChatMessage` is to first create a `Chat`, then post a message to that `Chat`. `Group`s will later be supported by making them a sub-type of `Account`.
This is the overview of using the API. The API is also documented via OpenAPI, so you can view it and play with it by pointing SwaggerUI or a similar OpenAPI tool to `https://yourinstance.tld/api/openapi`.
### Creating or getting a chat
To create or get an existing Chat for a certain recipient (identified by Account ID)
you can call:
`POST /api/v1/pleroma/chats/by-account-id/:account_id`
The account id is the normal FlakeId of the user.
```
POST /api/v1/pleroma/chats/by-account-id/someflakeid
```
If you already have the id of a chat, you can also use
```
GET /api/v1/pleroma/chats/:id
```
There will only ever be ONE Chat for you and a given recipient, so this call
will return the same Chat if you already have one with that user.
Returned data:
```json
{
"account": {
"id": "someflakeid",
"username": "somenick",
...
},
"id" : "1",
"unread" : 2,
"last_message" : {...}, // The last message in that chat
"updated_at": "2020-04-21T15:11:46.000Z"
}
```
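For illustration, creating (or fetching) the chat with curl might look like this; the domain, token and flake id are placeholders:

```shell
curl -X POST -H "Authorization: Bearer $ACCESS_TOKEN" \
  "https://yourinstance.tld/api/v1/pleroma/chats/by-account-id/someflakeid"
```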
### Marking a chat as read
To mark a number of messages in a chat up to a certain message as read, you can use
`POST /api/v1/pleroma/chats/:id/read`
Parameters:
- last_read_id: Given this id, all chat messages until this one will be marked as read. Required.
Returned data:
```json
{
"account": {
"id": "someflakeid",
"username": "somenick",
...
},
"id" : "1",
"unread" : 0,
"updated_at": "2020-04-21T15:11:46.000Z"
}
```
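A rough example of such a call, with placeholder values throughout:

```shell
curl -X POST \
  -H "Authorization: Bearer $ACCESS_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"last_read_id": "13"}' \
  "https://yourinstance.tld/api/v1/pleroma/chats/1/read"
```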
### Marking a single chat message as read
To set the `unread` property of a message to `false`
`POST /api/v1/pleroma/chats/:id/messages/:message_id/read`
Returned data:
The modified chat message
### Getting a list of Chats
`GET /api/v1/pleroma/chats`
This will return a list of chats that you have been involved in, sorted by their
last update (so new chats will be at the top).
Parameters:
- with_muted: Include chats from muted users (boolean).
Returned data:
```json
[
{
"account": {
"id": "someflakeid",
"username": "somenick",
...
},
"id" : "1",
"unread" : 2,
"last_message" : {...}, // The last message in that chat
"updated_at": "2020-04-21T15:11:46.000Z"
}
]
```
The recipient of messages that are sent to this chat is given by their AP ID.
No pagination is implemented for now.
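A sketch of listing your chats, including those from muted users (the domain and token are placeholders):

```shell
curl -H "Authorization: Bearer $ACCESS_TOKEN" \
  "https://yourinstance.tld/api/v1/pleroma/chats?with_muted=true"
```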
### Getting the messages for a Chat
For a given Chat id, you can get the associated messages with
`GET /api/v1/pleroma/chats/:id/messages`
This will return all messages, sorted by most recent to least recent. The usual
pagination options are implemented.
Returned data:
```json
[
{
"account_id": "someflakeid",
"chat_id": "1",
"content": "Check this out :firefox:",
"created_at": "2020-04-21T15:11:46.000Z",
"emojis": [
{
"shortcode": "firefox",
"static_url": "https://dontbulling.me/emoji/Firefox.gif",
"url": "https://dontbulling.me/emoji/Firefox.gif",
"visible_in_picker": false
}
],
"id": "13",
"unread": true
},
{
"account_id": "someflakeid",
"chat_id": "1",
"content": "Whats' up?",
"created_at": "2020-04-21T15:06:45.000Z",
"emojis": [],
"id": "12",
"unread": false,
"idempotency_key": "75442486-0874-440c-9db1-a7006c25a31f"
}
]
```
- idempotency_key: The copy of the `idempotency-key` HTTP request header that can be used for optimistic message sending. Included only during the first few minutes after the message creation.
### Posting a chat message
Posting a chat message for given Chat id works like this:
`POST /api/v1/pleroma/chats/:id/messages`
Parameters:
- content: The text content of the message. Optional if media is attached.
- media_id: The id of an upload that will be attached to the message.
Currently, no formatting beyond basic escaping and emoji is implemented.
Returned data:
```json
{
"account_id": "someflakeid",
"chat_id": "1",
"content": "Check this out :firefox:",
"created_at": "2020-04-21T15:11:46.000Z",
"emojis": [
{
"shortcode": "firefox",
"static_url": "https://dontbulling.me/emoji/Firefox.gif",
"url": "https://dontbulling.me/emoji/Firefox.gif",
"visible_in_picker": false
}
],
"id": "13",
"unread": false
}
```
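Posting the message from the example above could be done roughly like this (placeholder domain, token and chat id):

```shell
curl -X POST \
  -H "Authorization: Bearer $ACCESS_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"content": "Check this out :firefox:"}' \
  "https://yourinstance.tld/api/v1/pleroma/chats/1/messages"
```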
### Deleting a chat message
Deleting a chat message for given Chat id works like this:
`DELETE /api/v1/pleroma/chats/:chat_id/messages/:message_id`
Returned data is the deleted message.
### Notifications
There's a new `pleroma:chat_mention` notification, which has this form. It is not given out in the notifications endpoint by default; you need to explicitly request it with `include_types[]=pleroma:chat_mention`:
```json
{
"id": "someid",
"type": "pleroma:chat_mention",
"account": { ... } // User account of the sender,
"chat_message": {
"chat_id": "1",
"id": "10",
"content": "Hello",
"account_id": "someflakeid",
"unread": false
},
"created_at": "somedate"
}
```
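Requesting these notifications explicitly might look like this (the domain and token are placeholders):

```shell
# -g disables curl's URL globbing so the [] in the query string is passed literally.
curl -g -H "Authorization: Bearer $ACCESS_TOKEN" \
  "https://yourinstance.tld/api/v1/notifications?include_types[]=pleroma:chat_mention"
```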
### Streaming
There is an additional `user:pleroma_chat` stream. Incoming chat messages will make the current chat be sent to this `user` stream. The `event` of an incoming chat message is `pleroma:chat_update`. The payload is the updated chat with the incoming chat message in the `last_message` field.
### Web Push
If you want to receive push messages for this type, you'll need to add the `pleroma:chat_mention` type to your alerts in the push subscription.

View file

@ -40,10 +40,6 @@ Has these additional fields under the `pleroma` object:
- `parent_visible`: If the parent of this post is visible to the user or not.
- `pinned_at`: a datetime (iso8601) when status was pinned, `null` otherwise.
The `GET /api/v1/statuses/:id/source` endpoint additionally has the following attributes:
- `content_type`: The content type of the status source.
## Scheduled statuses
Has these additional fields in `params`:
@ -103,11 +99,13 @@ Has these additional fields under the `pleroma` object:
- `hide_followers_count`: boolean, true when the user has follower stat hiding enabled
- `hide_follows_count`: boolean, true when the user has follow stat hiding enabled
- `settings_store`: A generic map of settings for frontends. Opaque to the backend. Only returned in `/api/v1/accounts/verify_credentials` and `/api/v1/accounts/update_credentials`
- `chat_token`: The token needed for Akkoma shoutbox. Only returned in `/api/v1/accounts/verify_credentials`
- `deactivated`: boolean, true when the user is deactivated
- `allow_following_move`: boolean, true when the user allows automatically following accounts that have moved
- `unread_conversation_count`: The count of unread conversations. Only returned to the account owner.
- `unread_notifications_count`: The count of unread notifications. Only returned to the account owner.
- `notification_settings`: object, can be absent. See `/api/v1/pleroma/notification_settings` for the parameters/keys returned.
- `accepts_chat_messages`: boolean, but can be null if we don't have that information about a user
- `favicon`: nullable URL string, Favicon image of the user's instance
### Source
@ -161,6 +159,15 @@ The `type` value is `pleroma:emoji_reaction`. Has these fields:
- `account`: The account of the user who reacted
- `status`: The status that was reacted on
### ChatMention Notification (not default)
This notification has to be requested explicitly.
The `type` value is `pleroma:chat_mention`
- `account`: The account who sent the message
- `chat_message`: The chat message
### Report Notification (not default)
This notification has to be requested explicitly.
@ -175,7 +182,7 @@ The `type` value is `pleroma:report`
Accepts additional parameters:
- `exclude_visibilities`: will exclude the notifications for activities with the given visibilities. The parameter accepts an array of visibility types (`public`, `unlisted`, `private`, `direct`). Usage example: `GET /api/v1/notifications?exclude_visibilities[]=direct&exclude_visibilities[]=private`.
- `include_types`: will include the notifications for activities with the given types. The parameter accepts an array of types (`mention`, `follow`, `reblog`, `favourite`, `move`, `pleroma:emoji_reaction`, `pleroma:report`). Usage example: `GET /api/v1/notifications?include_types[]=mention&include_types[]=reblog`.
- `include_types`: will include the notifications for activities with the given types. The parameter accepts an array of types (`mention`, `follow`, `reblog`, `favourite`, `move`, `pleroma:emoji_reaction`, `pleroma:chat_mention`, `pleroma:report`). Usage example: `GET /api/v1/notifications?include_types[]=mention&include_types[]=reblog`.
## DELETE `/api/v1/notifications/destroy_multiple`
@ -195,7 +202,7 @@ Additional parameters can be added to the JSON body/Form data:
- `preview`: boolean, if set to `true` the post won't be actually posted, but the status entity would still be rendered back. This could be useful for previewing rich text/custom emoji, for example.
- `content_type`: string, contain the MIME type of the status, it is transformed into HTML by the backend. You can get the list of the supported MIME types with the nodeinfo endpoint.
- `to`: A list of nicknames (like `admin@otp.akkoma.dev` or `admin` on the local server) that will be used to determine who is going to be addressed by this post. Using this will disable the implicit addressing by mentioned names in the `status` body, only the people in the `to` list will be addressed. The normal rules for post visibility are not affected by this and will still apply.
- `to`: A list of nicknames (like `lain@soykaf.club` or `lain` on the local server) that will be used to determine who is going to be addressed by this post. Using this will disable the implicit addressing by mentioned names in the `status` body, only the people in the `to` list will be addressed. The normal rules for post visibility are not affected by this and will still apply.
- `visibility`: string, besides standard MastoAPI values (`direct`, `private`, `unlisted`, `local` or `public`) it can be used to address a List by setting it to `list:LIST_ID`.
- `expires_in`: The number of seconds the posted activity should expire in. When a posted activity expires it will be deleted from the server, and a delete request for it will be federated. This needs to be longer than an hour.
- `in_reply_to_conversation_id`: Will reply to a given conversation, addressing only the people who are part of the recipient set of that conversation. Sets the visibility to `direct`.
@ -233,6 +240,7 @@ Additional parameters can be added to the JSON body/Form data:
- `pleroma_background_image` - sets the background image of the user. Can be set to "" (an empty string) to reset.
- `discoverable` - if true, external services (search bots) etc. are allowed to index / list the account (regardless of this setting, user will still appear in regular search results).
- `actor_type` - the type of this account.
- `accepts_chat_messages` - if false, this account will reject all chat messages.
- `language` - user's preferred language for receiving emails (digest, confirmation, etc.)
All images (avatar, banner and background) can be reset to the default by sending an empty string ("") instead of a file.
@ -292,6 +300,7 @@ Has these additional parameters (which are the same as in Akkoma-API):
`GET /api/v1/instance` has additional fields
- `max_toot_chars`: The maximum characters per post
- `chat_limit`: The maximum characters per chat message
- `description_limit`: The maximum characters per image description
- `poll_limits`: The limits of polls
- `upload_limit`: The maximum upload file size
@ -312,6 +321,7 @@ Has these additional parameters (which are the same as in Akkoma-API):
Permits these additional alert types:
- pleroma:chat_mention
- pleroma:emoji_reaction
## Markers
@ -322,6 +332,10 @@ Has these additional fields under the `pleroma` object:
## Streaming
### Chats
There is an additional `user:pleroma_chat` stream. Incoming chat messages will make the current chat be sent to this `user` stream. The `event` of an incoming chat message is `pleroma:chat_update`. The payload is the updated chat with the incoming chat message in the `last_message` field.
### Remote timelines
For viewing remote server timelines, there are `public:remote` and `public:remote:media` streams. Each of these accept a parameter like `?instance=lain.com`.

View file

@ -44,8 +44,11 @@ See also [the Nodeinfo standard](https://nodeinfo.diaspora.software/).
"shareable_emoji_packs",
"multifetch",
"pleroma:api/v1/notifications:include_types_filter",
"chat",
"shout",
"relay",
"pleroma_emoji_reactions"
"pleroma_emoji_reactions",
"pleroma_chat_messages"
],
"federation":{
"enabled":true,
@ -201,8 +204,11 @@ See also [the Nodeinfo standard](https://nodeinfo.diaspora.software/).
"shareable_emoji_packs",
"multifetch",
"pleroma:api/v1/notifications:include_types_filter",
"chat",
"shout",
"relay",
"pleroma_emoji_reactions"
"pleroma_emoji_reactions",
"pleroma_chat_messages"
],
"federation":{
"enabled":true,

View file

@ -576,6 +576,38 @@ The status posting endpoint takes an additional parameter, `in_reply_to_conversa
* Response: the archive of the pack with a 200 status code, 403 if the pack is not set as shared,
404 if the pack does not exist
## `GET /api/v1/pleroma/accounts/:id/scrobbles`
### Requests a list of current and recent Listen activities for an account
* Method `GET`
* Authentication: not required
* Params: None
* Response: An array of media metadata entities.
* Example response:
```json
[
{
"account": {...},
"id": "1234",
"title": "Some Title",
"artist": "Some Artist",
"album": "Some Album",
"length": 180000,
"created_at": "2019-09-28T12:40:45.000Z"
}
]
```
## `POST /api/v1/pleroma/scrobble`
### Creates a new Listen activity for an account
* Method `POST`
* Authentication: required
* Params:
* `title`: the title of the media playing
* `album`: the album of the media playing [optional]
* `artist`: the artist of the media playing [optional]
* `length`: the length of the media playing [optional]
* Response: the newly created media metadata entity representing the Listen activity
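A scrobble could be submitted roughly as follows; the domain and token are placeholders, and `length` is assumed to be in milliseconds, matching the example response above:

```shell
curl -X POST \
  -H "Authorization: Bearer $ACCESS_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"title": "Some Title", "artist": "Some Artist", "album": "Some Album", "length": 180000}' \
  "https://yourinstance.tld/api/v1/pleroma/scrobble"
```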
# Emoji Reactions
Emoji reactions work a lot like favourites do. They make it possible to react to a post with a single emoji character. To detect the presence of this feature, you can check `pleroma_emoji_reactions` entry in the features list of nodeinfo.

View file

@ -40,5 +40,5 @@ The following is a config example to use with [Grafana](https://grafana.com)
metrics_path: /api/pleroma/app_metrics
scheme: https
static_configs:
- targets: ['otp.akkoma.dev']
- targets: ['pleroma.soykaf.com']
```

View file

@ -26,3 +26,40 @@ Response: HTTP 201 Created with the object into the body, no `Location` header p
The object given in the response should then be inserted into an Object's `attachment` field.
## ChatMessages
`ChatMessage`s are the messages sent in 1-on-1 chats. They are similar to
`Note`s, but the addressing is done by having a single AP actor in the `to`
field. Addressing multiple actors is not allowed. These messages are always
private; there is no public version of them. They are created with a `Create`
activity.
They are part of the `litepub` namespace as `http://litepub.social/ns#ChatMessage`.
Example:
```json
{
"actor": "http://2hu.gensokyo/users/raymoo",
"id": "http://2hu.gensokyo/objects/1",
"object": {
"attributedTo": "http://2hu.gensokyo/users/raymoo",
"content": "You expected a cute girl? Too bad.",
"id": "http://2hu.gensokyo/objects/2",
"published": "2020-02-12T14:08:20Z",
"to": [
"http://2hu.gensokyo/users/marisa"
],
"type": "ChatMessage"
},
"published": "2018-02-12T14:08:20Z",
"to": [
"http://2hu.gensokyo/users/marisa"
],
"type": "Create"
}
```
This setup does not prevent multi-user chats, but these will have to go through
a `Group`, which will be the recipient of the messages and then `Announce` them
to the users in the `Group`.

Binary file not shown.

After

Width:  |  Height:  |  Size: 15 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 66 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 4.9 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 12 KiB

View file

@ -7,37 +7,22 @@ It actually consists of two components: a backend, named simply Akkoma, and a us
It's part of what we call the fediverse, a federated network of instances which speak common protocols and can communicate with each other.
One account on an instance is enough to talk to the entire fediverse!
## Community Channels
### IRC
For support or general questions, pop over to #akkoma and #akkoma-dev at [irc.akkoma.dev](https://irc.akkoma.dev) (port 6697, SSL)
### Discourse
For more general meta-discussion, for example discussion of potential future features, head on over to [meta.akkoma.dev](https://meta.akkoma.dev)
### Dev diaries and release notifications
will be posted via [@akkoma@ihba](https://ihatebeinga.live/users/akkoma)
## How can I use it?
Akkoma instances are already widely deployed, a list can be found at <https://the-federation.info/akkoma> and <https://akkoma.fediverse.observer/list>.
Akkoma instances are already widely deployed, a list can be found at <https://the-federation.info/pleroma> and <https://fediverse.network/pleroma>.
If you don't feel like joining an existing instance, but instead prefer to deploy your own instance, that's easy too!
Installation instructions can be found in the installation section of these docs.
## I got an account, now what?
Great! Now you can explore the fediverse! Open the login page for your Akkoma instance (e.g. <https://otp.akkoma.dev>) and login with your username and password. (If you don't have an account yet, click on Register)
Great! Now you can explore the fediverse! Open the login page for your Akkoma instance (e.g. <https://pleroma.soykaf.com>) and login with your username and password. (If you don't have an account yet, click on Register)
### Pleroma-FE
The default front-end used by Akkoma is Pleroma-FE. You can find more information on what it is and how to use it in the [Introduction to Pleroma-FE](https://docs-fe.akkoma.dev/stable/).
### Mastodon interface
If the Pleroma-FE interface isn't your thing, or you're just trying something new but you want to keep using the familiar Mastodon interface, we got that too!
Just add a "/web" after your instance url (e.g. <https://otp.akkoma.dev/web>) and you'll end on the Mastodon web interface, but with a Akkoma backend! MAGIC!
Just add a "/web" after your instance url (e.g. <https://pleroma.soykaf.com/web>) and you'll end on the Mastodon web interface, but with a Akkoma backend! MAGIC!
The Mastodon interface is from the Glitch-soc fork. For more information on the Mastodon interface you can check the [Mastodon](https://docs.joinmastodon.org/) and [Glitch-soc](https://glitch-soc.github.io/docs/) documentation.
Remember, what you see is only the frontend part of Mastodon; the backend is still Akkoma.

View file

@ -221,8 +221,6 @@ If your instance is up and running, you can create your first user with administ
doas -u akkoma env MIX_ENV=prod mix pleroma.user new <username> <your@emailaddress> --admin
```
{! installation/frontends.include !}
#### Further reading
{! installation/further_reading.include !}

View file

@ -212,8 +212,6 @@ If your instance is up and running, you can create your first user with administ
sudo -Hu akkoma MIX_ENV=prod mix pleroma.user new <username> <your@emailaddress> --admin
```
{! installation/frontends.include !}
#### Further reading
{! installation/further_reading.include !}

View file

@ -175,8 +175,6 @@ If your instance is up and running, you can create your first user with administ
sudo -Hu akkoma MIX_ENV=prod mix pleroma.user new <username> <your@emailaddress> --admin
```
{! installation/frontends.include !}
#### Further reading
{! installation/further_reading.include !}

View file

@ -0,0 +1,188 @@
# Installing Akkoma

## About the Japanese translation

This article is a Japanese translation of [Installing on Debian based distributions](Installing on Debian based distributions). If anything seems wrong, please refer to the original.

## Installation

This guide assumes the use of Debian Stretch. It will probably also work on Ubuntu 16.04 and 18.04. It also assumes that the user has administrative rights, either as root or via sudo. If you run the steps below as the root user, ignore `sudo`; however, where a specific user is given, as in `sudo -Hu akkoma`, use `su <username> -s $SHELL -c 'command'` instead.

### Required software

- PostgreSQL 9.6 or newer (Ubuntu 16.04 only provides 9.5, so get a newer version from [here](https://www.postgresql.org/download/linux/ubuntu/))
- `postgresql-contrib` 9.6 or newer (same as above)
- Elixir 1.8 or newer ([do not install it from the Debian repositories, install it from here!](https://elixir-lang.org/install.html#unix-and-unix-like), or install [asdf](https://github.com/asdf-vm/asdf) as the akkoma user)
- `erlang-dev`
- `erlang-nox`
- `git`
- `build-essential`
- `cmake`
- `libmagic-dev`

#### Additional packages used in this guide

- `nginx` (recommended; if you use another reverse proxy, look for a reference configuration in this repository)
- `certbot` (or another ACME client for Let's Encrypt)
- `ImageMagick`
- `ffmpeg`
- `exiftool`

### Preparing the system

* First, update the system.
```
sudo apt update
sudo apt full-upgrade
```
* Install the packages listed above.
```
sudo apt install git build-essential postgresql postgresql-contrib cmake ffmpeg imagemagick libmagic-dev
```
### Install Elixir and Erlang

* Download and install the Erlang repository.
```
wget -P /tmp/ https://packages.erlang-solutions.com/erlang-solutions_2.0_all.deb
sudo dpkg -i /tmp/erlang-solutions_2.0_all.deb
```
* Install Elixir and Erlang.
```
sudo apt update
sudo apt install elixir erlang-dev erlang-nox
```
### Optional packages: [`docs/installation/optional/media_graphics_packages.md`](../installation/optional/media_graphics_packages.md)
```shell
sudo apt install imagemagick ffmpeg libimage-exiftool-perl
```
### Install Akkoma BE (the backend)

* Create a new user for Akkoma.
```
sudo useradd -r -s /bin/false -m -d /var/lib/akkoma -U akkoma
```
**Note**: To run a single command as the Akkoma user, use `sudo -Hu akkoma command`. To get a shell, use `sudo -Hu akkoma $SHELL`. If you do not use `sudo`, you can, as root, run a single command with `su -l akkoma -s $SHELL -c 'command'` and start a shell with `su -l akkoma -s $SHELL`.

* Clone the Git repository.
```
sudo mkdir -p /opt/akkoma
sudo chown -R akkoma:akkoma /opt/akkoma
sudo -Hu akkoma git clone https://akkoma.dev/AkkomaGang/akkoma.git /opt/akkoma
```
* Change into the new directory.
```
cd /opt/akkoma
```
* Install Akkoma's dependencies. If you are asked whether to install Hex, answer with yes.
```
sudo -Hu akkoma mix deps.get
```
* Generate the configuration.
```
sudo -Hu akkoma MIX_ENV=prod mix pleroma.instance gen
```
* If you are asked whether to install rebar3, answer with yes.
* This may take some time, because parts of akkoma are compiled during this step.
* You will be asked a few questions about your instance; this generates the configuration file `config/generated_config.exs`.
* Check the configuration and, if everything looks right, rename the file.
```
sudo -Hu akkoma mv config/{generated_config.exs,prod.secret.exs}
```
* The previous command also created the file `config/setup_db.psql`; use it to create the database.
```
sudo -Hu postgres psql -f config/setup_db.psql
```
* Then run the database migrations.
```
sudo -Hu akkoma MIX_ENV=prod mix ecto.migrate
```
* Now you can start Akkoma.
```
sudo -Hu akkoma MIX_ENV=prod mix phx.server
```
### Finalizing the installation

To open your new instance to the world, you will need to run a web server or proxy such as nginx in front of Akkoma. You will also need to create a system service file for Akkoma.

#### Nginx

* Install nginx, if you haven't already.
```
sudo apt install nginx
```
* Set up SSL. Other methods work as well, but certbot is described here.
If you use certbot, first install it.
```
sudo apt install certbot
```
Then set it up.
```
sudo mkdir -p /var/lib/letsencrypt/
sudo certbot certonly --email <your@emailaddress> -d <yourdomain> --standalone
```
If this does not work, nginx may not be running correctly; set up nginx first. Change ssl "on" to "off" and try again.

---

* Copy the sample nginx configuration into the nginx folder.
```
sudo cp /opt/akkoma/installation/nginx/akkoma.nginx /etc/nginx/sites-available/akkoma.nginx
sudo ln -s /etc/nginx/sites-available/akkoma.nginx /etc/nginx/sites-enabled/akkoma.nginx
```
* Before starting nginx, edit the configuration file. For example, you will need to change the server name and the certificate paths.
* Enable and start nginx.
```
sudo systemctl enable --now nginx.service
```
If you ever need to renew the certificate, uncomment the relevant location block in the nginx configuration and run the following command.
```
sudo certbot certonly --email <your@emailaddress> -d <yourdomain> --webroot -w /var/lib/letsencrypt/
```
#### Other web servers and proxies

You can find example configurations for these in `/opt/akkoma/installation/`.

#### Systemd service

* Copy the example service file.
```
sudo cp /opt/akkoma/installation/akkoma.service /etc/systemd/system/akkoma.service
```
* Edit the service file and make sure that all paths are correct.
* Enable and start `akkoma.service`.
```
sudo systemctl enable --now akkoma.service
```
#### Creating your first user

Once your new instance is up and running, you can create your first user with administrative rights with the following command.
```
sudo -Hu akkoma MIX_ENV=prod mix pleroma.user new <username> <your@emailaddress> --admin
```
#### Further reading
{! installation/further_reading.include !}

View file

@ -1,162 +0,0 @@
# Installing in Docker
## Installation
This guide will show you how to get akkoma working in a docker container,
if you want isolation, or if you run a distribution not supported by the OTP
releases.
If you want to migrate from a source or OTP install to docker, check out [the migration guide](./migrating_to_docker_en.md).
### Prepare the system
* Install docker and docker-compose
* [Docker](https://docs.docker.com/engine/install/)
* [Docker-compose](https://docs.docker.com/compose/install/)
* This will usually just be a repository installation and a package manager invocation.
* Clone the akkoma repository
* `git clone https://akkoma.dev/AkkomaGang/akkoma.git -b stable`
* `cd akkoma`
### Set up basic configuration
```bash
cp docker-resources/env.example .env
echo "DOCKER_USER=$(id -u):$(id -g)" >> .env
```
This probably won't need to be changed; it's only there to set basic environment
variables for the docker-compose file.
### Building the container
The container provided is a thin wrapper around akkoma's dependencies;
it does not contain the code itself. This is to allow for easy updates
and debugging if required.
```bash
./docker-resources/build.sh
```
This will generate a container called `akkoma` which we can use
in our compose environment.
### Generating your instance
```bash
mkdir pgdata
./docker-resources/manage.sh mix deps.get
./docker-resources/manage.sh mix compile
./docker-resources/manage.sh mix pleroma.instance gen
```
This will ask you a few questions - the defaults are fine for most things,
the database hostname is `db`, the database password is `akkoma`
(not auto generated), and you will want to set the ip to `0.0.0.0`.
Now we'll want to copy over the config it just created
```bash
cp config/generated_config.exs config/prod.secret.exs
```
### Setting up the database
We need to run a few commands on the database container; this isn't too bad.
```bash
docker-compose run --rm --user akkoma -d db
# Note down the name it gives here, it will be something like akkoma_db_run
docker-compose run --rm akkoma psql -h db -U akkoma -f config/setup_db.psql
docker stop akkoma_db_run # Replace with the name you noted down
```
Now we can actually run our migrations
```bash
./docker-resources/manage.sh mix ecto.migrate
# this will recompile your files at the same time, since we changed the config
```
### Start the server
We're going to run it in the foreground on the first run, just to make sure
everything starts up.
```bash
docker-compose up
```
If everything went well, you should be able to access your instance at http://localhost:4000
You can `ctrl-c` out of the docker-compose now to shut down the server.
### Running in the background
```bash
docker-compose up -d
```
### Create your first user
If your instance is up and running, you can create your first user with administrative rights with the following task:
```shell
./docker-resources/manage.sh mix pleroma.user new MY_USERNAME MY_EMAIL@SOMEWHERE --admin
```
And follow the prompts
### Reverse proxies
This is a tad more complex in docker than on the host itself.

You've got two options.
#### Running caddy in a container
This is by far the easiest option. It'll handle HTTPS and all that for you.
```bash
mkdir caddy-data
mkdir caddy-config
cp docker-resources/Caddyfile.example docker-resources/Caddyfile
```
Then edit the TLD in your caddyfile to the domain you're serving on.
Uncomment the `caddy` section in the docker-compose file,
then run `docker-compose up -d` again.
#### Running a reverse proxy on the host
If you want, you can also run the reverse proxy on the host. This is a bit more complex, but it's also more flexible.
Follow the guides for source install for your distribution of choice, or adapt
as needed. Your standard setup can be found in the [Debian Guide](../debian_based_en/#nginx)
### You're done!
All that's left is to set up your frontends.
The standard from-source commands will apply to you; just make sure you
prefix them with `./docker-resources/manage.sh`!
{! installation/frontends.include !}
### Updating Docker Installs
```bash
git pull
./docker-resources/build.sh
./docker-resources/manage.sh mix deps.get
./docker-resources/manage.sh mix compile
./docker-resources/manage.sh mix ecto.migrate
docker-compose restart akkoma db
```
#### Further reading
{! installation/further_reading.include !}
{! support.include !}

View file

@ -1,208 +0,0 @@
# Installing on Fedora
## OTP releases and RedHat-distributions
While the OTP releases of Akkoma work on most Linux distributions, they do not work correctly with RedHat-distributions. Therefore from-source installations are the recommended way to go when trying to install Akkoma on Fedora, Centos Stream or RedHat.
However, it is possible to compile your own OTP release of Akkoma for RedHat. Keep in mind that this has a few drawbacks, and has no particular advantage over a from-source installation, since you'll need to install Erlang and Elixir anyway.
This guide will cover a from-source installation. For instructions on how to build your own OTP release, please check out [the OTP for RedHat guide](./otp_redhat_en.md).
## Installation
This guide will assume you are on Fedora 36. This guide should also work with current releases of Centos Stream and RedHat, although it has not been tested yet. It also assumes that you have administrative rights, either as root or a user with [sudo permissions](https://docs.fedoraproject.org/en-US/quick-docs/adding_user_to_sudoers_file/). If you want to run this guide with root, ignore the `sudo` at the beginning of the lines, unless it calls a user like `sudo -Hu akkoma`; in this case, use `su <username> -s $SHELL -c 'command'` instead.
{! installation/generic_dependencies.include !}
### Prepare the system
* First update the system, if not already done:
```shell
sudo dnf upgrade --refresh
```
* Install some of the above mentioned programs:
```shell
sudo dnf install git gcc g++ make cmake file-devel postgresql-server postgresql-contrib
```
* Enable and initialize Postgres:
```shell
sudo systemctl enable postgresql.service
sudo postgresql-setup --initdb --unit postgresql
# Allow password auth for postgres
sudo sed -E -i 's|(host +all +all +127.0.0.1/32 +)ident|\1md5|' /var/lib/pgsql/data/pg_hba.conf
sudo systemctl start postgresql.service
```
### Install Elixir and Erlang
* Install Elixir and Erlang:
```shell
sudo dnf install elixir erlang-os_mon erlang-eldap erlang-xmerl erlang-erl_interface erlang-syntax_tools
```
### Optional packages: [`docs/installation/optional/media_graphics_packages.md`](../installation/optional/media_graphics_packages.md)
* Install ffmpeg (requires setting up the RPM-fusion repositories):
```shell
sudo dnf -y install https://download1.rpmfusion.org/free/fedora/rpmfusion-free-release-$(rpm -E %fedora).noarch.rpm
sudo dnf -y install https://download1.rpmfusion.org/nonfree/fedora/rpmfusion-nonfree-release-$(rpm -E %fedora).noarch.rpm
sudo dnf install ffmpeg
```
* Install ImageMagick and ExifTool for image manipulation:
```shell
sudo dnf install ImageMagick perl-Image-ExifTool
```
### Install AkkomaBE
* Add a new system user for the Akkoma service:
```shell
sudo useradd -r -s /bin/false -m -d /var/lib/akkoma -U akkoma
```
**Note**: To execute a single command as the Akkoma system user, use `sudo -Hu akkoma command`. You can also switch to a shell by using `sudo -Hu akkoma $SHELL`. If you don't have `sudo` on your system and don't want it, you can use `su` as the root user (UID 0) for a single command by using `su -l akkoma -s $SHELL -c 'command'` and `su -l akkoma -s $SHELL` for starting a shell.
* Git clone the AkkomaBE repository and make the Akkoma user the owner of the directory:
```shell
sudo mkdir -p /opt/akkoma
sudo chown -R akkoma:akkoma /opt/akkoma
sudo -Hu akkoma git clone https://akkoma.dev/AkkomaGang/akkoma.git /opt/akkoma
```
* Change to the new directory:
```shell
cd /opt/akkoma
```
* Install the dependencies for Akkoma and answer with `yes` if it asks you to install `Hex`:
```shell
sudo -Hu akkoma mix deps.get
```
* Generate the configuration: `sudo -Hu akkoma MIX_ENV=prod mix pleroma.instance gen`
* Answer with `yes` if it asks you to install `rebar3`.
* This may take some time, because parts of akkoma get compiled first.
* After that it will ask you a few questions about your instance and generates a configuration file in `config/generated_config.exs`.
* Check the configuration and, if it all looks right, rename it so Akkoma will load it (`prod.secret.exs` for production instances, `dev.secret.exs` for development instances):
```shell
sudo -Hu akkoma mv config/{generated_config.exs,prod.secret.exs}
```
* The previous command also creates the file `config/setup_db.psql`, with which you can create the database:
```shell
sudo -Hu postgres psql -f config/setup_db.psql
```
* Now run the database migration:
```shell
sudo -Hu akkoma MIX_ENV=prod mix ecto.migrate
```
* Now you can start Akkoma already
```shell
sudo -Hu akkoma MIX_ENV=prod mix phx.server
```
### Finalize installation
If you want to open your newly installed instance to the world, you should run nginx or some other webserver/proxy in front of Akkoma, and you should consider creating a systemd service file for Akkoma.
#### Nginx
* Install nginx, if not already done:
```shell
sudo dnf install nginx
```
* Setup your SSL cert, using your method of choice or certbot. If using certbot, first install it:
```shell
sudo dnf install certbot
```
and then set it up:
```shell
sudo mkdir -p /var/lib/letsencrypt/
sudo certbot certonly --email <your@emailaddress> -d <yourdomain> --standalone
```
If that doesn't work, make sure that nginx is not already running. If it still doesn't work, try setting up nginx first (change ssl "on" to "off" and try again).
---
* Copy the example nginx configuration and activate it:
```shell
sudo cp /opt/akkoma/installation/nginx/akkoma.nginx /etc/nginx/conf.d/akkoma.conf
```
* Before starting nginx edit the configuration and change it to your needs (e.g. change servername, change cert paths)
* Enable and start nginx:
```shell
sudo systemctl enable --now nginx.service
```
If you need to renew the certificate in the future, uncomment the relevant location block in the nginx config and run:
```shell
sudo certbot certonly --email <your@emailaddress> -d <yourdomain> --webroot -w /var/lib/letsencrypt/
```
#### Other webserver/proxies
You can find example configurations for them in `/opt/akkoma/installation/`.
#### Systemd service
* Copy example service file
```shell
sudo cp /opt/akkoma/installation/akkoma.service /etc/systemd/system/akkoma.service
```
* Edit the service file and make sure that all paths fit your installation
* Enable and start `akkoma.service`:
```shell
sudo systemctl enable --now akkoma.service
```
#### Create your first user
If your instance is up and running, you can create your first user with administrative rights with the following task:
```shell
sudo -Hu akkoma MIX_ENV=prod mix pleroma.user new <username> <your@emailaddress> --admin
```
{! installation/frontends.include !}
#### Further reading
{! installation/further_reading.include !}
{! support.include !}

View file

@ -206,9 +206,6 @@ If your instance is up and running, you can create your first user with administ
```shell
sudo -Hu akkoma MIX_ENV=prod mix pleroma.user new <username> <your@emailaddress> --admin
```
{! installation/frontends.include !}
## Conclusion
Restart nginx with `# service nginx restart` and you should be up and running.

View file

@ -1,31 +0,0 @@
#### Installing Frontends
Once your backend server is functional, you'll probably also want to
install frontends.
These are no longer bundled with the distribution and need an extra
command to install.
For most installations, the following will suffice:
=== "OTP"
```sh
./bin/pleroma_ctl frontend install pleroma-fe --ref stable
# and also, if desired
./bin/pleroma_ctl frontend install admin-fe --ref stable
```
=== "From Source"
```sh
mix pleroma.frontend install pleroma-fe --ref stable
mix pleroma.frontend install admin-fe --ref stable
```
=== "Docker"
```sh
./docker-resources/manage.sh mix pleroma.frontend install pleroma-fe --ref stable
./docker-resources/manage.sh mix pleroma.frontend install admin-fe --ref stable
```
For more customised installations, refer to [Frontend Management](../../configuration/frontend_management)

View file

@ -1,7 +1,7 @@
## Required dependencies
* PostgreSQL 9.6+
* Elixir 1.12+ (1.13+ recommended)
* Elixir 1.9+
* Erlang OTP 22.2+
* git
* file / libmagic

View file

@ -18,12 +18,6 @@ dev-db/postgresql uuid
You could opt to add `USE="uuid"` to `/etc/portage/make.conf` if you'd rather set this as a global USE flag, but this flag does unrelated things in other packages, so keep that in mind if you elect to do so.
If you are planning to use `nginx`, as this guide suggests, you should also add the following flag to the same file.
```text
www-servers/nginx NGINX_MODULES_HTTP: slice
```
Double check your compiler flags in `/etc/portage/make.conf`. If you require any special compilation flags or would like to set up remote builds, now is the time to do so. Be sure that your CFLAGS and MAKEOPTS make sense for the platform you are using. It is not recommended to use above `-O2` or risky optimization flags for a production server.
### Installing a cron daemon
@ -268,7 +262,7 @@ Even if you are using S3, Akkoma needs someplace to store media posted on your i
```shell
akkoma$ mkdir -p ~/akkoma/uploads
```
```
#### init.d service
@ -278,9 +272,7 @@ Even if you are using S3, Akkoma needs someplace to store media posted on your i
# cp /home/akkoma/akkoma/installation/init.d/akkoma /etc/init.d/
```
* Change the `/opt/akkoma` path in this file to `/home/akkoma/akkoma`
* Be sure to take a look at this service file and make sure that all other paths fit your installation
* Be sure to take a look at this service file and make sure that all paths fit your installation
* Enable and start `akkoma`:
@ -301,8 +293,6 @@ akkoma$ MIX_ENV=prod mix pleroma.user new <username> <your@emailaddress> --admin
If you opted to allow sudo for the `akkoma` user but would like to remove the ability for greater security, now might be a good time to edit `/etc/sudoers` and/or change the groups the `akkoma` user belongs to. Be sure to restart the akkoma service afterwards to ensure it picks up on the changes.
{! installation/frontends.include !}
#### Further reading
{! installation/further_reading.include !}

View file

@ -87,7 +87,7 @@ export FLAVOUR="amd64-musl"
# Clone the release build into a temporary directory and unpack it
# Replace `stable` with `unstable` if you want to run the unstable branch
su akkoma -s $SHELL -lc "
curl 'https://akkoma-updates.s3-website.fr-par.scw.cloud/stable/akkoma-$FLAVOUR.zip' -o /tmp/akkoma.zip
curl 'https://akkoma-updates.s3-website.fr-par.scw.cloud/develop/akkoma-$FLAVOUR.zip' -o /tmp/akkoma.zip
unzip /tmp/akkoma.zip -d /tmp/
"

View file

@ -1,5 +1,7 @@
# Migrating to Akkoma
**Akkoma does not currently have a stable release; until 3.0, all builds should be considered "develop"**
## Why should you migrate?
aside from actually responsive maintainer(s)? let's lookie here, we've got:
@ -9,8 +11,6 @@ aside from actually responsive maintainer(s)? let's lookie here, we've got:
- elasticsearch support (because pleroma search is GARBAGE)
- latest develop pleroma-fe additions
- local-only posting
- automatic post translation
- the mastodon frontend back in all its glory
- probably more, this is like 3.5 years of IHBA additions finally compiled
## Actually migrating
@ -30,36 +30,31 @@ upstream git URL then just rebuild - that'll be:
git remote set-url origin https://akkoma.dev/AkkomaGang/akkoma.git/
git fetch origin
git pull -r
# or, if you're on an instance-specific branch, you may want
# to run "git merge stable" instead (or develop if you want)
```
### WARNING - Migrating from Pleroma Develop
If you are on pleroma develop, and have updated since 2022-08, you may have issues with database migrations.
Please roll back the given migrations:
```bash
MIX_ENV=prod mix ecto.rollback --migrations-path priv/repo/optional_migrations/pleroma_develop_rollbacks -n3
```
Then compile, migrate and restart as usual.
## From OTP
This will just be setting the update URL - find your flavour from the [mapping on the install guide](../otp_en/#detecting-flavour) first.
**IMPORTANT: if you are using musl1.1 (void linux musl edition),
you will need to override the FLAVOUR to amd64-musl11,
also pls go shout at your maintainers to actually upgrade from EOL software.**
the flavour to be
This will just be setting the update URL -
```bash
export FLAVOUR=[the flavour you found above]
export FLAVOUR=$(arch="$(uname -m)";if [ "$arch" = "x86_64" ];then arch="amd64";elif [ "$arch" = "armv7l" ];then arch="arm";elif [ "$arch" = "aarch64" ];then arch="arm64";else echo "Unsupported arch: $arch">&2;fi;if getconf GNU_LIBC_VERSION>/dev/null;then libc_postfix="";elif [ "$(ldd 2>&1|head -c 9)" = "musl libc" ];then libc_postfix="-musl";elif [ "$(find /lib/libc.musl*|wc -l)" ];then libc_postfix="-musl";else echo "Unsupported libc">&2;fi;echo "$arch$libc_postfix")
./bin/pleroma_ctl update --zip-url https://akkoma-updates.s3-website.fr-par.scw.cloud/stable/akkoma-$FLAVOUR.zip
./bin/pleroma_ctl update --zip-url https://akkoma-updates.s3-website.fr-par.scw.cloud/develop/akkoma-$FLAVOUR.zip
./bin/pleroma_ctl migrate
```
Then restart. When updating in the future, you can just use
```bash
./bin/pleroma_ctl update --branch stable
./bin/pleroma_ctl update --branch develop
```
## Frontend changes
@ -69,20 +64,14 @@ your upgrade path here depends on your setup
### I just run with the built-in frontend
You'll need to run a couple of commands,
You'll need to run a single command,
=== "OTP"
```sh
./bin/pleroma_ctl frontend install pleroma-fe --ref stable
# and also, if desired
./bin/pleroma_ctl frontend install admin-fe --ref stable
```
=== "From Source"
```sh
mix pleroma.frontend install pleroma-fe --ref stable
mix pleroma.frontend install admin-fe --ref stable
```
```bash
# From source
mix pleroma.frontend install pleroma-fe
# OTP
./bin/pleroma_ctl frontend install pleroma-fe
```
### I've run the mix task to install a frontend
@ -95,26 +84,3 @@ Your situation will likely be unique - you'll need the changes in the
[forked pleroma-fe repository](https://akkoma.dev/AkkomaGang/pleroma-fe),
and either merge or cherry-pick from there depending on how you've got
things.
## Common issues
### The frontend doesn't show after installing it
This may occur if you are using database configuration.
Sometimes the config in your database will cause akkoma to still report
that there's no frontend, even when you've run the install.
To fix this, run:
=== "OTP"
```sh
./bin/pleroma_ctl config delete pleroma frontends
```
=== "From Source"
```sh
mix pleroma.config delete pleroma frontends
```
which will remove the config from the database. Things should work now.

View file

@ -1,158 +0,0 @@
# Migrating to a Docker Installation
If you for any reason wish to migrate a source or OTP install to a docker one,
this guide is for you.
You have a few options - your major one will be whether you want to keep your
reverse-proxy setup from before.
You probably should, in the first instance.
### Prepare the system
* Install docker and docker-compose
* [Docker](https://docs.docker.com/engine/install/)
* [Docker-compose](https://docs.docker.com/compose/install/)
* This will usually just be a repository installation and a package manager invocation.
=== "Source"
```bash
git pull
```
=== "OTP"
Clone the akkoma repository
```bash
git clone https://akkoma.dev/AkkomaGang/akkoma.git -b stable
cd akkoma
```
### Back up your old database
Change the database name as needed
```bash
pg_dump -d akkoma_prod --format c > akkoma_backup.sql
```
### Getting your static files in the right place
This will vary by every installation. Copy your `instance` directory to `instance/` in
the akkoma source directory - this is where the docker container will look for it.
For *most* from-source installs it'll already be there.
And the same with `uploads`: make sure your uploads (if you have them on disk) are
located at `uploads/` in the akkoma source directory.
If you have them on a different disk, you will need to mount that disk into the docker-compose file,
with an entry that looks like this:
```yaml
akkoma:
volumes:
- .:/opt/akkoma # This should already be there
- type: bind
source: /path/to/your/uploads
target: /opt/akkoma/uploads
```
### Set up basic configuration
```bash
cp docker-resources/env.example .env
echo "DOCKER_USER=$(id -u):$(id -g)" >> .env
```
This probably won't need to be changed; it's only there to set basic environment
variables for the docker-compose file.
=== "From source"
You probably won't need to change your config. Provided your `config/prod.secret.exs` file
is still there, you're all good.
=== "OTP"
```bash
cp /etc/akkoma/config.exs config/prod.secret.exs
```
**BOTH**
Set the following config in `config/prod.secret.exs`:
```elixir
config :pleroma, Pleroma.Web.Endpoint,
...,
http: [ip: {0, 0, 0, 0}, port: 4000]
config :pleroma, Pleroma.Repo,
...,
username: "akkoma",
password: "akkoma",
database: "akkoma",
hostname: "db"
```
### Building the container
The container provided is a thin wrapper around akkoma's dependencies;
it does not contain the code itself. This is to allow for easy updates
and debugging if required.
```bash
./docker-resources/build.sh
```
This will generate a container called `akkoma` which we can use
in our compose environment.
### Setting up the docker resources
```bash
# These won't exist if you're migrating from OTP
rm -rf deps
rm -rf _build
```
```bash
mkdir pgdata
./docker-resources/manage.sh mix deps.get
./docker-resources/manage.sh mix compile
```
### Setting up the database
Now we can import our database to the container.
```bash
docker-compose run --rm --user akkoma -d db
docker-compose run --rm akkoma pg_restore -v -U akkoma -j $(grep -c ^processor /proc/cpuinfo) -d akkoma -h db akkoma_backup.sql
```
### Reverse proxies
If you're just reusing your old proxy, you may have to uncomment the line in
the docker-compose file under `ports`. You'll find it.
Otherwise, you can use the same setup as the [docker installation guide](./docker_en.md#reverse-proxies).
### Let's go
```bash
docker-compose up -d
```
You should now be at the same point as you were before, but with a docker install.
{! installation/frontends.include !}
See the [docker installation guide](./docker_en.md) for more information on how to
update.
#### Further reading
{! installation/further_reading.include !}
{! support.include !}

View file

@ -202,8 +202,6 @@ incorrect timestamps. You should have ntpd running.
* <https://catgirl.science>
{! installation/frontends.include !}
#### Further reading
{! installation/further_reading.include !}

View file

@ -12,11 +12,9 @@ For any additional information regarding commands and configuration files mentio
To install them, run the following command (with doas or as root):
```
pkg_add elixir gmake git postgresql-server postgresql-contrib cmake ffmpeg ImageMagick erlang-wx-25
pkg_add elixir gmake git postgresql-server postgresql-contrib cmake ffmpeg ImageMagick
```
(Note that the erlang version may change; it was 25 at the time of writing)
Akkoma requires a reverse proxy, OpenBSD has relayd in base (and is used in this guide) and packages/ports are available for nginx (www/nginx) and apache (www/apache-httpd). Independently of the reverse proxy, [acme-client(1)](https://man.openbsd.org/acme-client) can be used to get a certificate from Let's Encrypt.
#### Optional software
@ -162,14 +160,15 @@ http protocol plerup { # Protocol for upstream akkoma server
match request header append "X-Forwarded-For" value "$REMOTE_ADDR" # This two header and the next one are not strictly required by akkoma but adding them won't hurt
match request header append "X-Forwarded-By" value "$SERVER_ADDR:$SERVER_PORT"
match response header append "X-XSS-Protection" value "0"
match response header append "X-XSS-Protection" value "1; mode=block"
match response header append "X-Permitted-Cross-Domain-Policies" value "none"
match response header append "X-Frame-Options" value "DENY"
match response header append "X-Content-Type-Options" value "nosniff"
match response header append "Referrer-Policy" value "same-origin"
match response header append "Content-Security-Policy" value "default-src 'none'; base-uri 'none'; form-action 'self'; img-src 'self' data: https:; media-src 'self' https:; style-src 'self' 'unsafe-inline'; font-src 'self'; script-src 'self'; connect-src 'self' wss://CHANGEME.tld; upgrade-insecure-requests;" # Modify "CHANGEME.tld" and set your instance's domain here
match response header append "X-Download-Options" value "noopen"
match response header append "Content-Security-Policy" value "default-src 'none'; base-uri 'self'; form-action 'self'; img-src 'self' data: https:; media-src 'self' https:; style-src 'self' 'unsafe-inline'; font-src 'self'; script-src 'self'; connect-src 'self' wss://CHANGEME.tld; upgrade-insecure-requests;" # Modify "CHANGEME.tld" and set your instance's domain here
match request header append "Connection" value "upgrade"
#match response header append "Strict-Transport-Security" value "max-age=63072000; includeSubDomains; preload" # Uncomment this only after you get HTTPS working.
#match response header append "Strict-Transport-Security" value "max-age=31536000; includeSubDomains" # Uncomment this only after you get HTTPS working.
# If you do not want remote frontends to be able to access your Akkoma backend server, comment these lines
match response header append "Access-Control-Allow-Origin" value "*"
@ -251,8 +250,6 @@ If your instance is up and running, you can create your first user with administ
LC_ALL=en_US.UTF-8 MIX_ENV=prod mix pleroma.user new <username> <your@emailaddress> --admin
```
{! installation/frontends.include !}
#### Further reading
{! installation/further_reading.include !}

View file

@ -0,0 +1,121 @@
# Installing Akkoma on OpenBSD
You will need:
* Your own domain
* A server running OpenBSD 6.3
* A working understanding of Unix systems
Commands prefixed with '#' must be run as `root`. It is recommended to do this
with `doas`; see `doas(1)` and `doas.conf(5)`.
From here on it is assumed that the domain "esimerkki.com" points to the
server's IP address.
If you run into problems with the installation, the IRC channel #pleroma on
Libera.chat or the Matrix channel #pleroma:libera.chat are good places to find
help (in English); `/msg eal kukkuu` if you really must speak Finnish.
Install the required software:
`# pkg_add git elixir gmake postgresql-server-10.3 postgresql-contrib-10.3 cmake ffmpeg ImageMagick`
#### Optional software
[`docs/installation/optional/media_graphics_packages.md`](../installation/optional/media_graphics_packages.md):
* ImageMagick
* ffmpeg
* exiftool
Install the required software:
`# pkg_add ImageMagick ffmpeg p5-Image-ExifTool`
Create the PostgreSQL database:
`# su - _postgresql`
`$ mkdir /var/postgresql/data`
`$ initdb -D /var/postgresql/data -E UTF8`
`$ createdb`
Start the database and set it to start automatically.
`# rcctl start postgresql`
`# rcctl enable postgresql`
Create a user for Akkoma (this will ask a few questions):
`# adduser akkoma`
Switch to the akkoma user and go to your home directory:
`# su - akkoma`
Download the Akkoma source code:
`$ git clone https://akkoma.dev/AkkomaGang/akkoma.git`
`$ cd akkoma`
Install the required Elixir libraries:
`$ mix deps.get`
`$ mix deps.compile`
Generate the required configuration:
`$ mix generate_config`
`$ cp config/generated_config.exs config/prod.secret.exs`
Run the generated database commands:
`# su _postgres -c 'psql -f config/setup_db.psql'`
`$ MIX_ENV=prod mix ecto.migrate`
Start the Akkoma process:
`$ MIX_ENV=prod mix compile`
`$ MIX_ENV=prod mix phx.server`
At this point it is a good idea to check that the settings are correct. Open
`esimerkki.com:4000/api/v1/instance` with a browser, curl, or a similar tool and
check that the "uri" field is "https://esimerkki.com".
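For example, a quick check with curl (using the placeholder domain of this guide; adjust it to your own):
```
$ curl -s http://esimerkki.com:4000/api/v1/instance | grep uri
```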
Note! Remember to make sure that the MIX_ENV variable is "prod" when running mix
commands; Mix reads the correct configuration file based on it.
Below is an rc.d script for starting Akkoma that has been found to mostly work.
Write it to the file /etc/rc.d/akkoma. After that, run
`# chmod +x /etc/rc.d/akkoma`, and you can start Akkoma with
`# /etc/rc.d/akkoma start`.
```
#!/bin/ksh
#/etc/rc.d/akkoma
daemon="cd /home/akkoma/akkoma;MIX_ENV=prod /usr/local/bin/elixir"
daemon_flags="--detached /usr/local/bin/mix phx.server"
daemon_user="akkoma"
rc_reload="NO"
rc_bg="YES"
pexp="beam"
. /etc/rc.d/rc.subr
rc_cmd $1
```
After this, all you still need is an HTTP server to forward requests to the Akkoma process.
An example configuration can be found in `install/akkoma.nginx`, and you can get TLS
certificates for free from, for example, [Let's Encrypt](https://certbot.eff.org/lets-encrypt/opbsd-nginx.html).
Nginx can be installed simply with `# pkg_add nginx`.
When you are done, open https://esimerkki.com in your browser. Create an account and follow
interesting people on other servers!

View file

@ -6,7 +6,6 @@ This guide covers an installation using an OTP release. To install Akkoma from so
## Pre-requisites
* A machine running Linux with GNU (e.g. Debian, Ubuntu) or musl (e.g. Alpine) libc and an `x86_64`, `aarch64` or `armv7l` CPU, to which you have root access. If you are not sure whether it's compatible, see the [Detecting flavour](#detecting-flavour) section below
* For installing OTP releases on RedHat-based distros like Fedora and Centos Stream, please follow [this guide](./otp_redhat_en.md) instead.
* A (sub)domain pointed to the machine
You will be running commands as root. If you aren't root already, please elevate your privileges by executing `sudo su`/`su`.
@ -15,19 +14,12 @@ While in theory OTP releases are possible to install on any compatible machine,
### Detecting flavour
This is a little more complex than it used to be (thanks, Ubuntu)
Paste the following into the shell:
```sh
arch="$(uname -m)";if [ "$arch" = "x86_64" ];then arch="amd64";elif [ "$arch" = "armv7l" ];then arch="arm";elif [ "$arch" = "aarch64" ];then arch="arm64";else echo "Unsupported arch: $arch">&2;fi;if getconf GNU_LIBC_VERSION>/dev/null;then libc_postfix="";elif [ "$(ldd 2>&1|head -c 9)" = "musl libc" ];then libc_postfix="-musl";elif [ "$(find /lib/libc.musl*|wc -l)" ];then libc_postfix="-musl";else echo "Unsupported libc">&2;fi;echo "$arch$libc_postfix"
```
Use the following mapping to figure out your flavour:
| distribution | flavour | available branches |
| ------------- | ------------------ | ------------------- |
| debian stable | amd64 | develop, stable |
| ubuntu focal | amd64 | develop, stable |
| ubuntu jammy | amd64-ubuntu-jammy | develop, stable |
| alpine | amd64-musl | stable |
Other similar distributions will _probably_ work, but if it is not listed above, there is no official
support.
If your platform is supported, the output will contain the flavour string; you will need it later. If not, this just means that we don't build releases for your platform, and you can still try installing from source.
### Installing the required packages
@ -123,7 +115,7 @@ export FLAVOUR="amd64-musl"
# Clone the release build into a temporary directory and unpack it
su akkoma -s $SHELL -lc "
curl 'https://akkoma-updates.s3-website.fr-par.scw.cloud/stable/akkoma-$FLAVOUR.zip' -o /tmp/akkoma.zip
curl 'https://akkoma-updates.s3-website.fr-par.scw.cloud/develop/akkoma-$FLAVOUR.zip' -o /tmp/akkoma.zip
unzip /tmp/akkoma.zip -d /tmp/
"
@ -306,8 +298,6 @@ su akkoma -s $SHELL -lc "./bin/pleroma_ctl user new joeuser joeuser@sld.tld --ad
```
This will create an account with the username 'joeuser' and the email address joeuser@sld.tld, and set that user's account as an admin. This will result in a link that you can paste into the browser, which logs you in and enables you to set the password.
{! installation/frontends.include !}
## Further reading
{! installation/further_reading.include !}

View file

@ -1,286 +0,0 @@
# Installing on RedHat using OTP releases
## OTP releases and Fedora/RedHat
The current OTP builds available for Linux are unfortunately incompatible with RedHat Linux distributions, like Fedora and Centos Stream. This is due to RedHat maintaining patched versions of certain Erlang libraries, making them incompatible with other Linux distributions.
However, you may compile your own OTP release from scratch. This is particularly useful if you wish to quickly distribute your OTP build onto multiple systems, without having to worry about compiling code on every system. If your goal is simply to set up a single instance for yourself, installing from source might be a simpler option; to do so, please follow [this guide](./fedora_based_en.md).
## Pre-requisites
In order to compile a RedHat-compatible OTP release, you will need to run a RedHat Linux distribution. This guide assumes you run Fedora 36, though it should also work on older Fedora releases and other RedHat distributions. It also assumes that you have administrative rights and sufficient knowledge of how to perform common CLI tasks in Linux. If you want to run this guide as root, ignore the `sudo` at the beginning of the lines.
Important: keep in mind that you must build your OTP release for the specific RedHat distribution you wish to use it on. A build on Fedora will only be compatible with a specific Fedora release version.
## Building an OTP release for Fedora 36
### Installing required packages
* First, update your system, if not already done:
```shell
sudo dnf upgrade --refresh
```
* Then install the required packages to build your OTP release:
```shell
sudo dnf install git gcc g++ erlang elixir erlang-os_mon erlang-eldap erlang-xmerl erlang-erl_interface erlang-syntax_tools make cmake file-devel
```
### Preparing the project files
* Git clone the AkkomaBE repository. This can be done anywhere:
```shell
cd ~
git clone https://akkoma.dev/AkkomaGang/akkoma.git
```
* Change to the new directory:
```shell
cd ./akkoma
```
### Building the OTP release
* Run the following commands:
```shell
export MIX_ENV=prod
echo "import Config" > config/prod.secret.exs
mix local.hex --force
mix local.rebar --force
mix deps.get --only prod
mkdir release
mix release --path release
```
Note that compiling the OTP release will take some time. Once it completes, you will find the OTP files in the directory `release`.
If all went well, you will have built your very own Fedora-compatible OTP release! You can now pack up the files in the `release` directory and deploy them to your other Fedora servers.
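One way to ship the build is to archive the `release` directory and copy it to the target machine; the archive name and host below are placeholders, not part of any official tooling:
```shell
# bundle the freshly built OTP release
tar -czf akkoma-otp-fedora36.tar.gz -C release .
# copy it to the server you want to install it on (placeholder host)
scp akkoma-otp-fedora36.tar.gz deploy@your-other-server:/tmp/
```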
## Installing the OTP release
Installing the OTP release from this point onward will be very similar to the regular OTP release. This guide assumes you will want to install your OTP package on other systems, so additional pre-requisites will be listed below.
Please note that running your own OTP release has some minor caveats that you should be aware of. They will be listed below as well.
### Installing required packages
Other than the things bundled in the OTP release, Akkoma depends on:
* curl (to download the release build)
* ncurses (ERTS won't run without it)
* PostgreSQL (also utilizes extensions in postgresql-contrib)
* nginx (this could be swapped for another reverse proxy, but this guide only covers nginx)
* certbot (for Let's Encrypt certificates; this could be swapped for another ACME client, but this guide only covers certbot)
* libmagic/file
First, update your system, if not already done:
```shell
sudo dnf upgrade --refresh
```
Then install the required packages:
```shell
sudo dnf install curl ncurses postgresql postgresql-contrib nginx certbot file-devel
```
### Optional packages: [`docs/installation/optional/media_graphics_packages.md`](../installation/optional/media_graphics_packages.md)
* Install ffmpeg (requires setting up the RPM-fusion repositories):
```shell
sudo dnf -y install https://download1.rpmfusion.org/free/fedora/rpmfusion-free-release-$(rpm -E %fedora).noarch.rpm
sudo dnf -y install https://download1.rpmfusion.org/nonfree/fedora/rpmfusion-nonfree-release-$(rpm -E %fedora).noarch.rpm
sudo dnf install ffmpeg
```
* Install ImageMagick and ExifTool for image manipulation:
```shell
sudo dnf install ImageMagick perl-Image-ExifTool
```
### Configuring PostgreSQL
#### (Optional) Performance configuration
It is encouraged to check [Optimizing your PostgreSQL performance](../configuration/postgresql.md) document, for tips on PostgreSQL tuning.
Restart PostgreSQL to apply configuration changes:
```shell
sudo systemctl restart postgresql
```
### Installing Akkoma
```sh
# Create an Akkoma user
adduser --system --shell /bin/false --home /opt/akkoma akkoma
# Move your custom OTP release to the home directory
sudo -Hu akkoma mv /your/custom/otp/release /opt/akkoma
# Create uploads directory and set proper permissions (skip if planning to use a remote uploader)
# Note: It does not have to be `/var/lib/akkoma/uploads`, the config generator will ask about the upload directory later
sudo mkdir -p /var/lib/akkoma/uploads
sudo chown -R akkoma /var/lib/akkoma
# Create custom public files directory (custom emojis, frontend bundle overrides, robots.txt, etc.)
# Note: It does not have to be `/var/lib/akkoma/static`, the config generator will ask about the custom public files directory later
sudo mkdir -p /var/lib/akkoma/static
sudo chown -R akkoma /var/lib/akkoma
# Create a config directory
sudo mkdir -p /etc/akkoma
sudo chown -R akkoma /etc/akkoma
# Run the config generator
sudo -Hu akkoma ./bin/pleroma_ctl instance gen --output /etc/akkoma/config.exs --output-psql /tmp/setup_db.psql
# Create the postgres database
sudo -Hu postgres psql -f /tmp/setup_db.psql
# Create the database schema
sudo -Hu akkoma ./bin/pleroma_ctl migrate
# Start the instance to verify that everything is working as expected
sudo -Hu akkoma ./bin/pleroma daemon
# Wait for about 20 seconds and query the instance endpoint, if it shows your uri, name and email correctly, you are configured correctly
sleep 20 && curl http://localhost:4000/api/v1/instance
# Stop the instance
sudo -Hu akkoma ./bin/pleroma stop
```
### Setting up nginx and getting Let's Encrypt SSL certificates
#### Get a Let's Encrypt certificate
```shell
certbot certonly --standalone --preferred-challenges http -d yourinstance.tld
```
#### Copy Akkoma nginx configuration to the nginx folder
```shell
cp /opt/akkoma/installation/nginx/akkoma.nginx /etc/nginx/conf.d/akkoma.conf
```
#### Edit the nginx config
```shell
# Replace example.tld with your (sub)domain (replace $EDITOR with your editor of choice)
sudo $EDITOR /etc/nginx/conf.d/akkoma.conf
# Verify that the config is valid
sudo nginx -t
```
#### Start nginx
```shell
sudo systemctl start nginx
```
At this point, if you open your (sub)domain in a browser, you should see a 502 error; that's because Akkoma is not started yet.
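If you prefer to check from the command line, a plain curl against your domain should show the 502 at this stage (`yourinstance.tld` is a placeholder):
```shell
curl -sI https://yourinstance.tld | head -n 1
# expected for now: HTTP/1.1 502 Bad Gateway (or HTTP/2 502)
```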
### Setting up a system service
```shell
# Copy the service into a proper directory
cp /opt/akkoma/installation/akkoma.service /etc/systemd/system/akkoma.service
# Edit the service file and make any necessary changes
sudo $EDITOR /etc/systemd/system/akkoma.service
# If you use SELinux, set the correct file context on the pleroma binary
sudo semanage fcontext -a -t init_t /opt/akkoma/bin/pleroma
sudo restorecon -v /opt/akkoma/bin/pleroma
# Start akkoma and enable it on boot
sudo systemctl start akkoma
sudo systemctl enable akkoma
```
If everything worked, you should see a response from Akkoma-BE when visiting your domain. You may need to install frontends like Akkoma-FE and Admin-FE; refer to [this guide](../administration/CLI_tasks/frontend.md) on how to install them.
If that didn't happen, try reviewing the installation steps, starting Akkoma in the foreground and seeing if there are any errors.
{! support.include !}
## Post installation
### Setting up auto-renew of the Let's Encrypt certificate
```shell
# Create the directory for webroot challenges
sudo mkdir -p /var/lib/letsencrypt
# Uncomment the webroot method
sudo $EDITOR /etc/nginx/conf.d/akkoma.conf
# Verify that the config is valid
sudo nginx -t
# Restart nginx
sudo systemctl restart nginx
# Ensure the webroot method and post hook are working
sudo certbot renew --cert-name yourinstance.tld --webroot -w /var/lib/letsencrypt/ --dry-run --post-hook 'systemctl reload nginx'
# Add it to the daily cron
echo '#!/bin/sh
certbot renew --cert-name yourinstance.tld --webroot -w /var/lib/letsencrypt/ --post-hook "systemctl reload nginx"
' > /etc/cron.daily/renew-akkoma-cert
sudo chmod +x /etc/cron.daily/renew-akkoma-cert
# If everything worked the output should contain /etc/cron.daily/renew-akkoma-cert
sudo run-parts --test /etc/cron.daily
```
## Create your first user and set as admin
```shell
cd /opt/akkoma
sudo -Hu akkoma ./bin/pleroma_ctl user new joeuser joeuser@sld.tld --admin
```
This will create an account with the username 'joeuser' and the email address joeuser@sld.tld, and set that user's account as an admin. This will result in a link that you can paste into the browser, which logs you in and enables you to set the password.
## Further reading
### Caveats of building your own OTP release
There are some things to take note of when you are running your own OTP builds.
#### Updating your OTP builds
Using your custom OTP build, you will not be able to update the installation using the `pleroma_ctl update` command. Running this command would overwrite your install with an OTP release from the main Akkoma repository, which will break your install.
Instead, you will have to rebuild your OTP release every time there are updates, then manually move it to where your Akkoma installation is running, overwriting the old OTP release files. Make sure to stop the Akkoma-BE server before overwriting any files!
After that, run the `pleroma_ctl migrate` command as usual to perform database migrations.
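Putting those steps together, a manual update might look roughly like the sketch below. The paths assume the layout used earlier in this guide (`/opt/akkoma`) and that your new build was copied to `/tmp/release`; both are assumptions, adjust them to your setup.
```shell
# stop the running instance before touching any files
sudo systemctl stop akkoma
# overwrite the old OTP release with the new build
sudo cp -a /tmp/release/. /opt/akkoma/
sudo chown -R akkoma /opt/akkoma
# run pending migrations, then bring the service back up
sudo -Hu akkoma /opt/akkoma/bin/pleroma_ctl migrate
sudo systemctl start akkoma
```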
#### Cross-compatibility between RedHat distributions
As it currently stands, your OTP build will only be compatible with the specific RedHat distribution you've built it on. Fedora builds only work on Fedora, CentOS builds only on CentOS, RedHat builds only on RedHat. Secondly, for Fedora, they will also be bound to the specific Fedora release. This is because different releases of Fedora may have significant changes made in some of the required packages and libraries.
{! installation/frontends.include !}
{! installation/further_reading.include !}
{! support.include !}

View file

@ -1,66 +0,0 @@
# Verifying OTP release integrity
All stable OTP releases are cryptographically signed, to allow
you to verify their integrity if you choose to.
Releases are signed with [Signify](https://man.openbsd.org/signify.1),
with [the public key in the main repository](https://akkoma.dev/AkkomaGang/akkoma/src/branch/stable/SIGNING_KEY.pub)
Release URLs will always be of the form
```
https://akkoma-updates.s3-website.fr-par.scw.cloud/{branch}/akkoma-{flavour}.zip
```
Where branch is usually `stable` or `develop`, and `flavour` is
the one [that you detect on install](../otp_en/#detecting-flavour).
So, for an AMD64 stable install, your update URL will be
```
https://akkoma-updates.s3-website.fr-par.scw.cloud/stable/akkoma-amd64.zip
```
To verify the integrity of this file, we have two helper files
```
# Checksums
https://akkoma-updates.s3-website.fr-par.scw.cloud/{branch}/akkoma-{flavour}.zip.sha256
# Signify signature of the hashes
https://akkoma-updates.s3-website.fr-par.scw.cloud/{branch}/akkoma-{flavour}.zip.sha256.sig
```
Thus, to upgrade manually, with integrity checking, consider the following script:
```bash
#!/bin/bash
set -eo pipefail
export FLAVOUR=amd64
export BRANCH=stable
# Fetch signing key
curl --silent https://akkoma.dev/AkkomaGang/akkoma/raw/branch/$BRANCH/SIGNING_KEY.pub -o AKKOMA_SIGNING_KEY.pub
# Download zip file and sig files
wget -q https://akkoma-updates.s3-website.fr-par.scw.cloud/$BRANCH/akkoma-$FLAVOUR{.zip,.zip.sha256,.zip.sha256.sig}
# Verify zip file's sha256 integrity
sha256sum --check akkoma-$FLAVOUR.zip.sha256
# Verify hash file's integrity
# Signify might be under the `signify` command, depending on your distribution
signify-openbsd -V -p AKKOMA_SIGNING_KEY.pub -m akkoma-$FLAVOUR.zip.sha256
# We're good, use that URL
echo "Update URL contents verified"
echo "use"
echo "./bin/pleroma_ctl update --zip-url https://akkoma-updates.s3-website.fr-par.scw.cloud/$BRANCH/akkoma-$FLAVOUR"
echo "to update your instance"
# Clean up
rm akkoma-$FLAVOUR.zip
rm akkoma-$FLAVOUR.zip.sha256
rm akkoma-$FLAVOUR.zip.sha256.sig
```

View file

@ -1,24 +1,21 @@
site_name: Akkoma Documentation
theme:
favicon: 'images/favicon.ico'
favicon: 'images/akko_badday.png'
name: 'material'
custom_dir: 'theme'
# Disable google fonts
font: false
logo: 'images/logo.png'
logo: 'images/akko_badday.png'
features:
- navigation.tabs
- toc.follow
- navigation.instant
- navigation.sections
- tabs
palette:
primary: 'deep purple'
accent: 'blue grey'
extra_css:
- css/extra.css
repo_name: 'AkkomaGang/akkoma'
repo_url: 'https://akkoma.dev/AkkomaGang/akkoma'
repo_name: 'AkkomaGang/docs'
repo_url: 'https://akkoma.dev/AkkomaGang/docs'
extra:
repo_icon: gitea
@ -34,8 +31,7 @@ markdown_extensions:
- pymdownx.tasklist:
custom_checkbox: true
- pymdownx.superfences
- pymdownx.tabbed:
alternate_style: true
- pymdownx.tabbed
- pymdownx.details
- markdown_include.include:
base_path: docs

View file

@ -1,26 +1,22 @@
certifi==2022.9.24
charset-normalizer==2.1.1
click==8.1.3
ghp-import==2.1.0
idna==3.4
importlib-metadata==4.12.0
Jinja2==3.1.2
Markdown==3.3.7
markdown-include==0.7.0
markdown-include==0.6.0
MarkupSafe==2.1.1
mergedeep==1.3.4
mkdocs==1.4.2
mkdocs-material==8.5.9
mkdocs-material-extensions==1.1
mkdocs==1.3.0
mkdocs-bootswatch==1.1
mkdocs-material==8.1.8
mkdocs-material-extensions==1.0.3
packaging==21.3
Pygments==2.13.0
pymdown-extensions==9.8
Pygments==2.11.2
pymdown-extensions==9.1
pyparsing==3.0.9
python-dateutil==2.8.2
PyYAML==6.0
pyyaml_env_tag==0.1
requests==2.28.1
six==1.16.0
urllib3==1.26.12
watchdog==2.1.9
zipp==3.8.0

View file

@ -7,9 +7,6 @@ ExecReload=/bin/kill $MAINPID
KillMode=process
Restart=on-failure
; Uncomment this if you're on Arch Linux
; Environment="PATH=/usr/local/sbin:/usr/local/bin:/usr/bin:/usr/bin/site_perl:/usr/bin/vendor_perl:/usr/bin/core_perl"
; Name of the user that runs the Akkoma service.
User=akkoma
; Declares that Akkoma runs in production mode.

View file

@ -1,5 +1,4 @@
# Recommended varnishncsa logging format: '%h %l %u %t "%m %{X-Forwarded-Proto}i://%{Host}i%U%q %H" %s %b "%{Referer}i" "%{User-agent}i"'
# Please use Varnish 7.0+ for proper Range Requests / Chunked encoding support
vcl 4.1;
import std;
@ -23,6 +22,11 @@ sub vcl_recv {
set req.http.X-Forwarded-Proto = "https";
}
# CHUNKED SUPPORT
if (req.http.Range ~ "bytes=") {
set req.http.x-range = req.http.Range;
}
# Pipe if WebSockets request is coming through
if (req.http.upgrade ~ "(?i)websocket") {
return (pipe);
@ -31,9 +35,9 @@ sub vcl_recv {
# Allow purging of the cache
if (req.method == "PURGE") {
if (!client.ip ~ purge) {
return (synth(405,"Not allowed."));
return(synth(405,"Not allowed."));
}
return (purge);
return(purge);
}
}
@ -49,11 +53,17 @@ sub vcl_backend_response {
return (retry);
}
# CHUNKED SUPPORT
if (bereq.http.x-range ~ "bytes=" && beresp.status == 206) {
set beresp.ttl = 10m;
set beresp.http.CR = beresp.http.content-range;
}
# Bypass cache for large files
# 50000000 ~ 50MB
if (std.integer(beresp.http.content-length, 0) > 50000000) {
set beresp.uncacheable = true;
return (deliver);
return(deliver);
}
# Don't cache objects that require authentication
@ -84,7 +94,7 @@ sub vcl_synth {
if (resp.status == 750) {
set resp.status = 301;
set resp.http.Location = req.http.x-redir;
return (deliver);
return(deliver);
}
}
@ -96,12 +106,25 @@ sub vcl_pipe {
}
}
sub vcl_hash {
# CHUNKED SUPPORT
if (req.http.x-range ~ "bytes=") {
hash_data(req.http.x-range);
unset req.http.Range;
}
}
sub vcl_backend_fetch {
# Be more lenient for slow servers on the fediverse
if (bereq.url ~ "^/proxy/") {
set bereq.first_byte_timeout = 300s;
}
# CHUNKED SUPPORT
if (bereq.http.x-range) {
set bereq.http.Range = bereq.http.x-range;
}
if (bereq.retries == 0) {
# Clean up the X-Varnish-Backend-503 flag that is used internally
# to mark broken backend responses that should be retried.
@ -120,6 +143,14 @@ sub vcl_backend_fetch {
}
}
sub vcl_deliver {
# CHUNKED SUPPORT
if (resp.http.CR) {
set resp.http.Content-Range = resp.http.CR;
unset resp.http.CR;
}
}
sub vcl_backend_error {
# Retry broken backend responses.
set bereq.http.X-Varnish-Backend-503 = "1";

View file

@ -0,0 +1,48 @@
#!/bin/sh
# Pleroma: A lightweight social networking server
# Copyright © 2017-2021 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only
project_id="74"
project_branch="rebase/glitch-soc"
static_dir="instance/static"
# For bundling:
# project_branch="pleroma"
# static_dir="priv/static"
if [ ! -d "${static_dir}" ]
then
echo "Error: ${static_dir} directory is missing, are you sure you are running this script at the root of pleromas repository?"
exit 1
fi
last_modified="$(curl --fail -s -I 'https://git.pleroma.social/api/v4/projects/'${project_id}'/jobs/artifacts/'${project_branch}'/download?job=build' | grep '^Last-Modified:' | cut -d: -f2-)"
echo "branch:${project_branch}"
echo "Last-Modified:${last_modified}"
artifact="mastofe.zip"
if [ "${last_modified}x" = "x" ]
then
echo "ERROR: Couldn't get the modification date of the latest build archive, maybe it expired, exiting..."
exit 1
fi
if [ -e mastofe.timestamp ] && [ "$(cat mastofe.timestamp)" = "${last_modified}" ]
then
echo "MastoFE is up-to-date, exiting..."
exit 0
fi
curl --fail -c - "https://git.pleroma.social/api/v4/projects/${project_id}/jobs/artifacts/${project_branch}/download?job=build" -o "${artifact}" || exit
# TODO: Update the emoji as well
rm -fr "${static_dir}/sw.js" "${static_dir}/packs" || exit
unzip -q "${artifact}" || exit
cp public/assets/sw.js "${static_dir}/sw.js" || exit
cp -r public/packs "${static_dir}/packs" || exit
echo "${last_modified}" > mastofe.timestamp
rm -fr public
rm -i "${artifact}"

View file

@ -23,15 +23,7 @@ def start_pleroma do
Pleroma.Config.Oban.warn()
Pleroma.Application.limiters_setup()
Application.put_env(:phoenix, :serve_endpoints, false, persistent: true)
proxy_url = Pleroma.Config.get([:http, :proxy_url])
proxy = Pleroma.HTTP.AdapterHelper.format_proxy(proxy_url)
finch_config =
[:http, :adapter]
|> Pleroma.Config.get([])
|> Pleroma.HTTP.AdapterHelper.maybe_add_proxy_pool(proxy)
|> Keyword.put(:name, MyFinch)
Finch.start_link(name: MyFinch)
unless System.get_env("DEBUG") do
Logger.remove_backend(:console)
@ -53,7 +45,6 @@ def start_pleroma do
Pleroma.Emoji,
{Pleroma.Config.TransferTask, false},
Pleroma.Web.Endpoint,
{Finch, finch_config},
{Oban, oban_config},
{Majic.Pool,
[name: Pleroma.MajicPool, pool_size: Pleroma.Config.get([:majic_pool, :size], 2)]}

View file

@ -110,14 +110,6 @@ def run(["prune_objects" | args]) do
end
end
def run(["prune_task"]) do
start_pleroma()
nil
|> Pleroma.Workers.Cron.PruneDatabaseWorker.perform()
|> IO.inspect()
end
def run(["fix_likes_collections"]) do
start_pleroma()

View file

@ -1,77 +0,0 @@
defmodule Mix.Tasks.Pleroma.Diagnostics do
alias Pleroma.Repo
alias Pleroma.User
require Logger
require Pleroma.Constants
import Mix.Pleroma
import Ecto.Query
use Mix.Task
def run(["home_timeline", nickname]) do
start_pleroma()
user = Repo.get_by!(User, nickname: nickname)
Logger.info("Home timeline query #{user.nickname}")
followed_hashtags =
user
|> User.followed_hashtags()
|> Enum.map(& &1.id)
params =
%{limit: 20}
|> Map.put(:type, ["Create", "Announce"])
|> Map.put(:blocking_user, user)
|> Map.put(:muting_user, user)
|> Map.put(:reply_filtering_user, user)
|> Map.put(:announce_filtering_user, user)
|> Map.put(:user, user)
|> Map.put(:followed_hashtags, followed_hashtags)
|> Map.delete(:local)
list_memberships = Pleroma.List.memberships(user)
recipients = [user.ap_id | User.following(user)]
query =
Pleroma.Web.ActivityPub.ActivityPub.fetch_activities_query(
recipients ++ list_memberships,
params
)
|> limit(20)
Ecto.Adapters.SQL.explain(Repo, :all, query, analyze: true, timeout: :infinity)
|> IO.puts()
end
def run(["user_timeline", nickname, reading_nickname]) do
start_pleroma()
user = Repo.get_by!(User, nickname: nickname)
reading_user = Repo.get_by!(User, nickname: reading_nickname)
Logger.info("User timeline query #{user.nickname}")
params =
%{limit: 20}
|> Map.put(:type, ["Create", "Announce"])
|> Map.put(:user, reading_user)
|> Map.put(:actor_id, user.ap_id)
|> Map.put(:pinned_object_ids, Map.keys(user.pinned_objects))
list_memberships = Pleroma.List.memberships(user)
recipients =
%{
godmode: params[:godmode],
reading_user: reading_user
}
|> Pleroma.Web.ActivityPub.ActivityPub.user_activities_recipients()
query =
(recipients ++ list_memberships)
|> Pleroma.Web.ActivityPub.ActivityPub.fetch_activities_query(params)
|> limit(20)
Ecto.Adapters.SQL.explain(Repo, :all, query, analyze: true, timeout: :infinity)
|> IO.puts()
end
end

View file

@ -59,7 +59,7 @@ def run(["gen" | rest]) do
get_option(
options,
:domain,
"What domain will your instance use? (e.g akkoma.example.com)"
"What domain will your instance use? (e.g pleroma.soykaf.com)"
),
":"
) ++ [443]

View file

@ -9,7 +9,7 @@ defmodule Mix.Tasks.Pleroma.Search.Meilisearch do
import Ecto.Query
import Pleroma.Search.Meilisearch,
only: [meili_put: 2, meili_get: 1, meili_delete!: 1]
only: [meili_post: 2, meili_put: 2, meili_get: 1, meili_delete!: 1]
def run(["index"]) do
start_pleroma()
@ -27,7 +27,7 @@ def run(["index"]) do
end
{:ok, _} =
meili_put(
meili_post(
"/indexes/objects/settings/ranking-rules",
[
"published:desc",
@ -41,7 +41,7 @@ def run(["index"]) do
)
{:ok, _} =
meili_put(
meili_post(
"/indexes/objects/settings/searchable-attributes",
[
"content"
@ -91,7 +91,7 @@ def run(["index"]) do
)
with {:ok, res} <- result do
if not Map.has_key?(res, "indexUid") do
if not Map.has_key?(res, "uid") do
IO.puts("\nFailed to index: #{inspect(result)}")
end
else

View file

@ -113,11 +113,9 @@ def run(["reset_password", nickname]) do
{:ok, token} <- Pleroma.PasswordResetToken.create_token(user) do
shell_info("Generated password reset token for #{user.nickname}")
IO.puts(
"URL: #{Pleroma.Web.Router.Helpers.reset_password_url(Pleroma.Web.Endpoint,
:reset,
token.token)}"
)
IO.puts("URL: #{Pleroma.Web.Router.Helpers.reset_password_url(Pleroma.Web.Endpoint,
:reset,
token.token)}")
else
_ ->
shell_error("No local user #{nickname}")
@ -260,25 +258,6 @@ def run(["untag", nickname | tags]) do
end
end
def run(["refetch_public_keys"]) do
start_pleroma()
Pleroma.User.Query.build(%{
external: true,
is_active: true
})
|> refetch_public_keys()
end
def run(["refetch_public_keys" | rest]) do
start_pleroma()
Pleroma.User.Query.build(%{
ap_id: rest
})
|> refetch_public_keys()
end
def run(["invite" | rest]) do
{options, [], []} =
OptionParser.parse(rest,
@ -471,15 +450,9 @@ def run(["blocking", nickname]) do
def run(["timeline_query", nickname]) do
start_pleroma()
params = %{local: true}
with %User{local: true} = user <- User.get_cached_by_nickname(nickname) do
followed_hashtags =
user
|> User.followed_hashtags()
|> Enum.map(& &1.id)
params =
params
|> Map.put(:type, ["Create", "Announce"])
@ -490,7 +463,6 @@ def run(["timeline_query", nickname]) do
|> Map.put(:announce_filtering_user, user)
|> Map.put(:user, user)
|> Map.put(:local_only, params[:local])
|> Map.put(:hashtags, followed_hashtags)
|> Map.delete(:local)
_activities =
@ -515,64 +487,6 @@ def run(["list"]) do
|> Stream.run()
end
def run(["fix_follow_state", local_user, remote_user]) do
start_pleroma()
with {:local, %User{} = local} <- {:local, User.get_by_nickname(local_user)},
{:remote, %User{} = remote} <- {:remote, User.get_by_nickname(remote_user)},
{:follow_data, %{data: %{"state" => request_state}}} <-
{:follow_data, Pleroma.Web.ActivityPub.Utils.fetch_latest_follow(local, remote)} do
calculated_state = User.following?(local, remote)
IO.puts(
"Request state is #{request_state}, vs calculated state of following=#{calculated_state}"
)
if calculated_state == false && request_state == "accept" do
IO.puts("Discrepancy found, fixing")
Pleroma.Web.CommonAPI.reject_follow_request(local, remote)
shell_info("Relationship fixed")
else
shell_info("No discrepancy found")
end
else
{:local, _} ->
shell_error("No local user #{local_user}")
{:remote, _} ->
shell_error("No remote user #{remote_user}")
{:follow_data, _} ->
shell_error("No follow data for #{local_user} and #{remote_user}")
end
end
def run(["convert_id", id]) do
{:ok, uuid} = FlakeId.Ecto.Type.dump(id)
{:ok, raw_id} = Ecto.UUID.load(uuid)
shell_info(raw_id)
end
defp refetch_public_keys(query) do
query
|> Pleroma.Repo.chunk_stream(50, :batches)
|> Stream.each(fn users ->
users
|> Enum.each(fn user ->
IO.puts("Re-Resolving: #{user.ap_id}")
with {:ok, user} <- Pleroma.User.fetch_by_ap_id(user.ap_id),
changeset <- Pleroma.User.update_changeset(user),
{:ok, _user} <- Pleroma.User.update_and_set_cache(changeset) do
:ok
else
error -> IO.puts("Could not resolve: #{user.ap_id}, #{inspect(error)}")
end
end)
end)
|> Stream.run()
end
defp set_moderator(user, value) do
{:ok, user} =
user

View file

@ -292,12 +292,6 @@ def get_in_reply_to_activity(%Activity{} = activity) do
get_in_reply_to_activity_from_object(Object.normalize(activity, fetch: false))
end
def get_quoted_activity_from_object(%Object{data: %{"quoteUri" => ap_id}}) do
get_create_by_object_ap_id_with_object(ap_id)
end
def get_quoted_activity_from_object(_), do: nil
def normalize(%Activity{data: %{"id" => ap_id}}), do: get_by_ap_id_with_object(ap_id)
def normalize(%{"id" => ap_id}), do: get_by_ap_id_with_object(ap_id)
def normalize(ap_id) when is_binary(ap_id), do: get_by_ap_id_with_object(ap_id)
@ -367,24 +361,10 @@ def following_requests_for_actor(%User{ap_id: ap_id}) do
|> Repo.all()
end
def follow_activity(%User{ap_id: ap_id}, %User{ap_id: followed_ap_id}) do
Queries.by_type("Follow")
|> where([a], a.actor == ^ap_id)
|> where([a], fragment("?->>'object' = ?", a.data, ^followed_ap_id))
|> where([a], fragment("?->>'state'", a.data) in ["pending", "accept"])
|> Repo.one()
end
def restrict_deactivated_users(query) do
query
|> join(
:inner_lateral,
[activity],
active in fragment(
"SELECT is_active from users WHERE ap_id = ? AND is_active = TRUE",
activity.actor
)
)
deactivated_users_query = from(u in User.Query.build(%{deactivated: true}), select: u.ap_id)
from(activity in query, where: activity.actor not in subquery(deactivated_users_query))
end
defdelegate search(user, query, options \\ []), to: Pleroma.Search.DatabaseSearch

View file

@ -8,40 +8,6 @@ defmodule Pleroma.Activity.HTML do
@cachex Pleroma.Config.get([:cachex, :provider], Cachex)
# We store a list of cache keys related to an activity in a
# separate cache, scrubber_management_cache. It has the same
# size as scrubber_cache (see application.ex). Every time we add
# a cache to scrubber_cache, we update scrubber_management_cache.
#
# The most recent write of a certain key in the management cache
# is the same as the most recent write of any record related to that
# key in the main cache.
# Assuming LRW ( https://hexdocs.pm/cachex/Cachex.Policy.LRW.html ),
# this means when the management cache is evicted by cachex, all
# related records in the main cache will also have been evicted.
defp get_cache_keys_for(activity_id) do
with {:ok, list} when is_list(list) <- @cachex.get(:scrubber_management_cache, activity_id) do
list
else
_ -> []
end
end
defp add_cache_key_for(activity_id, additional_key) do
current = get_cache_keys_for(activity_id)
unless additional_key in current do
@cachex.put(:scrubber_management_cache, activity_id, [additional_key | current])
end
end
def invalidate_cache_for(activity_id) do
keys = get_cache_keys_for(activity_id)
Enum.map(keys, &@cachex.del(:scrubber_cache, &1))
@cachex.del(:scrubber_management_cache, activity_id)
end
def get_cached_scrubbed_html_for_activity(
content,
scrubbers,
@ -53,8 +19,6 @@ def get_cached_scrubbed_html_for_activity(
@cachex.fetch!(:scrubber_cache, key, fn _key ->
object = Object.normalize(activity, fetch: false)
add_cache_key_for(activity.id, key)
HTML.ensure_scrubbed_html(content, scrubbers, object.data["fake"] || false, callback)
end)
end

View file

@ -1,52 +0,0 @@
defmodule Pleroma.Activity.Pruner do
@moduledoc """
Prunes activities from the database.
"""
@cutoff 30
alias Pleroma.Activity
alias Pleroma.Repo
import Ecto.Query
def prune_deletes do
before_time = cutoff()
from(a in Activity,
where: fragment("?->>'type' = ?", a.data, "Delete") and a.inserted_at < ^before_time
)
|> Repo.delete_all(timeout: :infinity)
end
def prune_undos do
before_time = cutoff()
from(a in Activity,
where: fragment("?->>'type' = ?", a.data, "Undo") and a.inserted_at < ^before_time
)
|> Repo.delete_all(timeout: :infinity)
end
def prune_removes do
before_time = cutoff()
from(a in Activity,
where: fragment("?->>'type' = ?", a.data, "Remove") and a.inserted_at < ^before_time
)
|> Repo.delete_all(timeout: :infinity)
end
def prune_stale_follow_requests do
before_time = cutoff()
from(a in Activity,
where:
fragment("?->>'type' = ?", a.data, "Follow") and a.inserted_at < ^before_time and
fragment("?->>'state' = ?", a.data, "reject")
)
|> Repo.delete_all(timeout: :infinity)
end
defp cutoff do
DateTime.utc_now() |> Timex.shift(days: -@cutoff)
end
end

View file

@ -1,100 +0,0 @@
defmodule Pleroma.Akkoma.FrontendSettingsProfile do
use Ecto.Schema
import Ecto.Changeset
import Ecto.Query
alias Pleroma.Repo
alias Pleroma.Config
alias Pleroma.User
@primary_key false
schema "user_frontend_setting_profiles" do
belongs_to(:user, Pleroma.User, primary_key: true, type: FlakeId.Ecto.CompatType)
field(:frontend_name, :string, primary_key: true)
field(:profile_name, :string, primary_key: true)
field(:settings, :map)
field(:version, :integer)
timestamps()
end
def changeset(%__MODULE__{} = struct, attrs) do
struct
|> cast(attrs, [:user_id, :frontend_name, :profile_name, :settings, :version])
|> validate_required([:user_id, :frontend_name, :profile_name, :settings, :version])
|> validate_length(:frontend_name, min: 1, max: 255)
|> validate_length(:profile_name, min: 1, max: 255)
|> validate_version(struct)
|> validate_number(:version, greater_than: 0)
|> validate_settings_length(Config.get([:instance, :max_frontend_settings_json_chars]))
end
def create_or_update(%User{} = user, frontend_name, profile_name, settings, version) do
struct =
case get_by_user_and_frontend_name_and_profile_name(user, frontend_name, profile_name) do
nil ->
%__MODULE__{}
%__MODULE__{} = profile ->
profile
end
struct
|> changeset(%{
user_id: user.id,
frontend_name: frontend_name,
profile_name: profile_name,
settings: settings,
version: version
})
|> Repo.insert_or_update()
end
def get_all_by_user_and_frontend_name(%User{id: user_id}, frontend_name) do
Repo.all(
from(p in __MODULE__, where: p.user_id == ^user_id and p.frontend_name == ^frontend_name)
)
end
def get_by_user_and_frontend_name_and_profile_name(
%User{id: user_id},
frontend_name,
profile_name
) do
Repo.one(
from(p in __MODULE__,
where:
p.user_id == ^user_id and p.frontend_name == ^frontend_name and
p.profile_name == ^profile_name
)
)
end
def delete_profile(profile) do
Repo.delete(profile)
end
defp validate_settings_length(
%Ecto.Changeset{changes: %{settings: settings}} = changeset,
max_length
) do
settings_json = Jason.encode!(settings)
if String.length(settings_json) > max_length do
add_error(changeset, :settings, "is too long")
else
changeset
end
end
defp validate_version(changeset, %{version: nil}), do: changeset
defp validate_version(%Ecto.Changeset{changes: %{version: version}} = changeset, %{
version: prev_version
}) do
if version != prev_version + 1 do
add_error(changeset, :version, "must be incremented by 1")
else
changeset
end
end
end

View file

@ -1,100 +0,0 @@
defmodule Pleroma.Akkoma.Translators.DeepL do
@behaviour Pleroma.Akkoma.Translator
alias Pleroma.HTTP
alias Pleroma.Config
require Logger
defp base_url(:free) do
"https://api-free.deepl.com/v2/"
end
defp base_url(:pro) do
"https://api.deepl.com/v2/"
end
defp api_key do
Config.get([:deepl, :api_key])
end
defp tier do
Config.get([:deepl, :tier])
end
@impl Pleroma.Akkoma.Translator
def languages do
with {:ok, %{status: 200} = source_response} <- do_languages("source"),
{:ok, %{status: 200} = dest_response} <- do_languages("target"),
{:ok, source_body} <- Jason.decode(source_response.body),
{:ok, dest_body} <- Jason.decode(dest_response.body) do
source_resp =
Enum.map(source_body, fn %{"language" => code, "name" => name} ->
%{code: code, name: name}
end)
dest_resp =
Enum.map(dest_body, fn %{"language" => code, "name" => name} ->
%{code: code, name: name}
end)
{:ok, source_resp, dest_resp}
else
{:ok, %{status: status} = response} ->
Logger.warning("DeepL: Request rejected: #{inspect(response)}")
{:error, "DeepL request failed (code #{status})"}
{:error, reason} ->
{:error, reason}
end
end
@impl Pleroma.Akkoma.Translator
def translate(string, from_language, to_language) do
with {:ok, %{status: 200} = response} <-
do_request(api_key(), tier(), string, from_language, to_language),
{:ok, body} <- Jason.decode(response.body) do
%{"translations" => [%{"text" => translated, "detected_source_language" => detected}]} =
body
{:ok, detected, translated}
else
{:ok, %{status: status} = response} ->
Logger.warning("DeepL: Request rejected: #{inspect(response)}")
{:error, "DeepL request failed (code #{status})"}
{:error, reason} ->
{:error, reason}
end
end
defp do_request(api_key, tier, string, from_language, to_language) do
HTTP.post(
base_url(tier) <> "translate",
URI.encode_query(
%{
text: string,
target_lang: to_language,
tag_handling: "html"
}
|> maybe_add_source(from_language),
:rfc3986
),
[
{"authorization", "DeepL-Auth-Key #{api_key}"},
{"content-type", "application/x-www-form-urlencoded"}
]
)
end
defp maybe_add_source(opts, nil), do: opts
defp maybe_add_source(opts, lang), do: Map.put(opts, :source_lang, lang)
defp do_languages(type) do
HTTP.get(
base_url(tier()) <> "languages?type=#{type}",
[
{"authorization", "DeepL-Auth-Key #{api_key()}"}
]
)
end
end

View file

@ -1,82 +0,0 @@
defmodule Pleroma.Akkoma.Translators.LibreTranslate do
@behaviour Pleroma.Akkoma.Translator
alias Pleroma.Config
alias Pleroma.HTTP
require Logger
defp api_key do
Config.get([:libre_translate, :api_key])
end
defp url do
Config.get([:libre_translate, :url])
end
@impl Pleroma.Akkoma.Translator
def languages do
with {:ok, %{status: 200} = response} <- do_languages(),
{:ok, body} <- Jason.decode(response.body) do
resp = Enum.map(body, fn %{"code" => code, "name" => name} -> %{code: code, name: name} end)
# No separate source/dest
{:ok, resp, resp}
else
{:ok, %{status: status} = response} ->
Logger.warning("LibreTranslate: Request rejected: #{inspect(response)}")
{:error, "LibreTranslate request failed (code #{status})"}
{:error, reason} ->
{:error, reason}
end
end
@impl Pleroma.Akkoma.Translator
def translate(string, from_language, to_language) do
with {:ok, %{status: 200} = response} <- do_request(string, from_language, to_language),
{:ok, body} <- Jason.decode(response.body) do
%{"translatedText" => translated} = body
detected =
if Map.has_key?(body, "detectedLanguage") do
get_in(body, ["detectedLanguage", "language"])
else
from_language
end
{:ok, detected, translated}
else
{:ok, %{status: status} = response} ->
Logger.warning("libre_translate: request failed, #{inspect(response)}")
{:error, "libre_translate: request failed (code #{status})"}
{:error, reason} ->
{:error, reason}
end
end
defp do_request(string, from_language, to_language) do
url = URI.parse(url())
url = %{url | path: "/translate"}
HTTP.post(
to_string(url),
Jason.encode!(%{
q: string,
source: if(is_nil(from_language), do: "auto", else: from_language),
target: to_language,
format: "html",
api_key: api_key()
}),
[
{"content-type", "application/json"}
]
)
end
defp do_languages() do
url = URI.parse(url())
url = %{url | path: "/languages"}
HTTP.get(to_string(url))
end
end

View file

@ -1,8 +0,0 @@
defmodule Pleroma.Akkoma.Translator do
@callback translate(String.t(), String.t() | nil, String.t()) ::
{:ok, String.t(), String.t()} | {:error, any()}
@callback languages() ::
{:ok, [%{name: String.t(), code: String.t()}],
[%{name: String.t(), code: String.t()}]}
| {:error, any()}
end

Some files were not shown because too many files have changed in this diff.