forked from AkkomaGang/akkoma

commit 2007b1c586: Merge branch 'develop' of https://akkoma.dev/AkkomaGang/akkoma into akko.wtf

13 changed files with 299 additions and 78 deletions
@@ -1 +0,0 @@
-https://github.com/hashnuke/heroku-buildpack-elixir
@@ -37,6 +37,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
 - Issue leading to Mastodon bot accounts being rejected
 - Scope misdetection of remote posts resulting from not recognising
   JSON-LD-compacted forms of public scope; affected e.g. federation with bovine
+- Ratelimits encountered when fetching objects are now respected; 429 responses will cause a backoff when we get one.

 ## Removed
 - ActivityPub Client-To-Server write API endpoints have been disabled;
@@ -1,25 +0,0 @@
-import Config
-
-config :pleroma, Pleroma.Web.Endpoint,
-  http: [
-    port: String.to_integer(System.get_env("PORT") || "4000"),
-    protocol_options: [max_request_line_length: 8192, max_header_value_length: 8192]
-  ],
-  protocol: "http",
-  secure_cookie_flag: false,
-  url: [host: System.get_env("APP_HOST"), scheme: "https", port: 443],
-  secret_key_base: "+S+ULgf7+N37c/lc9K66SMphnjQIRGklTu0BRr2vLm2ZzvK0Z6OH/PE77wlUNtvP"
-
-database_url =
-  System.get_env("DATABASE_URL") ||
-    raise """
-    environment variable DATABASE_URL is missing.
-    For example: ecto://USER:PASS@HOST/DATABASE
-    """
-
-config :pleroma, Pleroma.Repo,
-  # ssl: true,
-  url: database_url,
-  pool_size: String.to_integer(System.get_env("POOL_SIZE") || "10")
-
-config :pleroma, :instance, name: "#{System.get_env("APP_NAME")} CI Instance"
@@ -1,12 +1,15 @@
 # Akkoma Clients
-Note: Additional clients may work, but these are known to work with Akkoma.
-Apps listed here might not support all of Akkoma's features.
+This is a list of clients that are known to work with Akkoma.
+
+!!! warning
+    **Clients listed here are not officially supported by the Akkoma project.**
+    Some Akkoma features may be unsupported by these clients.

 ## Multiplatform
 ### Kaiteki
 - Homepage: <https://kaiteki.app/>
 - Source Code: <https://github.com/Kaiteki-Fedi/Kaiteki>
-- Contact: [@kaiteki@fedi.software](https://fedi.software/@Kaiteki)
+- Contact: [@kaiteki@social.kaiteki.app](https://social.kaiteki.app/@kaiteki)
 - Platforms: Web, Windows, Linux, Android
 - Features: MastoAPI, Supports multiple backends

@@ -38,12 +41,6 @@ Apps listed here might not support all of Akkoma's features.
 - Platforms: Android
 - Features: MastoAPI, No Streaming, Emoji Reactions, Text Formatting, FE Stickers

-### Fedi
-- Homepage: <https://www.fediapp.com/>
-- Source Code: Proprietary, but gratis
-- Platforms: iOS, Android
-- Features: MastoAPI, Pleroma-specific features like Reactions
-
 ### Tusky
 - Homepage: <https://tuskyapp.github.io/>
 - Source Code: <https://github.com/tuskyapp/Tusky>
@@ -51,12 +48,18 @@ Apps listed here might not support all of Akkoma's features.
 - Platforms: Android
 - Features: MastoAPI, No Streaming

 ### Subway Tooter
 - Source Code: <https://github.com/tateisu/SubwayTooter/>
 - Contact: [@SubwayTooter@mastodon.juggler.jp](https://mastodon.juggler.jp/@SubwayTooter)
 - Platforms: Android
 - Features: MastoAPI, Editing, Emoji Reactions (including custom emoji)

 ## Alternative Web Interfaces
 ### Pinafore
+- Note: Pinafore is unmaintained (See [the author's original article](https://nolanlawson.com/2023/01/09/retiring-pinafore/) for details)
 - Homepage: <https://pinafore.social/>
 - Source Code: <https://github.com/nolanlawson/pinafore>
 - Contact: [@pinafore@mastodon.technology](https://mastodon.technology/users/pinafore)
+### Enafore
+- An actively developed fork of Pinafore with improved Akkoma support
+- Homepage: <https://enafore.social/>
+- Source Code: <https://github.com/enafore/enafore>
+- Contact: [@enfore@enafore.social](https://meta.enafore.social/@enafore)
 - Features: MastoAPI, No Streaming

 ### Sengi
@@ -179,7 +179,8 @@ defp cachex_children do
       build_cachex("translations", default_ttl: :timer.hours(24 * 30), limit: 2500),
       build_cachex("instances", default_ttl: :timer.hours(24), ttl_interval: 1000, limit: 2500),
       build_cachex("request_signatures", default_ttl: :timer.hours(24 * 30), limit: 3000),
-      build_cachex("rel_me", default_ttl: :timer.hours(24 * 30), limit: 300)
+      build_cachex("rel_me", default_ttl: :timer.hours(24 * 30), limit: 300),
+      build_cachex("http_backoff", default_ttl: :timer.hours(24 * 30), limit: 10000)
     ]
   end
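The new `http_backoff` cache registered here is a plain Cachex cache: each entry is a per-host flag with a TTL, set when a host rate-limits us and consulted before every fetch. A minimal sketch of those semantics, assuming a standalone Cachex cache (the `start_link` call stands in for the supervision-tree registration above, and the host names are illustrative):

```elixir
# Sketch only: in the application the cache is started via build_cachex/2;
# here we start a bare Cachex cache with the same name for illustration.
{:ok, _pid} = Cachex.start_link(:http_backoff_cache)

# mark a host as rate limited for five minutes
{:ok, true} = Cachex.put(:http_backoff_cache, "example.com", true, ttl: :timer.minutes(5))

# a flagged host reads back as true until the TTL expires...
{:ok, true} = Cachex.get(:http_backoff_cache, "example.com")

# ...while an unflagged host reads back as nil, so requests may proceed
{:ok, nil} = Cachex.get(:http_backoff_cache, "other.example")
```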
lib/pleroma/http/backoff.ex (new file, 121 lines)

@@ -0,0 +1,121 @@
defmodule Pleroma.HTTP.Backoff do
  alias Pleroma.HTTP
  require Logger

  @cachex Pleroma.Config.get([:cachex, :provider], Cachex)
  @backoff_cache :http_backoff_cache

  # attempt to parse a timestamp from a header
  # returns nil if it can't parse the timestamp
  @spec timestamp_or_nil(binary) :: DateTime.t() | nil
  defp timestamp_or_nil(header) do
    case DateTime.from_iso8601(header) do
      {:ok, stamp, _} ->
        stamp

      _ ->
        nil
    end
  end

  # attempt to parse the x-ratelimit-reset header from the headers
  @spec x_ratelimit_reset(headers :: list) :: DateTime.t() | nil
  defp x_ratelimit_reset(headers) do
    with {_header, value} <- List.keyfind(headers, "x-ratelimit-reset", 0),
         true <- is_binary(value) do
      timestamp_or_nil(value)
    else
      _ ->
        nil
    end
  end

  # attempt to parse the Retry-After header from the headers
  # this can be either a timestamp _or_ a number of seconds to wait!
  # we'll return a datetime if we can parse it, or nil if we can't
  @spec retry_after(headers :: list) :: DateTime.t() | nil
  defp retry_after(headers) do
    with {_header, value} <- List.keyfind(headers, "retry-after", 0),
         true <- is_binary(value) do
      # first, see if it's an integer
      case Integer.parse(value) do
        {seconds, ""} ->
          Logger.debug("Parsed Retry-After header: #{seconds} seconds")
          DateTime.utc_now() |> Timex.shift(seconds: seconds)

        _ ->
          # if it's not an integer, try to parse it as a timestamp
          timestamp_or_nil(value)
      end
    else
      _ ->
        nil
    end
  end

  # given a set of headers, will attempt to find the next backoff timestamp
  # if it can't find one, it will default to 5 minutes from now
  @spec next_backoff_timestamp(%{headers: list}) :: DateTime.t()
  defp next_backoff_timestamp(%{headers: headers}) when is_list(headers) do
    default_5_minute_backoff =
      DateTime.utc_now()
      |> Timex.shift(seconds: 5 * 60)

    backoff =
      [&x_ratelimit_reset/1, &retry_after/1]
      |> Enum.map(& &1.(headers))
      |> Enum.find(&(&1 != nil))

    if is_nil(backoff) do
      Logger.debug("No backoff headers found, defaulting to 5 minutes from now")
      default_5_minute_backoff
    else
      Logger.debug("Found backoff header, will back off until: #{backoff}")
      backoff
    end
  end

  defp next_backoff_timestamp(_), do: DateTime.utc_now() |> Timex.shift(seconds: 5 * 60)

  # utility function to check the HTTP response for potential backoff headers
  # will check if we get a 429 or 503 response, and if we do, will back off for a bit
  @spec check_backoff({:ok | :error, HTTP.Env.t()}, binary()) ::
          {:ok | :error, HTTP.Env.t()} | {:error, :ratelimit}
  defp check_backoff({:ok, env}, host) do
    case env.status do
      status when status in [429, 503] ->
        Logger.error("Rate limited on #{host}! Backing off...")
        timestamp = next_backoff_timestamp(env)
        ttl = Timex.diff(timestamp, DateTime.utc_now(), :seconds)
        # cache the host for the duration of the backoff we just computed
        @cachex.put(@backoff_cache, host, true, ttl: ttl)
        {:error, :ratelimit}

      _ ->
        {:ok, env}
    end
  end

  defp check_backoff(env, _), do: env

  @doc """
  this acts as a single choke point for all GET requests
  we will check if the host is in the cache, and if it is, we will automatically fail the request
  this ensures that we don't hammer the server with requests, and instead wait for the backoff to expire
  this is a very simple implementation, and can be improved upon!
  """
  @spec get(binary, list, list) :: {:ok | :error, HTTP.Env.t()} | {:error, :ratelimit}
  def get(url, headers \\ [], options \\ []) do
    %{host: host} = URI.parse(url)

    case @cachex.get(@backoff_cache, host) do
      {:ok, nil} ->
        url
        |> HTTP.get(headers, options)
        |> check_backoff(host)

      _ ->
        {:error, :ratelimit}
    end
  end
end
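The call sites below swap `HTTP.get` for `HTTP.Backoff.get`, which succeeds with the same shape but can also fail fast with `{:error, :ratelimit}`. A hedged sketch of what a caller should be prepared to handle (the URL and the non-2xx handling are illustrative, not taken from this commit):

```elixir
# Caller-side sketch; assumes Pleroma.HTTP wraps Tesla, as the tests below do.
case Pleroma.HTTP.Backoff.get("https://example.com/objects/1") do
  {:ok, %Tesla.Env{status: status, body: body}} when status in 200..299 ->
    # the remote answered normally; proceed exactly as with HTTP.get
    {:ok, body}

  {:error, :ratelimit} ->
    # either this response was a 429/503, or the host is still cached as
    # backed off from an earlier one; retry after the backoff expires
    {:error, :ratelimit}

  {:ok, %Tesla.Env{status: status}} ->
    # any other non-2xx response (illustrative handling)
    {:error, {:http_status, status}}

  {:error, reason} ->
    # transport-level failure from the underlying HTTP client
    {:error, reason}
end
```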
@@ -354,7 +354,7 @@ def get_object(id) do

     with {:ok, %{body: body, status: code, headers: headers, url: final_url}}
          when code in 200..299 <-
-           HTTP.get(id, headers),
+           HTTP.Backoff.get(id, headers),
          remote_host <-
            URI.parse(final_url).host,
          {:cross_domain_redirect, false} <-

@@ -10,7 +10,7 @@ defmodule Pleroma.Signature do
   alias Pleroma.User
   alias Pleroma.Web.ActivityPub.ActivityPub

-  @known_suffixes ["/publickey", "/main-key"]
+  @known_suffixes ["/publickey", "/main-key", "#key"]

   def key_id_to_actor_id(key_id) do
     uri =

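With `"#key"` added to `@known_suffixes`, fragment-style key ids (as used by Bridgy) now resolve to an actor id like the slash-suffixed forms. The behavior, as exercised by the new signature test at the bottom of this commit:

```elixir
# Covered by the new "it deduces the actor ID for bridgy" test below:
# the "#key" fragment is stripped just like the other known suffixes.
Pleroma.Signature.key_id_to_actor_id("https://example.com/1234#key")
# => {:ok, "https://example.com/1234"}
```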
@@ -160,7 +160,8 @@ def find_lrdd_template(domain) do
     # WebFinger is restricted to HTTPS - https://tools.ietf.org/html/rfc7033#section-9.1
     meta_url = "https://#{domain}/.well-known/host-meta"

-    with {:ok, %{status: status, body: body}} when status in 200..299 <- HTTP.get(meta_url) do
+    with {:ok, %{status: status, body: body}} when status in 200..299 <-
+           HTTP.Backoff.get(meta_url) do
       get_template_from_xml(body)
     else
       error ->
@@ -197,7 +198,7 @@ def finger(account) do

     with address when is_binary(address) <- get_address_from_domain(domain, encoded_account),
          {:ok, %{status: status, body: body, headers: headers}} when status in 200..299 <-
-           HTTP.get(
+           HTTP.Backoff.get(
              address,
              [{"accept", "application/xrd+xml,application/jrd+json"}]
            ) do

@@ -1,37 +1,10 @@
 defmodule Pleroma.Repo.Migrations.UploadFilterExiftoolToExiftoolStripMetadata do
   use Ecto.Migration

-  alias Pleroma.ConfigDB
-
-  def up,
-    do:
-      ConfigDB.get_by_params(%{group: :pleroma, key: Pleroma.Upload})
-      |> update_filtername(
-        Pleroma.Upload.Filter.Exiftool,
-        Pleroma.Upload.Filter.Exiftool.StripMetadata
-      )
-
-  def down,
-    do:
-      ConfigDB.get_by_params(%{group: :pleroma, key: Pleroma.Upload})
-      |> update_filtername(
-        Pleroma.Upload.Filter.Exiftool.StripMetadata,
-        Pleroma.Upload.Filter.Exiftool
-      )
-
-  defp update_filtername(%{value: value}, from_filtername, to_filtername) do
-    new_value =
-      value
-      |> Keyword.update(:filters, [], fn filters ->
-        filters
-        |> Enum.map(fn
-          ^from_filtername -> to_filtername
-          filter -> filter
-        end)
-      end)
-
-    ConfigDB.update_or_create(%{group: :pleroma, key: Pleroma.Upload, value: new_value})
-  end
-
-  defp update_filtername(_, _, _), do: nil
+  # 20240425120000_upload_filter_exiftool_to_exiftool_strip_location.exs
+  # was originally committed with the id used in this file, but this breaks
+  # rollback order. Thus it was moved to 20240425120000 and this stub just prevents
+  # errors during large-scale rollbacks for anyone who already applied the old id
+  def up, do: :ok
+  def down, do: :ok
 end

@@ -0,0 +1,37 @@
defmodule Pleroma.Repo.Migrations.UploadFilterExiftoolToExiftoolStripMetadataReal do
  use Ecto.Migration

  alias Pleroma.ConfigDB

  def up,
    do:
      ConfigDB.get_by_params(%{group: :pleroma, key: Pleroma.Upload})
      |> update_filtername(
        Pleroma.Upload.Filter.Exiftool,
        Pleroma.Upload.Filter.Exiftool.StripMetadata
      )

  def down,
    do:
      ConfigDB.get_by_params(%{group: :pleroma, key: Pleroma.Upload})
      |> update_filtername(
        Pleroma.Upload.Filter.Exiftool.StripMetadata,
        Pleroma.Upload.Filter.Exiftool
      )

  defp update_filtername(%{value: value}, from_filtername, to_filtername) do
    new_value =
      value
      |> Keyword.update(:filters, [], fn filters ->
        filters
        |> Enum.map(fn
          ^from_filtername -> to_filtername
          filter -> filter
        end)
      end)

    ConfigDB.update_or_create(%{group: :pleroma, key: Pleroma.Upload, value: new_value})
  end

  defp update_filtername(_, _, _), do: nil
end
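The heart of `update_filtername/3` is a `Keyword.update/4` plus `Enum.map/2` over the configured upload filters, swapping one module name for another and leaving the rest untouched. A standalone sketch of that transformation (the `:filters` value here is illustrative, not taken from a real instance):

```elixir
# Standalone sketch of the rename performed by update_filtername/3;
# the filter list is an illustrative example of a ConfigDB value.
value = [filters: [Pleroma.Upload.Filter.Exiftool, Pleroma.Upload.Filter.Dedupe]]

new_value =
  Keyword.update(value, :filters, [], fn filters ->
    Enum.map(filters, fn
      Pleroma.Upload.Filter.Exiftool -> Pleroma.Upload.Filter.Exiftool.StripMetadata
      filter -> filter
    end)
  end)

# new_value == [filters: [Pleroma.Upload.Filter.Exiftool.StripMetadata,
#                         Pleroma.Upload.Filter.Dedupe]]
```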
test/pleroma/http/backoff_test.exs (new file, 105 lines)

@@ -0,0 +1,105 @@
defmodule Pleroma.HTTP.BackoffTest do
  @backoff_cache :http_backoff_cache
  use Pleroma.DataCase, async: false
  alias Pleroma.HTTP.Backoff

  defp within_tolerance?(ttl, expected) do
    ttl > expected - 10 and ttl < expected + 10
  end

  describe "get/3" do
    test "should return {:ok, env} when not rate limited" do
      Tesla.Mock.mock_global(fn
        %Tesla.Env{url: "https://akkoma.dev/api/v1/instance"} ->
          {:ok, %Tesla.Env{status: 200, body: "ok"}}
      end)

      assert {:ok, env} = Backoff.get("https://akkoma.dev/api/v1/instance")
      assert env.status == 200
    end

    test "should return {:error, env} when rate limited" do
      # Shove a value into the cache to simulate a rate limit
      Cachex.put(@backoff_cache, "akkoma.dev", true)
      assert {:error, :ratelimit} = Backoff.get("https://akkoma.dev/api/v1/instance")
    end

    test "should insert a value into the cache when rate limited" do
      Tesla.Mock.mock_global(fn
        %Tesla.Env{url: "https://ratelimited.dev/api/v1/instance"} ->
          {:ok, %Tesla.Env{status: 429, body: "Rate limited"}}
      end)

      assert {:error, :ratelimit} = Backoff.get("https://ratelimited.dev/api/v1/instance")
      assert {:ok, true} = Cachex.get(@backoff_cache, "ratelimited.dev")
    end

    test "should insert a value into the cache when rate limited with a 503 response" do
      Tesla.Mock.mock_global(fn
        %Tesla.Env{url: "https://ratelimited.dev/api/v1/instance"} ->
          {:ok, %Tesla.Env{status: 503, body: "Rate limited"}}
      end)

      assert {:error, :ratelimit} = Backoff.get("https://ratelimited.dev/api/v1/instance")
      assert {:ok, true} = Cachex.get(@backoff_cache, "ratelimited.dev")
    end

    test "should parse the value of x-ratelimit-reset, if present" do
      ten_minutes_from_now =
        DateTime.utc_now() |> Timex.shift(minutes: 10) |> DateTime.to_iso8601()

      Tesla.Mock.mock_global(fn
        %Tesla.Env{url: "https://ratelimited.dev/api/v1/instance"} ->
          {:ok,
           %Tesla.Env{
             status: 429,
             body: "Rate limited",
             headers: [{"x-ratelimit-reset", ten_minutes_from_now}]
           }}
      end)

      assert {:error, :ratelimit} = Backoff.get("https://ratelimited.dev/api/v1/instance")
      assert {:ok, true} = Cachex.get(@backoff_cache, "ratelimited.dev")
      {:ok, ttl} = Cachex.ttl(@backoff_cache, "ratelimited.dev")
      assert within_tolerance?(ttl, 600)
    end

    test "should parse the value of retry-after when it's a timestamp" do
      ten_minutes_from_now =
        DateTime.utc_now() |> Timex.shift(minutes: 10) |> DateTime.to_iso8601()

      Tesla.Mock.mock_global(fn
        %Tesla.Env{url: "https://ratelimited.dev/api/v1/instance"} ->
          {:ok,
           %Tesla.Env{
             status: 429,
             body: "Rate limited",
             headers: [{"retry-after", ten_minutes_from_now}]
           }}
      end)

      assert {:error, :ratelimit} = Backoff.get("https://ratelimited.dev/api/v1/instance")
      assert {:ok, true} = Cachex.get(@backoff_cache, "ratelimited.dev")
      {:ok, ttl} = Cachex.ttl(@backoff_cache, "ratelimited.dev")
      assert within_tolerance?(ttl, 600)
    end

    test "should parse the value of retry-after when it's a number of seconds" do
      Tesla.Mock.mock_global(fn
        %Tesla.Env{url: "https://ratelimited.dev/api/v1/instance"} ->
          {:ok,
           %Tesla.Env{
             status: 429,
             body: "Rate limited",
             headers: [{"retry-after", "600"}]
           }}
      end)

      assert {:error, :ratelimit} = Backoff.get("https://ratelimited.dev/api/v1/instance")
      assert {:ok, true} = Cachex.get(@backoff_cache, "ratelimited.dev")
      # assert that the value is 10 minutes from now
      {:ok, ttl} = Cachex.ttl(@backoff_cache, "ratelimited.dev")
      assert within_tolerance?(ttl, 600)
    end
  end
end
@@ -153,6 +153,11 @@ test "it deduces the actor ID for streams" do
              {:ok, "https://example.com/users/1234"}
   end

+  test "it deduces the actor ID for bridgy" do
+    assert Signature.key_id_to_actor_id("https://example.com/1234#key") ==
+             {:ok, "https://example.com/1234"}
+  end
+
   test "it calls webfinger for 'acct:' accounts" do
     with_mock(Pleroma.Web.WebFinger,
       finger: fn _ -> {:ok, %{"ap_id" => "https://gensokyo.2hu/users/raymoo"}} end