56 lines
1.9 KiB
Elixir
56 lines
1.9 KiB
Elixir
defmodule Pleroma.HTTP.Backoff do
  @moduledoc """
  A rate-limit-aware wrapper around `Pleroma.HTTP.get/3`.

  When a host answers with HTTP 429, the host is remembered in a cache and
  every subsequent GET to it fails fast with `{:error, :ratelimit}` until the
  advertised rate-limit window (or a 5-minute default) has elapsed.
  """

  alias Pleroma.HTTP

  require Logger

  # Cache provider is configurable so tests can substitute a mock.
  @cachex Pleroma.Config.get([:cachex, :provider], Cachex)
  @backoff_cache :http_backoff_cache

  # Fallback backoff window (seconds) when the server doesn't say when to retry.
  @default_backoff_seconds 5 * 60

  # Figure out from the 429 response when we can make the next request.
  # Mastodon sends the x-ratelimit-reset header (an ISO8601 timestamp), so we
  # use that! Other servers may not, so we default to 5 minutes from now when
  # the header is missing or unparsable.
  defp next_backoff_timestamp(%{headers: headers}) when is_list(headers) do
    # NOTE: the finder fun needs a catch-all clause — without one, any header
    # other than x-ratelimit-reset would raise FunctionClauseError.
    reset_value =
      Enum.find_value(headers, fn
        {"x-ratelimit-reset", value} -> value
        _ -> nil
      end)

    # DateTime.from_iso8601/1 returns a THREE-tuple {:ok, datetime, offset}
    # on success; fall back to the default window on a missing/bad header
    # instead of crashing.
    with value when is_binary(value) <- reset_value,
         {:ok, stamp, _offset} <- DateTime.from_iso8601(value) do
      stamp
    else
      _ -> default_backoff_timestamp()
    end
  end

  defp next_backoff_timestamp(_), do: default_backoff_timestamp()

  # 5 minutes from now, as a UTC DateTime.
  defp default_backoff_timestamp do
    DateTime.add(DateTime.utc_now(), @default_backoff_seconds, :second)
  end

  @doc """
  Performs a GET request via `Pleroma.HTTP.get/3`, backing off from hosts
  that have recently rate-limited us.

  This acts as a single throughput for all GET requests: if the host is in
  the backoff cache the request fails immediately with `{:error, :ratelimit}`,
  so we don't hammer the server and instead wait for the backoff to expire.
  This is a very simple implementation, and can be improved upon!

  Returns `{:ok, env}` on success, `{:error, :ratelimit}` while backing off,
  or `{:error, reason}` from the underlying HTTP client.
  """
  def get(url, headers \\ [], options \\ []) do
    %{host: host} = URI.parse(url)

    case @cachex.get(@backoff_cache, host) do
      {:ok, nil} ->
        case HTTP.get(url, headers, options) do
          {:ok, %{status: 429} = env} ->
            Logger.error("Rate limited on #{host}! Backing off...")
            timestamp = next_backoff_timestamp(env)
            backoff_seconds = max(DateTime.diff(timestamp, DateTime.utc_now(), :second), 1)

            # Cachex takes options as a keyword list, with :ttl in
            # milliseconds — a bare integer in seconds would be wrong twice.
            @cachex.put(@backoff_cache, host, true, ttl: :timer.seconds(backoff_seconds))
            {:error, :ratelimit}

          {:ok, env} ->
            {:ok, env}

          {:error, reason} ->
            {:error, reason}
        end

      # Host is currently backing off (or the cache errored) — fail fast.
      _ ->
        {:error, :ratelimit}
    end
  end
end
|