forked from AkkomaGang/akkoma
Add a default rule to not attempt to cache any files larger than 50MB
This fixes connection failures when retrieving large files. The problem is less common in typical Pleroma usage, but it can be encountered on a cloud instance with limited memory.
parent d7262f7d22
commit 1d8f1fe077
1 changed file with 7 additions and 0 deletions
@@ -59,6 +59,13 @@ sub vcl_backend_response {
     set beresp.http.CR = beresp.http.content-range;
   }
 
+  # Bypass cache for large files
+  # 50000000 ~ 50MB
+  if (std.integer(beresp.http.content-length, 0) > 50000000) {
+    set beresp.uncacheable = true;
+    return(deliver);
+  }
+
   # Don't cache objects that require authentication
   if (beresp.http.Authorization && !beresp.http.Cache-Control ~ "public") {
     set beresp.uncacheable = true;
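For reference, below is a minimal standalone sketch of the same rule, assuming Varnish 4.1 syntax and a backend on 127.0.0.1:4000 (both are illustrative assumptions, not taken from the diff). std.integer() comes from vmod_std, so the full config needs an "import std;" that this hunk does not show.

    vcl 4.1;

    # vmod_std provides std.integer(), used for the size check below.
    import std;

    # Hypothetical backend for illustration; adjust host/port to your instance.
    backend default {
      .host = "127.0.0.1";
      .port = "4000";
    }

    sub vcl_backend_response {
      # Mark responses larger than ~50 MB as uncacheable and deliver them
      # directly instead of storing them in the cache.
      # std.integer() falls back to 0 when Content-Length is missing
      # (e.g. chunked responses), so those stay on the normal cache path.
      if (std.integer(beresp.http.content-length, 0) > 50000000) {
        set beresp.uncacheable = true;
        return (deliver);
      }
    }

The 50000000-byte threshold is just a plain byte count compared against Content-Length, so it can be tuned per instance depending on available memory and cache storage size.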