2017-05-17 16:00:09 +00:00
|
|
|
defmodule Pleroma.Formatter do
|
2017-06-14 11:58:56 +00:00
|
|
|
alias Pleroma.User
|
2017-12-12 11:30:24 +00:00
|
|
|
alias Pleroma.Web.MediaProxy
|
2018-09-09 23:40:24 +00:00
|
|
|
alias Pleroma.HTML
|
2018-11-05 12:24:00 +00:00
|
|
|
alias Pleroma.Emoji
|
# Matches a `#hashtag`; the /u flag makes \w Unicode-aware so non-ASCII
# tag names are captured too.
@tag_regex ~r/\#\w+/u

@doc """
Extracts hashtags from `text` as `{full_tag, downcased_name}` tuples,
e.g. `"#Cats"` becomes `{"#Cats", "cats"}`. Duplicates are kept in order
of appearance.

When `data["sensitive"]` is truthy (`true`, `"True"`, `"true"` or `"1"`)
a `{"#nsfw", "nsfw"}` tag is prepended to the result.
"""
def parse_tags(text, data \\ %{}) do
  tags =
    @tag_regex
    |> Regex.scan(text)
    |> Enum.map(fn ["#" <> tag = full_tag] -> {full_tag, String.downcase(tag)} end)

  # A plain conditional replaces the original pipe into an immediately
  # invoked anonymous function — `|> (fn … end).()` is an Elixir anti-pattern.
  if data["sensitive"] in [true, "True", "true", "1"] do
    [{"#nsfw", "nsfw"} | tags]
  else
    tags
  end
end
@doc """
Finds `@nickname` / `@nickname@domain` mentions in `text` and resolves
them against the local user cache.

Returns `{full_match, %User{}}` tuples; matches that do not resolve to a
known user are dropped.
"""
def parse_mentions(text) do
  # Modified from https://www.w3.org/TR/html5/forms.html#valid-e-mail-address
  mention_regex =
    ~r/@[a-zA-Z0-9.!#$%&'*+\/=?^_`{|}~-]*@?[a-zA-Z0-9_-](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)*/u

  mention_regex
  |> Regex.scan(text)
  |> List.flatten()
  |> Enum.uniq()
  |> Enum.map(fn "@" <> nickname = full -> {full, User.get_cached_by_nickname(nickname)} end)
  |> Enum.filter(fn {_full, user} -> user end)
end
@doc "Replaces `:emoji:` shortcodes in `text` using every known emoji."
def emojify(text), do: emojify(text, Emoji.get_all())
def emojify(text, nil), do: text

@doc """
Replaces each `:name:` shortcode in `text` with an `<img>` tag for the
matching emoji file, proxying the file URL through MediaProxy.

Emoji names and file paths are stripped of HTML tags before being
interpolated, and the whole text is re-filtered after every replacement.
"""
def emojify(text, emoji) do
  Enum.reduce(emoji, text, fn {name, file}, acc ->
    safe_name = HTML.strip_tags(name)
    safe_file = HTML.strip_tags(file)

    acc
    |> String.replace(
      ":#{safe_name}:",
      "<img height='32px' width='32px' alt='#{safe_name}' title='#{safe_name}' src='#{MediaProxy.url(safe_file)}' />"
    )
    |> HTML.filter_tags()
  end)
end
@doc "Returns the known emoji whose `:name:` shortcode occurs in `text`."
def get_emoji(text) when is_binary(text) do
  for {name, _file} = entry <- Emoji.get_all(),
      String.contains?(text, ":#{name}:"),
      do: entry
end

# Non-binary input (e.g. nil) carries no emoji.
def get_emoji(_), do: []
# Matches `scheme:address`-style links. NOTE(review): inside the character
# class, `$-_` is a *range* (0x24–0x5F), which covers far more characters
# (uppercase letters, digits, punctuation) than the three literals — verify
# this is intended.
@link_regex ~r/[0-9a-z+\-\.]+:[0-9a-z$-_.+!*'(),]+/ui

# NOTE(review): Application.get_env/3 in a module attribute is evaluated at
# compile time, so runtime changes to the :pleroma :uri_schemes config will
# not be picked up without a recompile — confirm this is intended.
@uri_schemes Application.get_env(:pleroma, :uri_schemes, [])
# URL schemes (e.g. "https://") that add_links/1 will turn into anchors.
@valid_schemes Keyword.get(@uri_schemes, :valid_schemes, [])
# TODO: make it use something other than @link_regex
@doc "Sanitizes already-HTML content by filtering its tags."
def html_escape(text, "text/html"), do: HTML.filter_tags(text)
@doc """
HTML-escapes plain text while leaving link-shaped chunks untouched so
they can be linkified later.

`Regex.split/3` with `include_captures: true` alternates non-link and
link chunks; `Enum.map_every(2, …)` therefore escapes only the even
(non-link) positions.
"""
def html_escape(text, "text/plain") do
  @link_regex
  |> Regex.split(text, include_captures: true)
  |> Enum.map_every(2, fn chunk ->
    {:safe, escaped} = Phoenix.HTML.html_escape(chunk)
    escaped
  end)
  |> Enum.join("")
end
@doc "changes scheme:... urls to html links"
def add_links({subs, text}) do
  # Candidate links: whitespace-separated words with a valid scheme that
  # also match the link regex; each gets a UUID placeholder. Sorting by
  # descending length keeps a shorter URL from clobbering a longer one
  # that contains it as a substring.
  links =
    text
    |> String.split([" ", "\t", "<br>"])
    |> Enum.filter(&String.starts_with?(&1, @valid_schemes))
    |> Enum.filter(&Regex.match?(@link_regex, &1))
    |> Enum.map(&{Ecto.UUID.generate(), &1})
    |> Enum.sort_by(fn {_uuid, url} -> -String.length(url) end)

  uuid_text =
    Enum.reduce(links, text, fn {uuid, url}, acc -> String.replace(acc, url, uuid) end)

  link_subs =
    Enum.map(links, fn {uuid, url} -> {uuid, "<a href=\"#{url}\">#{url}</a>"} end)

  {subs ++ link_subs, uuid_text}
end
@doc "Adds the links to mentioned users"
def add_user_links({subs, text}, mentions) do
  # Longest names first so "@user@host" is replaced before "@user";
  # each mention gets a UUID placeholder.
  mentions =
    mentions
    |> Enum.sort_by(fn {name, _user} -> -String.length(name) end)
    |> Enum.map(fn {name, user} -> {name, user, Ecto.UUID.generate()} end)

  uuid_text =
    Enum.reduce(mentions, text, fn {match, _user, uuid}, acc ->
      String.replace(acc, match, uuid)
    end)

  mention_subs =
    Enum.map(mentions, fn {match, %User{id: id, ap_id: ap_id, info: info}, uuid} ->
      # Prefer the profile URL from source_data when present; fall back
      # to the actor's ap_id.
      profile_url =
        if is_binary(info.source_data["url"]) do
          info.source_data["url"]
        else
          ap_id
        end

      # Drop the leading "@" (and any trailing "@domain") to show only
      # the bare nickname.
      nickname = match |> String.split("@") |> tl() |> hd()

      {uuid,
       "<span><a data-user='#{id}' class='mention' href='#{profile_url}'>@<span>#{nickname}</span></a></span>"}
    end)

  {subs ++ mention_subs, uuid_text}
end
@doc "Adds the hashtag links"
def add_hashtag_links({subs, text}, tags) do
  # Longest tags first so shorter tags that are substrings of longer
  # ones do not clobber them; each tag gets a UUID placeholder.
  tags =
    tags
    |> Enum.sort_by(fn {name, _short} -> -String.length(name) end)
    |> Enum.map(fn {name, short} -> {name, short, Ecto.UUID.generate()} end)

  uuid_text =
    Enum.reduce(tags, text, fn {match, _short, uuid}, acc ->
      String.replace(acc, match, uuid)
    end)

  tag_subs =
    Enum.map(tags, fn {tag_text, tag, uuid} ->
      link =
        "<a data-tag='#{tag}' href='#{Pleroma.Web.base_url()}/tag/#{tag}' rel='tag'>#{tag_text}</a>"

      {uuid, link}
    end)

  {subs ++ tag_subs, uuid_text}
end
@doc """
Replaces every UUID placeholder in `text` with its substitution,
yielding the final rendered string.
"""
def finalize({subs, text}) do
  Enum.reduce(subs, text, fn {placeholder, replacement}, acc ->
    String.replace(acc, placeholder, replacement)
  end)
end
|
2018-12-10 19:08:02 +00:00
|
|
|
|
|
|
|
def truncate(text, opts \\ []) do
|
2018-12-13 21:16:54 +00:00
|
|
|
max_length = opts[:max_length] || 200
|
|
|
|
omission = opts[:omission] || "..."
|
2018-12-10 19:08:02 +00:00
|
|
|
|
|
|
|
cond do
|
|
|
|
not String.valid?(text) ->
|
|
|
|
text
|
2018-12-13 21:16:54 +00:00
|
|
|
|
2018-12-10 19:08:02 +00:00
|
|
|
String.length(text) < max_length ->
|
|
|
|
text
|
2018-12-13 21:16:54 +00:00
|
|
|
|
2018-12-10 19:08:02 +00:00
|
|
|
true ->
|
|
|
|
length_with_omission = max_length - String.length(omission)
|
|
|
|
|
|
|
|
"#{String.slice(text, 0, length_with_omission)}#{omission}"
|
|
|
|
end
|
|
|
|
end
|
2017-05-17 16:00:09 +00:00
|
|
|
end
|