# Pleroma: A lightweight social networking server
# Copyright © 2017-2022 Pleroma Authors <https://pleroma.social/>
# SPDX-License-Identifier: AGPL-3.0-only

defmodule Pleroma.Application do
use Application

  import Cachex.Spec
alias Pleroma.Config
require Logger

  @name Mix.Project.config()[:name]
@version Mix.Project.config()[:version]
@repository Mix.Project.config()[:source_url]

  def name, do: @name
def version, do: @version
def named_version, do: @name <> " " <> @version
def repository, do: @repository
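
  # HTTP User-Agent this instance sends: the configured [:http, :user_agent],
  # or "<name> <version>; <endpoint url> <admin email>" by default.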
def user_agent do
if Process.whereis(Pleroma.Web.Endpoint) do
case Config.get([:http, :user_agent], :default) do
:default ->
info = "#{Pleroma.Web.Endpoint.url()} <#{Config.get([:instance, :email], "")}>"
          named_version() <> "; " <> info

        custom ->
custom
end
else
# fallback, if endpoint is not started yet
"Pleroma Data Loader"
end
end

  # See http://elixir-lang.org/docs/stable/elixir/Application.html
# for more information on OTP Applications
def start(_type, _args) do
# Scrubbers are compiled at runtime and therefore will cause a conflict
# every time the application is restarted, so we disable module
# conflicts at runtime
    Code.compiler_options(ignore_module_conflict: true)

    # Disable warnings_as_errors at runtime, it breaks Phoenix live reload
    # due to protocol consolidation warnings
    Code.compiler_options(warnings_as_errors: false)

    Pleroma.Telemetry.Logger.attach()

    Config.Holder.save_default()
Pleroma.HTML.compile_scrubbers()
Pleroma.Config.Oban.warn()
Config.DeprecationWarnings.warn()
Pleroma.Web.Plugs.HTTPSecurityPlug.warn_if_disabled()
Pleroma.ApplicationRequirements.verify!()

    load_custom_modules()
Pleroma.Docs.JSON.compile()
limiters_setup()

    adapter = Application.get_env(:tesla, :adapter)

    if match?({Tesla.Adapter.Finch, _}, adapter) do
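      # Note: Finch is started here outside the supervision tree, registered
      # under the name MyFinch.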
Logger.info("Starting Finch")
Finch.start_link(name: MyFinch)
end

    if adapter == Tesla.Adapter.Gun do
if version = Pleroma.OTPVersion.version() do
[major, minor] =
version
|> String.split(".")
|> Enum.map(&String.to_integer/1)
          |> Enum.take(2)

        if (major == 22 and minor < 2) or major < 22 do
raise "
!!!OTP VERSION WARNING!!!
You are using gun adapter with OTP version #{version}, which doesn't support correct handling of unordered certificates chains. Please update your Erlang/OTP to at least 22.2.
2020-03-04 06:23:42 +00:00
"
end
else
raise "
!!!OTP VERSION WARNING!!!
To support correct handling of unordered certificates chains - OTP version must be > 22.2.
"
end
end

    # Define workers and child supervisors to be supervised
children =
[
Pleroma.PromEx,
Pleroma.Repo,
Config.TransferTask,
Pleroma.Emoji,
Pleroma.Web.Plugs.RateLimiter.Supervisor,
{Task.Supervisor, name: Pleroma.TaskSupervisor}
] ++
cachex_children() ++
http_children(adapter) ++
[
Pleroma.Stats,
Pleroma.JobQueueMonitor,
{Majic.Pool, [name: Pleroma.MajicPool, pool_size: Config.get([:majic_pool, :size], 2)]},
{Oban, Config.get(Oban)},
Pleroma.Web.Endpoint
] ++
task_children() ++
streamer_registry() ++
background_migrators() ++
shout_child(shout_enabled?()) ++
[Pleroma.Gopher.Server]

    # See http://elixir-lang.org/docs/stable/elixir/Supervisor.html
# for other strategies and supported options

    # With this many caches, the default max_restarts of 3 can be exceeded
    # when the test suite resets them all at once, killing the supervisor,
    # so the test config raises the limit (other envs keep the default 3).
max_restarts = Application.get_env(:pleroma, __MODULE__)[:max_restarts]

    opts = [strategy: :one_for_one, name: Pleroma.Supervisor, max_restarts: max_restarts]
Supervisor.start_link(children, opts)
end
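
  # Compiles and loads any Elixir sources found in the configured
  # [:modules, :runtime_dir], so instances can ship custom modules
  # (e.g. MRF policies) without rebuilding.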
def load_custom_modules do
dir = Config.get([:modules, :runtime_dir])

    if dir && File.exists?(dir) do
dir
|> Pleroma.Utils.compile_dir()
|> case do
{:error, _errors, _warnings} ->
raise "Invalid custom modules"
{:ok, modules, _warnings} ->
if Application.get_env(:pleroma, __MODULE__)[:load_custom_modules] do
Enum.each(modules, fn mod ->
Logger.info("Custom module loaded: #{inspect(mod)}")
end)
end

          :ok
end
end
end
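
  # One child spec per Cachex table used by the application. TTLs and size
  # limits are tuned per use case: user/object entries expire after 25s,
  # while e.g. banned_urls entries live for 30 days.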
defp cachex_children do
[
build_cachex("used_captcha", ttl_interval: seconds_valid_interval()),
build_cachex("user", default_ttl: 25_000, ttl_interval: 1000, limit: 2500),
build_cachex("object", default_ttl: 25_000, ttl_interval: 1000, limit: 2500),
build_cachex("rich_media", default_ttl: :timer.minutes(120), limit: 5000),
build_cachex("scrubber", limit: 2500),
build_cachex("scrubber_management", limit: 2500),
build_cachex("idempotency", expiration: idempotency_expiration(), limit: 2500),
build_cachex("web_resp", limit: 2500),
build_cachex("emoji_packs", expiration: emoji_packs_expiration(), limit: 10),
build_cachex("failed_proxy_url", limit: 2500),
build_cachex("failed_media_helper_url", default_ttl: :timer.minutes(15), limit: 2_500),
build_cachex("banned_urls", default_ttl: :timer.hours(24 * 30), limit: 5_000),
build_cachex("chat_message_id_idempotency_key",
expiration: chat_message_id_idempotency_key_expiration(),
limit: 500_000
),
build_cachex("rel_me", limit: 2500),
build_cachex("host_meta", default_ttl: :timer.minutes(120), limit: 5000)
]
end

  defp emoji_packs_expiration,
do: expiration(default: :timer.seconds(5 * 60), interval: :timer.seconds(60))

  defp idempotency_expiration,
do: expiration(default: :timer.seconds(6 * 60 * 60), interval: :timer.seconds(60))

  defp chat_message_id_idempotency_key_expiration,
do: expiration(default: :timer.minutes(2), interval: :timer.seconds(60))

  defp seconds_valid_interval,
do: :timer.seconds(Config.get!([Pleroma.Captcha, :seconds_valid]))
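
  # A cache named "foo" becomes the :foo_cache Cachex table with child id
  # :cachex_foo; opts are passed straight through to Cachex.start_link/2.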
@spec build_cachex(String.t(), keyword()) :: map()
def build_cachex(type, opts),
do: %{
id: String.to_atom("cachex_" <> type),
start: {Cachex, :start_link, [String.to_atom(type <> "_cache"), opts]},
type: :worker
}

  defp shout_enabled?, do: Config.get([:shout, :enabled])
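
  # Started only when [:streamer_registry] is set in this module's config;
  # the Registry uses :duplicate keys so multiple processes can subscribe
  # to the same stream topic.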
defp streamer_registry do
if Application.get_env(:pleroma, __MODULE__)[:streamer_registry] do
[
{Registry,
[
name: Pleroma.Web.Streamer.registry(),
keys: :duplicate,
partitions: System.schedulers_online()
]}
]
else
[]
end
end
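
  # Long-running database migrations (hashtags table, context objects
  # deletion) that run in the background when enabled in config.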
defp background_migrators do
if Application.get_env(:pleroma, __MODULE__)[:background_migrators] do
[
Pleroma.Migrators.HashtagsTableMigrator,
Pleroma.Migrators.ContextObjectsDeletionMigrator
]
else
[]
end
end
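
  # When the shoutbox is enabled, start its state holder together with the
  # Phoenix.PubSub supervisor it relies on.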
defp shout_child(true) do
[
Pleroma.Web.ShoutChannel.ShoutChannelState,
{Phoenix.PubSub, [name: Pleroma.PubSub, adapter: Phoenix.PubSub.PG2]}
]
end

  defp shout_child(_), do: []
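
  # One-off startup tasks: web push initialization always runs; the internal
  # fetch actor is only initialized when enabled in this module's config.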
defp task_children do
children = [
%{
id: :web_push_init,
start: {Task, :start_link, [&Pleroma.Web.Push.init/0]},
restart: :temporary
}
]

    if Application.get_env(:pleroma, __MODULE__)[:internal_fetch] do
children ++
[
%{
id: :internal_fetch_init,
start: {Task, :start_link, [&Pleroma.Web.ActivityPub.InternalFetchActor.init/0]},
restart: :temporary
}
]
else
children
end
end

  # Starts HTTP client pools for the configured Tesla adapter; in the test
  # env both hackney and gun pools are started regardless of the adapter.
defp http_children(adapter) do
if Application.get_env(:pleroma, __MODULE__)[:test_http_pools] do
http_children_hackney() ++ http_children_gun()
else
cond do
match?(Tesla.Adapter.Hackney, adapter) -> http_children_hackney()
match?(Tesla.Adapter.Gun, adapter) -> http_children_gun()
true -> []
end
end
end

  defp http_children_hackney do
pools = [:federation, :media]

    pools =
if Config.get([Pleroma.Upload, :proxy_remote]) do
[:upload | pools]
else
pools
end

    for pool <- pools do
options = Config.get([:hackney_pools, pool])
:hackney_pool.child_spec(pool, options)
end
end

  defp http_children_gun do
Pleroma.Gun.ConnectionPool.children() ++
[{Task, &Pleroma.HTTP.AdapterHelper.Gun.limiter_setup/0}]
end
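
  # Registers a ConcurrentLimiter for each module that performs fan-out HTTP
  # work, capping it at 5 running / 5 waiting tasks unless overridden in the
  # ConcurrentLimiter config.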
@spec limiters_setup() :: :ok
def limiters_setup do
config = Config.get(ConcurrentLimiter, [])

    [
Pleroma.Web.RichMedia.Helpers,
Pleroma.Web.ActivityPub.MRF.MediaProxyWarmingPolicy,
Pleroma.Search
]
|> Enum.each(fn module ->
mod_config = Keyword.get(config, module, [])
max_running = Keyword.get(mod_config, :max_running, 5)
max_waiting = Keyword.get(mod_config, :max_waiting, 5)
ConcurrentLimiter.new(module, max_running, max_waiting)
end)
end
end