Compare commits

...

508 commits

Author SHA1 Message Date
asonix
6ff7b59778 Prepare 0.3.116 2024-12-09 20:59:14 -06:00
asonix
d9da352558 Update teloxide 2024-12-09 19:40:33 -06:00
asonix
aea64c726a Update opentelemetry stack 2024-12-09 19:37:35 -06:00
asonix
e243bd4600 Update bcrypt 2024-12-09 19:30:59 -06:00
asonix
a452fb91ba Fix build due to reqwest-tracing semver break 2024-12-09 19:30:30 -06:00
asonix
35acc916f2 flake: Use nixos-24.11 stable 2024-12-09 19:23:49 -06:00
asonix
752067ffb7 Update dependencies (minor & point) 2024-08-05 16:45:32 -05:00
asonix
b308e080af Update console-subscriber 2024-08-05 16:44:39 -05:00
asonix
6ab37dc06f Update opentelemetry stack 2024-08-05 16:43:51 -05:00
asonix
a23b30cc91 Bump version 2024-07-09 16:45:38 -05:00
asonix
1b58a50d44 Merge pull request 'Start upgrading to hyper and http 1' (#3) from asonix/hyper-1 into main
Reviewed-on: https://git.asonix.dog/asonix/relay/pulls/3
2024-07-09 21:39:10 +00:00
asonix
308a945283 Start upgrading to http1 2024-07-09 16:32:05 -05:00
asonix
86cab5d2d9 Update opentelemetry stack 2024-07-09 16:28:00 -05:00
asonix
a70e75665b Update console-subscriber 2024-07-09 16:26:33 -05:00
asonix
f1792c8eb3 Update dashmap 2024-07-09 16:26:00 -05:00
asonix
d918ef1495 Update rustls 2024-07-09 16:24:44 -05:00
asonix
2870789e1f Update background jobs, async-cpupool, metrics 2024-07-09 16:21:53 -05:00
asonix
cda92e7523 Update flake 2024-06-23 13:57:40 -05:00
asonix
43b03a176c Don't fail publish on clippy warnings
unfixable without ructe release
2024-06-23 13:57:28 -05:00
asonix
a465d1ae5b Allow versions to be unused 2024-06-23 13:56:37 -05:00
asonix
4fa7674a35 Move cargo config to config.toml 2024-06-23 13:55:10 -05:00
asonix
8c14d613f7 Prepare v0.3.114 2024-06-23 13:45:10 -05:00
asonix
aff2431681 Update dependencies (minor & point) 2024-06-23 13:42:26 -05:00
asonix
5aa97212b3 Impose limits on the size of downloaded content from foreign servers 2024-06-23 13:35:24 -05:00
asonix
97567cf598 Prepare v0.3.113 2024-05-01 15:45:53 -05:00
asonix
4c663f399e Update dependencies (minor & point) 2024-05-01 15:43:53 -05:00
asonix
8a3256f52a Avoid deadlock of iterating over tree while transacting on that tree 2024-05-01 15:43:08 -05:00
asonix
13a2653fe8 Remove prerelease flag 2024-04-23 14:00:04 -05:00
asonix
8dd9a86d22 Use match_pattern rather than path for metrics differentiation 2024-04-21 11:44:16 -05:00
asonix
5c0c0591dd Prepare 0.3.112 2024-04-14 22:47:38 -05:00
asonix
04ca4e5401 Stable async-cpupool 2024-04-14 19:53:31 -05:00
asonix
1de1d76506 prerelease 2024-04-13 13:57:12 -05:00
asonix
dd9225bb89 Prepare v0.3.111 2024-04-07 11:53:24 -05:00
asonix
b577730836 Fix build 2024-04-07 11:40:57 -05:00
asonix
21883c168b BROKEN! Start collecting more metrics about various sizes 2024-04-07 11:04:03 -05:00
asonix
76a0c79369 Update base64, ammonia 2024-04-06 13:42:29 -05:00
asonix
6444782db9 Bump version, Update dependencies (minor & point) 2024-04-06 13:34:54 -05:00
asonix
14aea3256d Update dependencies (minor & point) 2024-03-23 19:10:13 -05:00
asonix
f4f2aa2025 Update flake 2024-03-23 19:09:53 -05:00
asonix
615271fe80 Update opentelemetry dependencies, other dependencies minor & point 2024-03-10 20:09:16 -05:00
asonix
4aed601664 No console by default 2024-02-25 21:08:17 -06:00
asonix
bf21f05aca Strip release binaries 2024-02-12 15:16:20 -06:00
asonix
e69f6c6edb Remove prerelease marker 2024-02-12 14:32:46 -06:00
asonix
1e05eb4fe4 Bump version 2024-02-12 13:46:44 -06:00
asonix
7f09ac3edd Update dependencies (minor & point) 2024-02-12 13:42:45 -06:00
asonix
4788ad332a Update image version in docker-compose 2024-02-11 14:56:26 -06:00
asonix
1fd82915d3 Remove bad argument 2024-02-11 14:52:57 -06:00
asonix
0472082a97 Add actions, remove drone 2024-02-11 14:49:22 -06:00
asonix
c8250acce7 Bump version 2024-02-05 00:25:15 -06:00
asonix
b074759eb4 Update background-jobs, rework errors 2024-02-05 00:24:49 -06:00
asonix
ed399f1531 Be more accurate for reqwest errors 2024-02-04 20:51:25 -06:00
asonix
7e39acdcb0 Update config 2024-02-04 20:28:18 -06:00
asonix
894d096622 Bump version 2024-02-04 20:25:59 -06:00
asonix
05e31254ba Update rustls for actix-web, log less 2024-02-04 20:25:50 -06:00
asonix
086ca9fbf2 Support live-reloading TLS certificate 2024-01-31 16:49:23 -06:00
asonix
603fcc6e57 Bump version 2024-01-18 13:35:00 -05:00
asonix
6b8f15ee08 Use stable background-jobs 2024-01-18 13:34:10 -05:00
asonix
53939f8ae8 Go back to job-server per core 2024-01-18 12:31:26 -05:00
asonix
b53b34c515 Update dependencies (minor & point) 2024-01-14 16:16:56 -05:00
asonix
6dcdf2fc87 clippy 2024-01-14 16:10:32 -05:00
asonix
83e5619eb4 Update flake.lock 2024-01-14 16:10:19 -05:00
asonix
9090bb5c62 Bump version 2024-01-14 15:59:16 -05:00
asonix
d862bf8106 Use tokio rather than actix-rt 2024-01-14 15:56:07 -05:00
asonix
417553e643 Bump version 2024-01-09 18:09:51 -06:00
asonix
a2456c3d5f Update dependencies (minor & point) 2024-01-09 18:08:10 -06:00
asonix
2b3cb8db92 clippy 2024-01-08 17:10:31 -06:00
asonix
18f1096221 Update version 2024-01-08 17:06:02 -06:00
asonix
c640567206 Update to newest background-jobs, implement Job rather than ActixJob 2024-01-08 17:00:15 -06:00
asonix
36aa9120ea Update metrics 2024-01-07 12:43:58 -06:00
asonix
e377f3988b Update minify-html, dependencies (minor & point) 2024-01-07 12:10:43 -06:00
asonix
8c811710ac Bump version 2023-11-25 21:27:05 -06:00
asonix
e4f665d75f use stable async-cpupool 2023-11-25 21:17:59 -06:00
asonix
4383357abe update flake 2023-11-25 20:27:20 -06:00
asonix
f70af22c6a clippy 2023-11-25 20:27:11 -06:00
asonix
8bce3d172f Update streem 2023-11-25 20:20:38 -06:00
asonix
8540e93469 Use async-cpupool 2023-11-25 20:18:11 -06:00
asonix
708e7da301 Update opentelemetry, ring, http-signature-normalization, tracing-log 2023-11-25 20:16:13 -06:00
asonix
a0f9827e18 Bump version 2023-09-09 18:10:31 -04:00
asonix
9ebed87cde Update http-signature-normalization-actix 2023-09-09 18:09:24 -04:00
asonix
ae3d19a774 Bump version 2023-09-09 17:31:42 -04:00
asonix
2a5e769afb Update http-signature-normalization-actix 2023-09-09 17:30:07 -04:00
asonix
f4839d688e Update dependencies (minor & point) 2023-09-09 16:52:53 -04:00
asonix
206db2079f Remove futures-util dependency 2023-09-09 16:46:22 -04:00
asonix
6714fe48ed Bump version, enable tokio_unstable for console 2023-09-08 19:15:19 -06:00
asonix
804d22ee81 Enable different breaker failure cases for different endpoints
Additionally, don't count 4xx towards succeeding a breaker
2023-09-08 19:11:24 -06:00
asonix
5a6fbbcb77 Update tracing-opentelemetry 2023-09-08 18:41:55 -06:00
asonix
ea926f73c4 Update dependencies (minor & point) 2023-09-08 18:39:37 -06:00
asonix
53b14c3329 Bump version 2023-08-29 23:05:41 -05:00
asonix
9b1fad0e2e Update rustls 2023-08-29 22:15:41 -05:00
asonix
a8ba53fe11 Update flake 2023-08-26 12:20:36 -05:00
asonix
927fb91a5e Update flume 2023-08-17 17:11:08 -05:00
asonix
4d4093c15a Bump version 2023-08-17 17:10:24 -05:00
asonix
75df271b58 Switch from awc to reqwest, enable HTTP Proxies 2023-08-17 17:09:35 -05:00
asonix
73b429ab51 Update opentelemetry 2023-08-05 12:47:52 -05:00
asonix
2f57c855a4 Bump version 2023-08-04 19:01:05 -05:00
asonix
cdbde9519e Update dependencies (minor & point) 2023-08-04 18:57:53 -05:00
asonix
2cbe4864c3 Switch to ring for crypto 2023-08-04 18:57:53 -05:00
asonix
731a831070 Bump version 2023-07-28 17:47:51 -05:00
asonix
795d3238ad Hide nodes that failed breakers from index page 2023-07-28 17:46:23 -05:00
asonix
60abec2b96 Bump version 2023-07-27 13:48:01 -05:00
asonix
e63e1f975e Use verify spawner in routes 2023-07-27 13:39:31 -05:00
asonix
5430da58aa Update description in nix file 2023-07-27 13:14:46 -05:00
asonix
927f15c4ca Update dependencies (minor & point) 2023-07-27 13:11:00 -05:00
asonix
ef57576c57 Bump version 2023-07-27 13:10:00 -05:00
asonix
7438b0c5d0 Use verify spawner in all cases in verify path 2023-07-27 13:09:03 -05:00
asonix
f06316c6b2 Bump version 2023-07-27 12:20:41 -05:00
asonix
f86bbc95ae Pass spawner to digest middleware 2023-07-27 12:20:05 -05:00
asonix
a500824a7d Shorten thread names 2023-07-27 11:21:44 -05:00
asonix
433c981a21 Simplify < RATIO, bump version 2023-07-27 11:10:29 -05:00
asonix
f3ff8ae5f7 Split available signature threads between sign & verify 2023-07-27 11:08:20 -05:00
asonix
f24685e700 Allow naming spawner threads 2023-07-27 10:53:01 -05:00
asonix
5de244b848 Add complete to signature thread duration 2023-07-27 10:39:24 -05:00
asonix
769f7451f9 Simplify signature thread 2023-07-27 10:19:20 -05:00
asonix
fff9bf112d Bump version 2023-07-27 09:57:13 -05:00
asonix
05c266c23c Give verify & admin a different queue than deliver 2023-07-27 09:55:13 -05:00
asonix
2a7fed743f Bump version 2023-07-27 09:26:49 -05:00
asonix
240eee730c Add more metrics around spawn-blocking 2023-07-27 09:26:16 -05:00
asonix
8071c6ce3f Make signature threads configurable 2023-07-26 23:04:04 -05:00
asonix
78dcce5a08 Bump version 2023-07-26 22:52:13 -05:00
asonix
11d81683e3 Add logging around parallelism 2023-07-26 22:52:13 -05:00
asonix
5d526c60fe Clippy :( 2023-07-26 19:29:03 -05:00
asonix
73c7150f97 Use spawner for CPU-bound operations 2023-07-26 18:11:44 -05:00
asonix
7cfebd927e Bump version 2023-07-26 18:04:09 -05:00
asonix
d97cc4e5a4 Use custom threadpool for client signatures 2023-07-26 18:03:21 -05:00
asonix
8ff4961ded Bump version 2023-07-25 16:07:18 -05:00
asonix
970672a392 Make client timeout configurable 2023-07-25 16:06:56 -05:00
asonix
dfbd5c9035 Add deliver_concurrency to readme 2023-07-25 14:48:09 -05:00
asonix
d365e34f47 Bump version 2023-07-25 14:46:44 -05:00
asonix
de97adc2d6 Update dependencies (minor & point) 2023-07-25 14:45:46 -05:00
asonix
d1c6f6ff5d Make delivery concurrency configurable 2023-07-25 14:45:15 -05:00
asonix
582f311a20 Bump version 2023-07-24 13:20:09 -05:00
asonix
09436746c8 Update dependencies 2023-07-24 13:19:40 -05:00
asonix
a65ff19f6a Remove unneeded mut 2023-07-21 16:36:07 -05:00
asonix
bcdef5caa1 Don't clippy dependencies 2023-07-21 16:29:58 -05:00
asonix
4651fcc9d2 Update bcrypt, lru 2023-07-19 20:25:24 -05:00
asonix
fb6d8af1ca Update flake, dependencies 2023-07-19 20:23:44 -05:00
asonix
9779518dc1 Allow rel attribute in local & footer blurb
Patch from Jaehong Kang <sinoru@me.com>
2023-07-16 22:33:43 -05:00
asonix
7a00229508 Bump version, update docs 2023-06-23 15:15:27 -05:00
asonix
346664396c Run workers on handler threads 2023-06-23 15:08:59 -05:00
asonix
74f35faa22 Keep client in thread-local storage 2023-06-23 15:01:56 -05:00
asonix
e005adfcf8 Bump version 2023-06-23 14:32:14 -05:00
asonix
d40db33eb5 Don't drop and rebuild clients, share clients better 2023-06-23 14:27:20 -05:00
asonix
246e79b261 Bump version 2023-06-23 13:47:40 -05:00
asonix
8d565a1fbe Add ability to tweak client pool size 2023-06-23 13:46:13 -05:00
asonix
18ff2864a0 Update dependencies (minor & point) 2023-06-23 13:34:39 -05:00
asonix
4b71e56f31 Update nixpkgs 2023-06-23 13:34:21 -05:00
asonix
9b4f6b47a6 cargo update 2023-06-03 13:13:37 -05:00
asonix
5fa1d4983a Update nix 2023-06-03 13:10:19 -05:00
asonix
d69a80ebe8 Update dependencies, not rustls 2023-05-24 10:19:34 -05:00
asonix
a9a47e8ee2 Update flake 2023-04-27 19:54:15 -05:00
asonix
ab2dbfb439 Update metrics, rsa 2023-04-27 19:34:23 -05:00
asonix
73bf4d1597 Remove unneeded .into_iter() 2023-03-23 14:37:33 -05:00
asonix
2cb5ad9917 Replace Double with Fanout 2023-03-23 13:51:32 -05:00
asonix
afd4105d0f Add flake 2023-03-23 13:51:23 -05:00
asonix
d644e83733 Bump version 2023-02-25 15:14:24 -06:00
asonix
ae91aa8fa7 Update bcrypt 2023-02-25 15:06:18 -06:00
asonix
73c016d418 Update deps 2023-02-25 15:04:30 -06:00
asonix
a1ea5d676c Rework misskey fetch to reuse deliver plumbing
Only count server errors towards failed breakers
2023-02-25 15:02:16 -06:00
perillamint
667d586160 Send dummy JSON when trying Misskey API endpoint
From Misskey 13, Misskey expects valid JSON (does not care its content
though) in POST body. To workaround this, send empty JSON object when
requesting Misskey API endpoint
2023-02-25 14:34:38 -06:00
perillamint
4a7775b56d Misskey metadata support
This commit implements misskey metadata support and corresponding test
for From<MskyMeta> implementation

Also, it is good to note that, Misskey does not return 404 but 200 OK
when they poked at nonexistant endpoint, so the implementation should
handle for invalid json case
2023-02-25 14:34:22 -06:00
asonix
9b809913ad Add note about JSON-LD problems 2023-02-11 18:16:06 -06:00
asonix
a952b528df Use transpose in a couple places 2023-02-05 21:09:47 -06:00
asonix
b5138fc16d Bump version 2023-01-29 13:23:11 -06:00
asonix
0e9b88a7ae Bump deps 2023-01-29 13:21:53 -06:00
asonix
f9cad61049 Add healthcheck for db, new clippy lints 2023-01-29 13:21:36 -06:00
Tealk
96547230bc
update Pleroma text
Signed-off-by: Tealk <tealk@rollenspiel.monster>
2023-01-28 23:45:31 +01:00
asonix
c11ff17192 Bump version 2023-01-23 08:58:07 -06:00
asonix
e93dd2da56 Update teloxide 2023-01-23 08:57:16 -06:00
asonix
34dc1a2281 Update rsa 2023-01-23 08:56:18 -06:00
asonix
9cdebeae4c Update base64, ructe 2023-01-23 08:38:55 -06:00
asonix
662620be46 Only show open_registrations: false when restricted mode is enabled 2023-01-23 08:29:32 -06:00
asonix
5488acb59d Fix docker volume mount in readme 2023-01-03 15:17:56 -06:00
asonix
4998cd3a56 Bump version 2023-01-02 12:43:51 -06:00
asonix
f0a8862922 Don't prometheus exporter for relay client 2023-01-02 12:43:32 -06:00
asonix
b6a10c4e65 Apply patch from perillamint on github
Document REPOSITORY_COMMIT_BASE envvar
2023-01-01 10:29:28 -06:00
asonix
3a14242a91 Apply patch from perillamint on github
Accept REPOSITORY_COMMIT_BASE envvar to build repository url
2023-01-01 10:28:52 -06:00
asonix
f5fed2fce1 Apply patch from perillamint on github
One missing bit Debug implementation for source_url
2023-01-01 10:19:11 -06:00
asonix
5faeaf6371 Revert "Apply patch from perillamint on github"
This reverts commit f291b24269.
2023-01-01 10:01:39 -06:00
asonix
f291b24269 Apply patch from perillamint on github
Show repository URL with commit reference
2023-01-01 09:47:21 -06:00
asonix
5f5c34640f Apply patch from perillamint on github
use git hash to describe version number
2023-01-01 09:46:44 -06:00
asonix
d4e51a1afa Add scrape endpoint to .env 2022-12-26 11:20:02 -06:00
asonix
fafba69258 Add optional prometheus scrape endpoint 2022-12-26 10:57:16 -06:00
asonix
07b961c28f Bump deps 2022-12-21 16:59:19 -06:00
asonix
30dd16a889 Bump version 2022-12-21 16:58:17 -06:00
asonix
88b0383084 Keep track of when servers were last seen 2022-12-21 16:51:17 -06:00
asonix
b49eeaf822 Bump version 2022-12-19 22:25:27 -06:00
asonix
943f679a69 Allow activities without IDs, fetch actor unchecked 2022-12-19 22:24:58 -06:00
asonix
37b2afe344 Bump version 2022-12-19 21:46:27 -06:00
asonix
4e5fabce5f Also debug Kind in inbox 2022-12-19 21:45:52 -06:00
asonix
689d85befb Bump version 2022-12-19 21:07:25 -06:00
asonix
40eb12258d Record id in inbox route 2022-12-19 21:05:53 -06:00
asonix
efcec29d7b Remove unused docker-related files 2022-12-19 16:32:16 -06:00
asonix
62a886d0bf Bump version 2022-12-19 16:31:51 -06:00
asonix
163e480076 Update deps 2022-12-19 16:30:48 -06:00
asonix
675fddcfeb Support Remove activity, forward verbatim 2022-12-19 16:08:39 -06:00
asonix
359ec68aa0 Add example systemd configuration 2022-12-19 15:52:47 -06:00
asonix
565a94d756 clippy 2022-12-19 12:23:06 -06:00
asonix
815c18b899 Update version number in various places 2022-12-19 12:17:08 -06:00
asonix
fbcbf141dd Bump version 2022-12-19 11:46:49 -06:00
asonix
cf7a25f935 Consider NoSignature a BadRequest 2022-12-19 11:44:50 -06:00
asonix
b56bddccb4 Allow Signature to be missing if kind is Delete, return early without additional processing 2022-12-19 11:39:30 -06:00
asonix
886c7d0ac6 Apply patch from perallamint on github
Temporary fix: allow signing bypass for 410 gone actors
DIRTY FIX: implement sigcheck_bypass for 410'ing actors
2022-12-19 09:44:04 -06:00
asonix
178d23bcbd Bump deps 2022-12-14 20:17:14 -06:00
asonix
549eb47202 Bump version 2022-12-13 23:41:06 -06:00
asonix
5968cb8953 bump deps 2022-12-13 23:40:53 -06:00
asonix
c5e254dad6 Update deps 2022-12-13 23:37:09 -06:00
asonix
430ebec810 Improve tracing, immediately stringify spantrace, remove join macros 2022-12-13 23:36:40 -06:00
asonix
c15f591bc8 Add punctuation to readme 2022-12-13 10:56:25 -06:00
asonix
5d69eaf2ab Add note about Add activity to README 2022-12-13 10:46:58 -06:00
asonix
43b70f88a7 Apply patch from perallamint on github
clippy: unnecessary lifetime annotation on static strings

Since string literal constant already has static lifetime, it is not
necessary to explicitly annotate it with 'static.
2022-12-13 10:39:25 -06:00
asonix
a0dc2363f6 Add support for Add activity - forward verbatim 2022-12-13 10:35:16 -06:00
asonix
9d68ccd834 Update deps 2022-12-12 11:06:23 -06:00
asonix
a8b8325557 Update deps 2022-12-12 10:56:53 -06:00
asonix
6082def854 Bump version 2022-12-09 18:04:15 -06:00
asonix
31021e80e4 Bump deps 2022-12-09 18:03:36 -06:00
asonix
f4db90b699 Use sync RwLock for lru access 2022-12-09 17:47:45 -06:00
asonix
d834537300 Bump http-signature-normalization-actix 2022-12-08 21:15:43 -06:00
asonix
c18760d57f Bump version 2022-12-08 15:14:12 -06:00
asonix
8575439d88 Bump deps 2022-12-08 15:14:04 -06:00
asonix
c543e8b4eb Bump version 2022-12-06 18:53:36 -06:00
asonix
a0fbf9d236 Bump activitystreams again 2022-12-06 18:53:19 -06:00
asonix
b9dba28207 Bump activitystreams 2022-12-06 18:21:55 -06:00
asonix
b5dc3e7c08 Wrap whole main in actix_rt, fixes opentelemetry 2022-12-06 17:55:02 -06:00
asonix
4d8e1a7241 Enable lazy loading for images 2022-11-23 13:32:27 -06:00
asonix
89a9e20d4a Bump version 2022-11-23 12:58:29 -06:00
asonix
39b8b1d3fa Add compression middleware - not zstd 2022-11-23 12:57:56 -06:00
asonix
96eb028145 Revert "Add compression middleware"
This reverts commit aa8ddfa637.
2022-11-23 12:50:46 -06:00
asonix
aad0cc990e Bump version 2022-11-23 12:39:19 -06:00
asonix
aa8ddfa637 Add compression middleware 2022-11-23 12:30:17 -06:00
asonix
c6adc9f77b Propogate Span into bcrypt verify 2022-11-23 11:58:44 -06:00
asonix
68a0b7c574 Bump version 2022-11-23 11:53:58 -06:00
asonix
d7adaeb38d Move joining instructions before server list 2022-11-23 11:52:05 -06:00
asonix
149ec1d14f Minify HTML 2022-11-23 11:51:51 -06:00
asonix
d7a720b6c4 clippy, replace indexmap with btreemap 2022-11-23 11:25:13 -06:00
asonix
e2f3727d07 Bump version 2022-11-23 11:21:05 -06:00
asonix
e9f312bed5 Measure bcrypt, change DashMap to RwLock<HashMap for collector 2022-11-23 11:13:30 -06:00
asonix
1a638f7f8d Improve debug middleware 2022-11-23 10:44:11 -06:00
asonix
ed0ea6521e Improve Timings middleware 2022-11-23 10:44:01 -06:00
asonix
e987149757 Bump version 2022-11-23 08:27:36 -06:00
asonix
01e283a065 Update deps 2022-11-23 08:27:05 -06:00
asonix
ab7d940de9 Improve error in signature verification (again) 2022-11-22 15:25:42 -06:00
asonix
5cd0b21ae3 Improve error in signature verification 2022-11-22 15:11:56 -06:00
asonix
b53ec4d980 More useful default logging 2022-11-21 23:12:31 -06:00
asonix
c3b50bc94e Bump version 2022-11-21 14:26:51 -06:00
asonix
88329a79e2 clippy 2022-11-21 14:25:24 -06:00
asonix
a77a4cde22 Add an 'About' section to the relay 2022-11-21 14:23:37 -06:00
asonix
5043892981 Fix compile issue 2022-11-21 11:28:25 -06:00
asonix
8afc16786d WIP: Don't collect path-based metrics for 404, 405 2022-11-21 11:27:22 -06:00
asonix
cdaf3b2fa3 Bump deps 2022-11-21 11:16:57 -06:00
asonix
37e3b17966 Bump version 2022-11-21 11:16:48 -06:00
asonix
9133dd7688 Add optional footer blurb 2022-11-21 11:16:21 -06:00
asonix
a0195d94aa Bump version 2022-11-20 22:47:49 -06:00
asonix
d8f3f1d0e9 Add one more log in TLS config 2022-11-20 22:47:20 -06:00
asonix
205e794b9e Add more logging around TLS config issues 2022-11-20 22:46:20 -06:00
asonix
73cc4862d9 Bump deps 2022-11-20 21:43:09 -06:00
asonix
981a6779bf Bump version 2022-11-20 21:42:59 -06:00
asonix
5d33dba103 Add support for binding TLS 2022-11-20 21:42:38 -06:00
asonix
a3eb785b9e Update defaults to be more prod friendly 2022-11-20 16:25:27 -06:00
asonix
efc918a826 Update deps 2022-11-20 12:09:43 -06:00
asonix
13cd308358 clippy 2022-11-20 12:09:17 -06:00
asonix
df70a28ca3 Bump version 2022-11-20 12:07:44 -06:00
asonix
162dd1cb0e Add more launch logging 2022-11-20 12:07:27 -06:00
asonix
df3063e75f Improve concurrency for larger systems 2022-11-20 12:06:10 -06:00
asonix
d44db2eab5 Bump version 2022-11-19 23:44:52 -06:00
asonix
7ec56d2af2 clippy 2022-11-19 23:44:35 -06:00
asonix
9f6e0bc722 Bump version 2022-11-19 23:35:20 -06:00
asonix
3500f85f44 Move blocking setup out of actix systems 2022-11-19 23:35:00 -06:00
asonix
a154fbb504 Bump version 2022-11-19 22:39:27 -06:00
asonix
9ede941ff7 Increase concurrency 2022-11-19 22:38:58 -06:00
asonix
4267f52a7e Bump deps 2022-11-19 21:52:06 -06:00
asonix
9272ba0d4c More logging when ending main 2022-11-19 21:51:04 -06:00
asonix
8d0d39b1fc Bump version 2022-11-19 21:33:09 -06:00
asonix
787c8312bc Make better use of cores for jobs 2022-11-19 21:32:45 -06:00
asonix
95f98ec052 Update readme 2022-11-19 20:40:17 -06:00
asonix
8fa24aa243 Bump version 2022-11-19 20:36:24 -06:00
asonix
cecc35ae85 Add timings metrics middleware 2022-11-19 20:35:45 -06:00
asonix
4e1a782bea Fix merge 2022-11-19 18:57:34 -06:00
asonix
9a9d09c0c4 Bump version 2022-11-19 18:27:16 -06:00
asonix
99c3ec0b75 Improve presentation of stats 2022-11-19 18:26:47 -06:00
asonix
f892a50f2c Add metrics printer 2022-11-19 17:45:01 -06:00
asonix
c322798ba3 Add metrics collector, admin route 2022-11-19 17:28:15 -06:00
asonix
c8b81bb9aa Add metrics dependencies 2022-11-19 14:47:47 -06:00
asonix
902ce5d3c2 New module structure 2022-11-19 14:47:32 -06:00
asonix
261805004b Update background-jobs 2022-11-19 14:45:13 -06:00
asonix
10777c32ab Bump version 2022-11-18 21:08:34 -06:00
asonix
8f7d8b1f00 Add node count to Connected Servers heading 2022-11-18 21:08:16 -06:00
asonix
9333e9f8fd Bump deps 2022-11-18 19:01:38 -06:00
asonix
2023d7ba54 Bump version 2022-11-18 19:01:14 -06:00
asonix
a0dc917dfe Prioritize open-reg servers on index page 2022-11-18 19:00:45 -06:00
asonix
b901322706 Bump version 2022-11-17 23:51:40 -06:00
asonix
350b1c6bd3 Update deps 2022-11-17 23:51:20 -06:00
asonix
07557d31d2 Bump version 2022-11-17 22:40:21 -06:00
asonix
e9303ad9f6 Remove media caching, just proxy 2022-11-17 22:39:26 -06:00
asonix
094331a447 Bump version 2022-11-17 19:29:01 -06:00
asonix
959201fa97 Remove errors when our signature is rejected 2022-11-17 19:28:17 -06:00
asonix
4df14c7602 Bump deps 2022-11-17 18:23:15 -06:00
asonix
9ac7854081 clippy 2022-11-17 14:16:21 -06:00
asonix
bfc743354f Bump deps 2022-11-17 14:16:15 -06:00
asonix
547ef6c3e9 Bump version 2022-11-17 14:14:19 -06:00
asonix
ebdc739c84 Make admin API & client work 2022-11-17 14:13:41 -06:00
asonix
fe844a807f Start work on admin API 2022-11-17 13:14:45 -06:00
asonix
08374d0382 Merge branch 'cmdline-ls' 2022-11-16 14:59:35 -06:00
asonix
04fcc83d29 Bump version 2022-11-16 14:30:20 -06:00
asonix
448a907ab0 Reset breakers when receiving activities 2022-11-16 14:29:57 -06:00
asonix
2c4901d3fc Bump version 2022-11-16 12:41:01 -06:00
asonix
2dd1dfe43f Factor more bookkeeping into check_response 2022-11-16 12:38:34 -06:00
asonix
0d42f72f87 Update deps 2022-11-16 11:42:04 -06:00
asonix
f55ea0a550 Check for signature errors, record signing strings 2022-11-16 11:23:36 -06:00
asonix
7e01cbfc41 Increase http timeout 2022-11-15 22:56:38 -06:00
asonix
1a1b10a6ba Add RUST_LOG to readme 2022-11-15 22:18:38 -06:00
asonix
ffe9944739 Bump version 2022-11-15 22:10:55 -06:00
asonix
e1137fadd8 Handle other request in nodeinfo job 2022-11-15 22:10:15 -06:00
asonix
debec2875d Bump version 2022-11-15 21:26:53 -06:00
asonix
25e8b5a673 Don't print parsed activity on inbox 2022-11-15 21:26:13 -06:00
asonix
4509465e9c Bump version 2022-11-15 20:54:23 -06:00
asonix
0768cb6ac6 Don't fail jobs for fine scenarios
- dont fail contact for breaker
- dont fail instance for not found
2022-11-15 20:53:55 -06:00
asonix
6be72a836b Bump deps 2022-11-15 19:57:07 -06:00
asonix
853301297c Bump version 2022-11-15 19:56:54 -06:00
asonix
5011e05c3d Clean tracing a bit more 2022-11-15 19:56:19 -06:00
asonix
2df34c9e55 Bump version 2022-11-15 13:48:19 -06:00
asonix
e46c9e230b Add boolish to nodinfo, add tests 2022-11-15 13:47:31 -06:00
asonix
a4cb7934b1 Add test for Boolish 2022-11-15 13:12:16 -06:00
asonix
189bd71276 Bump version 2022-11-15 13:08:12 -06:00
asonix
63dc505e61 Bump deps 2022-11-15 13:07:51 -06:00
asonix
6ca6a19178 Parse akkoma registration weirdness 2022-11-15 13:06:57 -06:00
asonix
88cce4e21e Bump version 2022-11-14 19:21:07 -06:00
asonix
bc0bf40551 Update deps 2022-11-14 19:20:42 -06:00
asonix
881654fed9 Try to be a bit more clear about what object is being delivered 2022-11-14 19:18:09 -06:00
asonix
373072c482 Don't log server list in deliver many 2022-11-14 19:11:10 -06:00
asonix
a0afa3f3fa Bump version 2022-11-13 13:59:02 -06:00
asonix
3358ae0461 Simplify debug info for jobs 2022-11-13 13:58:30 -06:00
Maxime Augier
cb7187a096 Merge remote-tracking branch 'upstream/main' into cmdline-ls 2022-11-11 10:18:07 +01:00
Maxime Augier
3b0e9fddc0 Add -l option for dumping existing allow/block lists 2022-11-11 10:00:41 +01:00
asonix
e37314355e Bump version 2022-11-08 13:53:01 -06:00
asonix
fac40c1853 Don't fail deliver for 400 Bad Request 2022-11-08 13:50:08 -06:00
asonix
ea699a7978 Bump version, deps 2022-11-07 21:08:23 -06:00
asonix
64d06f210a Parse masto 4.0 nodeinfo 2022-11-07 21:07:46 -06:00
asonix
4ae7e435eb Ensure proper parsing for masto instance struct on 4.0.0 2022-11-07 19:31:32 -06:00
asonix
d7e9e58de2 Prefer short_description, add more telegram meta commands 2022-11-07 18:49:19 -06:00
asonix
1f87c0f913 Make telegram actually work 2022-11-02 18:57:08 -05:00
asonix
e705a90244 Bump version 2022-11-02 18:05:13 -05:00
asonix
645e6b498a Use String instead of IriString for domain in telegram bot 2022-11-02 18:04:57 -05:00
asonix
bd172753fb Add basic administration via telegram 2022-11-02 17:58:52 -05:00
asonix
279ac9400d Hide generic join text in restricted mode 2022-11-02 14:56:49 -05:00
asonix
0e9b0f94de Update version 2022-11-02 14:17:07 -05:00
asonix
8cb4944bb4 Update deps 2022-11-02 14:16:48 -05:00
asonix
6125e5e63a Add note about restricted mode on index page 2022-11-02 14:16:32 -05:00
asonix
a2bd41a00f Improve concurrency 2022-11-02 13:55:45 -05:00
asonix
32f5a0670f Keep a couple more fields out of logs 2022-11-01 16:50:23 -05:00
asonix
e466a41309 Bump version 2022-11-01 15:57:45 -05:00
asonix
bc263701e2 Log cleanup 2022-11-01 15:57:33 -05:00
asonix
e1e77b0bdf Bump dependencies 2022-11-01 14:32:01 -05:00
asonix
c281cc9273 Bump version 2022-11-01 14:31:12 -05:00
asonix
7e9371e392 Trim newlines in public keys 2022-11-01 14:30:49 -05:00
asonix
e9768383c0 Bump version 2022-10-29 13:47:34 -05:00
asonix
c16adca27c Don't double-hash message bytes 2022-10-29 13:47:23 -05:00
asonix
bd4d5c9983 Fix typo in readme 2022-10-29 13:03:40 -05:00
asonix
a48ef32d54 Update license, readme 2022-10-29 12:49:07 -05:00
asonix
0b0ed56bad Update RSA, other deps 2022-10-29 12:22:13 -05:00
asonix
5ae4c43caa Update deps, structopt -> clap, re-enable cargo publish 2022-09-28 18:01:52 -05:00
Aode (lion)
061c1a0c0a Disable crate publish, bump version 2022-07-02 14:09:00 -05:00
Aode (lion)
79fa0fb828 Update to git background jobs 2022-07-02 14:07:25 -05:00
Aode (lion)
f5d7c80d2e Bump deps 2022-07-02 11:01:02 -05:00
Aode (lion)
74c4a4faec Bump version 2022-04-08 17:43:53 -05:00
Aode (lion)
2313303e22 Update RSA 2022-04-08 17:39:38 -05:00
Aode (Lion)
9a88161ee6 Update deps 2022-03-08 12:26:15 -06:00
Aode (Lion)
32d362201f Stable awc 2022-03-08 11:39:00 -06:00
Aode (Lion)
f9816ddd3b Update to stable actix-web 2022-02-26 12:12:07 -06:00
Aode (lion)
b331d47f23 actix-web rc.3 2022-02-11 11:28:45 -06:00
Aode (lion)
57b10fccb7 opentelemetry 0.17, actix-web rc.2 2022-02-04 11:50:52 -06:00
Aode (lion)
0916b26d67 Update deps 2022-02-01 11:47:17 -06:00
Aode (lion)
ca0a0d8e9f Update actix web 2022-01-23 23:03:00 -06:00
Aode (lion)
e1255e272e Update actix web 2022-01-18 11:26:50 -06:00
Aode (Lion)
c7263e17e1 Bump version 2022-01-17 17:58:13 -06:00
Aode (Lion)
726a479025 Remove direct dependency on time 2022-01-17 17:57:06 -06:00
Aode (Lion)
05288b56e1 Update background jobs 2022-01-17 17:51:19 -06:00
Aode (Lion)
8893895c71 Update to latest activitystreams 2022-01-17 16:54:45 -06:00
Aode (Lion)
6b0d3298cc Update aw.b19 2022-01-06 11:41:03 -06:00
Aode (lion)
ceee883b21 Update aw-b.18 2021-12-30 12:23:10 -06:00
Aode (lion)
c91adb4a4b Rename crate to avoid crates.io collision 2021-12-28 10:30:48 -06:00
Aode (lion)
1b3ae4d2c7 Bump version 2021-12-28 10:23:59 -06:00
Aode (lion)
3e0595fa88 Update actix web 2021-12-28 10:23:48 -06:00
Aode (lion)
9458bc298b Update to latest rust-builder tags 2021-12-22 15:55:30 -06:00
Aode (lion)
c432508e65 Remove cargo publish for now 2021-12-19 14:44:24 -06:00
Aode (lion)
dd92650cff Add console-subscriber, add cargo publish step 2021-12-19 14:24:31 -06:00
Aode (lion)
876cace337 Update deps 2021-12-13 11:39:46 -06:00
Aode (lion)
723251e782 Fail on clippy 2021-12-05 15:28:46 -06:00
Aode (lion)
be909210eb Update drone 2021-12-04 17:27:23 -06:00
Aode (lion)
49bc787bca Update deps 2021-12-04 17:27:15 -06:00
Aode (lion)
1b07fec76f Update drone triggers 2021-12-03 16:54:42 -06:00
Aode (lion)
fbb7b397c2 clippy 2021-12-03 16:20:37 -06:00
Aode (lion)
66c86ebfd6 Update deps, add drone 2021-12-03 16:17:25 -06:00
Aode (lion)
8b6d97fd4a Don't include SpanTrace in Error debug 2021-11-24 21:57:20 -06:00
Aode (lion)
3912f1203d Don't cargo publish relay 2021-11-24 20:51:08 -06:00
Aode (lion)
11242b57f3 Version v0.3.8 2021-11-24 20:27:06 -06:00
Aode (lion)
f2c617d784 Bump version 2021-11-24 20:26:50 -06:00
Aode (lion)
0d38133271 Merge branch 'main' of git.asonix.dog:asonix/relay into main 2021-11-23 16:20:44 -06:00
Aode (lion)
1dba31e3a0 Clippy lints, dashmap for breakers 2021-11-23 16:19:59 -06:00
Aode (Lion)
1b1d3ac8cc Merge branch 'main' of git.asonix.dog:asonix/relay into main 2021-11-23 12:46:42 -06:00
Aode (Lion)
4753a454ab Update deps 2021-11-23 12:43:52 -06:00
Aode (lion)
e1c61d5b5f Update deps 2021-11-22 18:38:58 -06:00
Aode (lion)
8021dca1dd Update background jobs 2021-10-29 19:26:57 -05:00
Aode (lion)
e7bbf3454b Version v0.3.7 2021-10-27 12:21:41 -05:00
Aode (lion)
68bade2129 Bump deps 2021-10-27 11:44:34 -05:00
Aode (lion)
b9761dfd9f Update tracing subscriber 2021-10-24 20:45:38 -05:00
Aode (Lion)
e0ffdf9294 2021 2021-10-21 16:34:13 -05:00
Aode (Lion)
3c4905cde9 Published tracing deps 2021-10-21 11:16:48 -05:00
Aode (Lion)
feadf66a71 Update to latest betas 2021-10-20 18:27:22 -05:00
Aode (Lion)
a2f8e8e21a Version v0.3.5 2021-10-11 14:20:38 -05:00
Aode (Lion)
231290380e Bump background jobs, other deps 2021-10-11 14:19:32 -05:00
Aode (Lion)
ea9854d3b2 Remove unhelpful fields from Request debug 2021-09-21 14:33:42 -05:00
Aode (Lion)
8b27a2dc0f Version v0.3.4 2021-09-21 14:32:39 -05:00
Aode (Lion)
3384ca9064 Clean up debug impls 2021-09-21 14:32:25 -05:00
Aode (Lion)
4b4aaaa0b4 Don't store Config in State 2021-09-21 13:26:31 -05:00
Aode (Lion)
cf60445972 Version v0.3.3 2021-09-21 12:06:24 -05:00
Aode (Lion)
fe91b93694 Update jobs-actix 2021-09-21 12:05:58 -05:00
Aode (Lion)
8cf689b243 Version v0.3.2 2021-09-21 11:21:19 -05:00
Aode (Lion)
2792f5075d Enable tracing AWC 2021-09-21 11:21:06 -05:00
Aode (Lion)
37d197b8dc Version v0.3.1 2021-09-21 10:48:44 -05:00
Aode (Lion)
854db02010 No git deps 2021-09-21 10:47:49 -05:00
Aode (lion)
90965d6ee9 Better format errors 2021-09-20 13:13:21 -05:00
Aode (lion)
3a4d21b349 Version v0.3.0 2021-09-20 12:56:14 -05:00
Aode (lion)
a53c8a0a63 Include version info in builds 2021-09-20 12:49:07 -05:00
Aode (lion)
d26ff4a7cb Switch from tokio-current-thread to tokio for otel 2021-09-18 15:47:10 -05:00
Aode (lion)
82371d4a65 Name relay service for otel 2021-09-18 13:55:11 -05:00
Aode (lion)
5e24bb06a7 Support exporting metrics to Opentelemetry 2021-09-18 13:40:47 -05:00
Aode (lion)
d627c5b6b2 Bump versions 2021-09-18 13:00:35 -05:00
Aode (lion)
751a84c7b3 Simplify cargo toml 2021-09-18 12:57:29 -05:00
Aode (lion)
43e5b6d873 Instrument with tracing 2021-09-18 12:55:39 -05:00
asonix
ebba8e3f60 Fix deploy script 2021-08-01 15:14:44 -05:00
asonix
190df60f7e Version v0.2.17 2021-08-01 15:13:25 -05:00
asonix
8474640278 Bump dependencies 2021-08-01 15:12:06 -05:00
asonix
0f370de1a0 Bump version 2021-06-26 18:16:09 -05:00
asonix
994bf0b6ef Update to latest actix betas 2021-06-26 18:14:43 -05:00
asonix
f125bd614d Version v0.2.15-r0 2021-06-24 12:03:45 -05:00
asonix
f105c427dc Update actix betas 2021-06-24 12:02:27 -05:00
asonix
80bc838073 Bump actix version 2021-04-17 12:36:47 -05:00
asonix
45ba2e5c26 Update deps 2021-04-02 15:38:55 -05:00
asonix
a2b4ad8f7c Update RSA, rand 2021-03-28 15:02:26 -05:00
asonix
b4a6a61f51 Remove max conns from readme 2021-03-19 10:52:04 -05:00
asonix
a48749e310 Update actix deps 2021-03-09 20:09:56 -06:00
asonix
711d9f19b6 Bump alpine 2021-03-08 18:27:48 -06:00
asonix
6f5c000335 Bump deps 2021-03-08 18:26:22 -06:00
asonix
1df3042a1d Bump deps, version 2021-02-11 22:13:42 -06:00
asonix
d6fa5d3617 Expose source_repo env var 2021-02-11 22:11:55 -06:00
asonix
7e38bf235e Bump version 2021-02-11 14:29:07 -06:00
asonix
2e6396be2b Fix staffAccounts extraction 2021-02-11 14:27:41 -06:00
asonix
2015dba733 Bump version 2021-02-11 14:20:47 -06:00
asonix
486dabff9e Error contact job on failed extraction 2021-02-11 14:18:58 -06:00
asonix
2479c2b39d Test contact extraction 2021-02-11 14:17:17 -06:00
asonix
1311f92b6c Add support for pulling a pleroma admin 2021-02-11 13:41:03 -06:00
asonix
6b7f0f2b63 Remove diesel.toml 2021-02-10 21:25:11 -06:00
asonix
43227d9852 Update to actix-web 4.0.0-beta.3 2021-02-10 18:00:11 -06:00
asonix
1c813d917b Spawn cache tasks for new follows 2021-02-10 12:35:37 -06:00
asonix
52408d8189 Add tests for connect/disconnect 2021-02-10 09:47:23 -06:00
asonix
83d05f086a Bump version 2021-02-10 09:24:46 -06:00
asonix
b9f9fd4a12 Allow sub-actors of connected server-actors 2021-02-10 09:23:55 -06:00
asonix
3074823dc9 Bump version 2021-02-10 01:00:06 -06:00
asonix
57ed79add5 Use actor ID in NotSubscribed error 2021-02-10 00:59:16 -06:00
asonix
d0a728a9b5 Log & test connections 2021-02-10 00:57:49 -06:00
asonix
b2904bb1ba Don't add inbox as connection 2021-02-10 00:44:48 -06:00
asonix
a4d70d7e3a Bump version 2021-02-10 00:15:22 -06:00
asonix
2619a93643 Fix rdnn block checks 2021-02-10 00:14:42 -06:00
asonix
c3d5de600d Fix mutex scoping, map payload in-place 2021-02-09 23:45:13 -06:00
asonix
af570c6581 Remove unused deps 2021-02-09 22:24:40 -06:00
asonix
eb21d411b3 Update dockerfiles 2021-02-09 22:19:58 -06:00
asonix
8a5b09f6e7 Update readme 2021-02-09 22:19:04 -06:00
asonix
2c275e441b pub -> pub(crate) 2021-02-09 22:17:20 -06:00
asonix
50d2b5b21c bro we are sledding 2021-02-09 22:05:06 -06:00
asonix
d7e68190c4 Remove direct dependencies on tokio, bytes, reduce lock contention for circuitbreaker 2020-12-29 11:27:14 -06:00
asonix
9923d4d107 Add allow/block check to verifier middleware before key validation 2020-12-23 12:30:19 -06:00
asonix
e2da563a1c Add breakers for requests to down domains 2020-12-23 12:06:15 -06:00
asonix
55cb25f54b Update deps 2020-10-10 12:38:37 -05:00
asonix
b51a389ca9 Update deps, use mastodon-compat mode 2020-09-29 19:33:50 -05:00
asonix
f4690b5a04 Don't style instance descriptions 2020-09-27 12:44:23 -05:00
asonix
dd83f4a859 Stable releases 2020-09-13 19:46:13 -05:00
asonix
2289e6e34c Update deps 2020-09-09 18:47:11 -05:00
asonix
1f065385a0 Update deps 2020-09-07 16:51:02 -05:00
asonix
e80ded92f5 Debug host 2020-09-07 15:28:58 -05:00
asonix
b6e25df717 Add host to signatures 2020-09-07 13:34:29 -05:00
asonix
1d0a1d7cfd update version string 2020-07-25 17:33:11 -05:00
asonix
812de59348 Bump job concurrency 2020-07-25 10:41:39 -05:00
asonix
be07ee0177 Add URL to statuscode error 2020-07-25 10:13:00 -05:00
asonix
8d493f35a8 Update deps 2020-07-25 09:37:10 -05:00
asonix
a26cc2b8f7 Update rsa 2020-07-25 09:33:35 -05:00
asonix
db5284e162 Use memory job store because why not 2020-07-10 20:00:31 -05:00
asonix
51ef5c2796 Revert "Support pleromo registrations format"
This reverts commit 741c4efc0e.
2020-07-10 18:42:48 -05:00
asonix
741c4efc0e Support pleromo registrations format 2020-07-10 18:34:24 -05:00
asonix
e2feeecbb3 Allow missing approval-required 2020-07-10 18:18:05 -05:00
asonix
261e52e551 Better json errors 2020-07-10 18:06:04 -05:00
asonix
c4b4f13fa6 Don't validate content type on response, pleromo is weird 2020-07-10 17:47:41 -05:00
asonix
c499b5355c Send normal json accept header for normal json endpoints 2020-07-10 17:24:47 -05:00
asonix
0f95660aec Update activitystreams 2020-07-10 16:57:50 -05:00
asonix
a68c0039fa Update deps 2020-07-10 15:36:23 -05:00
asonix
b7e8d2e465 Join upstream service with payload fut 2020-07-10 15:34:18 -05:00
asonix
fbe8baaee9 Add payload debug middleware for inbox 2020-07-10 15:07:47 -05:00
asonix
d3ef63d1f3 Update sig lib 2020-07-01 17:32:58 -05:00
asonix
d9ab1b8a76 Support even smaller screens 2020-06-28 12:18:08 -05:00
asonix
652ab6ac42 Remove XsdAnyUri 2020-06-27 17:29:23 -05:00
asonix
bc1003c467 Don't overwrite listener URI 2020-06-20 10:06:01 -05:00
asonix
f9b036fe4e Version v0.1.0-r110 2020-06-19 23:15:33 -05:00
asonix
2bbcc82d9c Update activitystreams 2020-06-19 23:11:02 -05:00
asonix
e65c21582c Version v0.1.0-r109 2020-06-10 19:54:16 -05:00
asonix
5305b70b83 Fix manifest tag order 2020-06-10 19:29:05 -05:00
asonix
579b5822b6 Version v0.1.0-r108 2020-06-10 19:00:29 -05:00
asonix
5913c024c2 Version v0.1.0-r107 2020-06-10 18:18:22 -05:00
asonix
96b526e87e Version v0.1.0-r106 2020-06-10 17:58:10 -05:00
asonix
d32f1370a5 Version v0.1.0-r105 2020-06-10 17:47:57 -05:00
112 changed files with 10879 additions and 5778 deletions

.cargo/config.toml (new file, 2 lines changed)

@@ -0,0 +1,2 @@
[build]
rustflags = ["--cfg", "tokio_unstable"]
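This flag is what the optional `console` feature in `Cargo.toml` (shown further down) depends on: `console-subscriber` requires tokio's unstable instrumentation. With this config file checked in, a console-enabled build should be as simple as the following sketch:

```bash
# tokio_unstable is already set via .cargo/config.toml, so no extra
# RUSTFLAGS are needed on the command line.
cargo build --features console
```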

.env (15 lines changed)

@@ -1,2 +1,13 @@
OUT_DIR="compiled_templates"
DATABASE_URL=postgres://ap_actix:ap_actix@localhost:5432/ap_actix
HOSTNAME=localhost:8079
PORT=8079
HTTPS=false
DEBUG=true
RESTRICTED_MODE=true
VALIDATE_SIGNATURES=false
API_TOKEN=somesecretpassword
FOOTER_BLURB="Contact <a href=\"https://masto.asonix.dog/@asonix\">@asonix</a> for inquiries"
LOCAL_DOMAINS="masto.asonix.dog"
LOCAL_BLURB="<p>Welcome to my cool relay where I have cool relay things happening. I hope you enjoy your stay!</p>"
# OPENTELEMETRY_URL=http://localhost:4317
PROMETHEUS_ADDR=127.0.0.1
PROMETHEUS_PORT=9000

(new Forgejo Actions CI workflow file; filename not captured in this view)

@@ -0,0 +1,61 @@
on:
push:
branches:
- '*'
pull_request:
branches:
- main
jobs:
clippy:
runs-on: docker
container:
image: docker.io/asonix/actions-base-image:0.1
steps:
-
name: Checkout relay
uses: https://github.com/actions/checkout@v4
-
name: Cargo Cache
uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
-
name: Clippy
run: |
cargo clippy --no-default-features -- -D warnings
tests:
runs-on: docker
container:
image: docker.io/asonix/actions-base-image:0.1
steps:
-
name: Checkout relay
uses: https://github.com/actions/checkout@v4
-
name: Cargo Cache
uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
-
name: Test
run: cargo test
check:
strategy:
fail-fast: false
matrix:
target:
- x86_64-unknown-linux-musl
- armv7-unknown-linux-musleabihf
- aarch64-unknown-linux-musl
runs-on: docker
container:
image: docker.io/asonix/actions-base-image:0.1
steps:
-
name: Checkout relay
uses: https://github.com/actions/checkout@v4
-
name: Cargo Cache
uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
-
name: Debug builds
run: cargo zigbuild --target ${{ matrix.target }}

(new Forgejo Actions release workflow file; filename not captured in this view)

@@ -0,0 +1,226 @@
on:
push:
tags:
- 'v*.*.*'
env:
REGISTRY_IMAGE: asonix/relay
jobs:
clippy:
runs-on: base-image
container:
image: docker.io/asonix/actions-base-image:0.1
steps:
-
name: Checkout relay
uses: https://github.com/actions/checkout@v4
-
name: Cargo Cache
uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
-
name: Clippy
run: |
# cargo clippy --no-default-features -- -D warnings
cargo clippy --no-default-features
tests:
runs-on: docker
container:
image: docker.io/asonix/actions-base-image:0.1
steps:
-
name: Checkout relay
uses: https://github.com/actions/checkout@v4
-
name: Cargo Cache
uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
-
name: Test
run: cargo test
build:
needs:
- clippy
- tests
runs-on: docker
container:
image: docker.io/asonix/actions-base-image:0.1
strategy:
fail-fast: false
matrix:
info:
- target: x86_64-unknown-linux-musl
artifact: linux-amd64
platform: linux/amd64
- target: armv7-unknown-linux-musleabihf
artifact: linux-arm32v7
platform: linux/arm/v7
- target: aarch64-unknown-linux-musl
artifact: linux-arm64v8
platform: linux/arm64
steps:
-
name: Checkout relay
uses: https://github.com/actions/checkout@v4
-
name: Cargo Cache
uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
-
name: Prepare Platform
run: |
platform=${{ matrix.info.platform }}
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
shell: bash
-
name: Docker meta
id: meta
uses: https://github.com/docker/metadata-action@v5
with:
images: ${{ env.REGISTRY_IMAGE }}
flavor: |
latest=auto
suffix=-${{ matrix.info.artifact }}
tags: |
type=raw,value=latest,enable={{ is_default_branch }}
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=semver,pattern={{major}}
-
name: Set up QEMU
uses: https://github.com/docker/setup-qemu-action@v3
-
name: Set up Docker Buildx
uses: https://github.com/docker/setup-buildx-action@v3
-
name: Docker login
uses: https://github.com/docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
-
name: Compile relay
run: cargo zigbuild --target ${{ matrix.info.target }} --release
-
name: Prepare artifacts
run: |
mkdir artifacts
cp target/${{ matrix.info.target }}/release/relay artifacts/relay-${{ matrix.info.artifact }}
-
uses: https://github.com/actions/upload-artifact@v3
with:
name: binaries
path: artifacts/
-
name: Prepare binary
run: |
cp target/${{ matrix.info.target }}/release/relay docker/forgejo/relay
-
name: Build and push ${{ matrix.info.platform }} docker image
id: build
uses: docker/build-push-action@v5
with:
context: ./docker/forgejo
platforms: ${{ matrix.info.platform }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
outputs: type=image,name=${{ env.REGISTRY_IMAGE }},name-canonical=true,push=true
-
name: Export digest
run: |
mkdir -p /tmp/digests
digest="${{ steps.build.outputs.digest }}"
touch "/tmp/digests/${digest#sha256:}"
echo "Created /tmp/digests/${digest#sha256:}"
shell: bash
-
name: Upload ${{ matrix.info.platform }} digest
uses: https://github.com/actions/upload-artifact@v3
with:
name: digests
path: /tmp/digests/*
if-no-files-found: error
retention-days: 1
publish-docker:
runs-on: docker
container:
image: docker.io/asonix/actions-base-image:0.1
needs: [build]
steps:
-
name: Download digests
uses: https://github.com/actions/download-artifact@v3
with:
name: digests
path: /tmp/digests
pattern: digests-*
merge-multiple: true
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
-
name: Docker login
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
-
name: Docker meta
id: meta
uses: https://github.com/docker/metadata-action@v5
with:
images: ${{ env.REGISTRY_IMAGE }}
flavor: |
latest=auto
tags: |
type=raw,value=latest,enable={{ is_default_branch }}
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=semver,pattern={{major}}
-
name: Create manifest list and push
working-directory: /tmp/digests
run: |
tags=$(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "${DOCKER_METADATA_OUTPUT_JSON}")
images=$(printf "${{ env.REGISTRY_IMAGE }}@sha256:%s " *)
echo "Running 'docker buildx imagetools create ${tags[@]} ${images[@]}'"
docker buildx imagetools create ${tags[@]} ${images[@]}
shell: bash
-
name: Inspect Image
run: |
docker buildx imagetools inspect ${{ env.REGISTRY_IMAGE }}:${{ steps.meta.outputs.version }}
publish-forgejo:
needs: [build]
runs-on: docker
container:
image: docker.io/asonix/actions-base-image:0.1
steps:
- uses: https://github.com/actions/download-artifact@v3
with:
name: binaries
path: artifacts/
merge-multiple: true
- uses: actions/forgejo-release@v1
with:
direction: upload
token: ${{ secrets.GITHUB_TOKEN }}
release-dir: artifacts/
publish-crate:
needs: [build]
runs-on: docker
container:
image: docker.io/asonix/actions-base-image:0.1
steps:
-
name: Checkout relay
uses: https://github.com/actions/checkout@v4
-
name: Cargo Cache
uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
-
name: Publish Crate
run: cargo publish --token ${{ secrets.CRATES_IO_TOKEN }}

.gitignore (vendored, 4 lines changed)

@@ -1,2 +1,6 @@
/target
/artifacts
/sled
/.direnv
/.envrc
/result

Cargo.lock (generated, 5248 lines changed)
File diff suppressed because it is too large

Cargo.toml

@@ -1,60 +1,114 @@
[package]
name = "relay"
name = "ap-relay"
description = "A simple activitypub relay"
version = "0.1.0"
version = "0.3.116"
authors = ["asonix <asonix@asonix.dog>"]
license-file = "LICENSE"
license = "AGPL-3.0"
readme = "README.md"
repository = "https://git.asonix.dog/asonix/ap-relay"
repository = "https://git.asonix.dog/asonix/relay"
keywords = ["activitypub", "relay"]
edition = "2018"
edition = "2021"
build = "src/build.rs"
[[bin]]
name = "relay"
path = "src/main.rs"
[profile.release]
strip = true
[features]
console = ["dep:console-subscriber"]
default = []
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
anyhow = "1.0"
actix-rt = "1.1.1"
actix-web = { version = "3.0.0-alpha.2", features = ["rustls"] }
actix-webfinger = "0.3.0-alpha.6"
activitystreams-new = { git = "https://git.asonix.dog/asonix/activitystreams-sketch" }
activitystreams-ext = { git = "https://git.asonix.dog/asonix/activitystreams-ext" }
ammonia = "3.1.0"
async-mutex = "1.0.1"
async-trait = "0.1.24"
background-jobs = "0.8.0-alpha.2"
bytes = "0.5.4"
base64 = "0.12"
config = "0.10.1"
deadpool = "0.5.1"
deadpool-postgres = "0.5.5"
actix-web = { version = "4.4.0", default-features = false, features = ["compress-brotli", "compress-gzip", "rustls-0_23"] }
actix-webfinger = { version = "0.5.0", default-features = false }
activitystreams = "0.7.0-alpha.25"
activitystreams-ext = "0.1.0-alpha.3"
ammonia = "4.0.0"
async-cpupool = "0.3.0"
bcrypt = "0.16"
base64 = "0.22"
clap = { version = "4.0.0", features = ["derive"] }
color-eyre = "0.6.2"
config = { version = "0.14.0", default-features = false, features = ["toml", "json", "yaml"] }
console-subscriber = { version = "0.4", optional = true }
dashmap = "6.0.1"
dotenv = "0.15.0"
env_logger = "0.7.1"
futures = "0.3.4"
http-signature-normalization-actix = { version = "0.3.0-alpha.11", default-features = false, features = ["sha-2"] }
log = "0.4"
lru = "0.5.1"
futures-core = "0.3.30"
lru = "0.12.0"
metrics = "0.23.0"
metrics-exporter-prometheus = { version = "0.15.0", default-features = false, features = [
"http-listener",
] }
metrics-util = "0.17.0"
mime = "0.3.16"
num_cpus = "1.12"
pretty_env_logger = "0.4.0"
rand = "0.7"
rsa = "0.2"
rsa-magic-public-key = "0.1.1"
rsa-pem = "0.1.0"
minify-html = "0.15.0"
opentelemetry = "0.27.1"
opentelemetry_sdk = { version = "0.27", features = ["rt-tokio"] }
opentelemetry-otlp = { version = "0.27", features = ["grpc-tonic"] }
pin-project-lite = "0.2.9"
# pinned to metrics-util
quanta = "0.12.0"
rand = "0.8"
reqwest = { version = "0.12", default-features = false, features = ["rustls-tls", "stream"]}
reqwest-middleware = { version = "0.4", default-features = false, features = ["json"] }
reqwest-tracing = "0.5.0"
ring = "0.17.5"
rsa = "0.9"
rsa-magic-public-key = "0.8.0"
rustls = { version = "0.23.0", default-features = false, features = ["ring", "logging", "std", "tls12"] }
rustls-channel-resolver = "0.3.0"
rustls-pemfile = "2"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
sha2 = "0.9"
structopt = "0.3.12"
thiserror = "1.0"
tokio = { version = "0.2.13", features = ["sync"] }
tokio-postgres = { version = "0.5.1", features = ["with-serde_json-1", "with-uuid-0_8", "with-chrono-0_4"] }
ttl_cache = "0.5.1"
uuid = { version = "0.8", features = ["v4", "serde"] }
sled = "0.34.7"
streem = "0.2.0"
teloxide = { version = "0.13.0", default-features = false, features = [
"ctrlc_handler",
"macros",
"rustls",
] }
thiserror = "2.0"
time = { version = "0.3.17", features = ["serde"] }
tracing = "0.1"
tracing-error = "0.2"
tracing-log = "0.2"
tracing-opentelemetry = "0.28"
tracing-subscriber = { version = "0.3", features = [
"ansi",
"env-filter",
"fmt",
] }
tokio = { version = "1", features = ["full", "tracing"] }
uuid = { version = "1", features = ["v4", "serde"] }
[dependencies.background-jobs]
version = "0.19.0"
default-features = false
features = ["error-logging", "metrics", "tokio"]
[dependencies.http-signature-normalization-actix]
version = "0.11.1"
default-features = false
features = ["server", "ring"]
[dependencies.http-signature-normalization-reqwest]
version = "0.13.0"
default-features = false
features = ["middleware", "ring"]
[dependencies.tracing-actix-web]
version = "0.7.9"
[build-dependencies]
anyhow = "1.0"
color-eyre = "0.6.2"
dotenv = "0.15.0"
ructe = { version = "0.11.0", features = ["sass", "mime03"] }
ructe = { version = "0.17.0", features = ["sass", "mime03"] }
toml = "0.8.0"
[profile.dev.package.rsa]
opt-level = 3

LICENSE (968 lines changed)
File diff suppressed because it is too large

README.md (231 lines changed)

@@ -1,31 +1,55 @@
# AodeRelay
_A simple and efficient activitypub relay_
### Installation
#### Docker
If running docker, you can start the relay with the following command:
```
$ sudo docker run --rm -it \
-v "$(pwd):/mnt/" \
-e ADDR=0.0.0.0 \
-e SLED_PATH=/mnt/sled/db-0.34 \
-p 8080:8080 \
asonix/relay:0.3.85
```
This will launch the relay with the database stored in `./sled/db-0.34` and listening on port 8080.
#### Cargo
With cargo installed, the relay can be installed to your cargo bin directory with the following command:
```
$ cargo install ap-relay
```
Then it can be run with this:
```
$ ADDR=0.0.0.0 relay
```
This will launch the relay with the database stored in `./sled/db-0.34` and listening on port 8080.
#### Source
The relay can be launched directly from this git repository with the following commands:
```
$ git clone https://git.asonix.dog/asonix/relay
$ ADDR=0.0.0.0 cargo run --release
```
### Usage
To simply run the server, the command is as follows:
```bash
$ ./relay
```
#### Administration
> **NOTE:** The server _must be running_ in order to update the lists with the following commands
To learn about any other tasks, the `--help` flag can be passed:
```bash
$ ./relay --help
relay 0.1.0
An activitypub relay
USAGE:
relay [FLAGS] [OPTIONS]
Usage: relay [OPTIONS]
FLAGS:
-h, --help Prints help information
-j, --jobs-only Only process background jobs, do not start the relay server
-n, --no-jobs Only run the relay server, do not process background jobs
-u, --undo Undo whitelisting or blocking domains
-V, --version Prints version information
OPTIONS:
-b <blocks>... A list of domains that should be blocked
-w <whitelists>... A list of domains that should be whitelisted
Options:
-b <BLOCKS> A list of domains that should be blocked
-a <ALLOWED> A list of domains that should be allowed
-u, --undo Undo allowing or blocking domains
-h, --help Print help information
```
To add domains to the blocklist, use the `-b` flag and pass a list of domains
@@ -36,19 +60,119 @@ To remove domains from the blocklist, simply pass the `-u` flag along with `-b`
```bash
$ ./relay -ub asonix.dog blimps.xyz
```
The same rules apply for whitelisting domains, although domains are whitelisted with the `-w` flag
The same rules apply for allowing domains, although domains are allowed with the `-a` flag
```bash
$ ./relay -w asonix.dog blimps.xyz
$ ./relay -uw asonix.dog blimps.xyz
$ ./relay -a asonix.dog blimps.xyz
$ ./relay -ua asonix.dog blimps.xyz
```
Whitelisted domains are only checked against incoming activities if `WHITELIST_MODE` is enabled.
Blocks can be published in the nodeinfo metadata by setting `PUBLISH_BLOCKS` to true
### Configuration
By default, all these values are set to development values. These are read from the environment, or
from the `.env` file in the working directory.
```env
HOSTNAME=localhost:8080
ADDR=127.0.0.1
PORT=8080
DEBUG=true
RESTRICTED_MODE=false
VALIDATE_SIGNATURES=false
HTTPS=false
PRETTY_LOG=true
PUBLISH_BLOCKS=false
SLED_PATH=./sled/db-0.34
```
To run this server in production, you'll likely want to set most of them
```env
HOSTNAME=relay.my.tld
ADDR=0.0.0.0
PORT=8080
DEBUG=false
RESTRICTED_MODE=false
VALIDATE_SIGNATURES=true
HTTPS=true
PRETTY_LOG=false
PUBLISH_BLOCKS=true
SLED_PATH=./sled/db-0.34
RUST_LOG=warn
API_TOKEN=somepasswordishtoken
OPENTELEMETRY_URL=localhost:4317
TELEGRAM_TOKEN=secret
TELEGRAM_ADMIN_HANDLE=your_handle
TLS_KEY=/path/to/key
TLS_CERT=/path/to/cert
FOOTER_BLURB="Contact <a href=\"https://masto.asonix.dog/@asonix\">@asonix</a> for inquiries"
LOCAL_DOMAINS=masto.asonix.dog
LOCAL_BLURB="<p>Welcome to my cool relay where I have cool relay things happening. I hope you enjoy your stay!</p>"
PROMETHEUS_ADDR=0.0.0.0
PROMETHEUS_PORT=9000
CLIENT_TIMEOUT=10
DELIVER_CONCURRENCY=8
SIGNATURE_THREADS=2
```
For advanced setups, it may be useful to run the relay API and the background tasks in separate
processes, possibly on separate hosts. The `-j` and `-n` flags have been provided for this purpose.
By passing `-n`, a relay can be spawned that handles no deliveries. By passing `-j`, a relay will
not be spawned, but any deliveries existing in the database will be processed.
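As a minimal sketch of such a split (assuming both processes are pointed at the same data store and share the same configuration):

```bash
# Process 1: serve the relay's HTTP routes only; deliveries are left
# in the database for another process to pick up.
./relay -n

# Process 2: process background deliveries only; no HTTP server is spawned.
./relay -j
```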
#### Descriptions
##### `HOSTNAME`
The domain or IP address the relay is hosted on. If you launch the relay on `example.com`, that would be your HOSTNAME. The default is `localhost:8080`.
##### `ADDR`
The address the server binds to. By default, this is `127.0.0.1`, so for production cases it should be set to `0.0.0.0` or another public address.
##### `PORT`
The port the server binds to. This is `8080` by default, but it can be changed if needed.
##### `DEBUG`
Whether to print incoming activities to the console when requests hit the /inbox route. This defaults to `true`, but should be set to `false` in production cases. Since every activity sent to the relay is public anyway, this doesn't represent a security risk.
##### `RESTRICTED_MODE`
This setting enables an 'allowlist' setup where only servers that have been explicitly enabled through the `relay -a` command can join the relay. This is `false` by default. If `RESTRICTED_MODE` is not enabled, then manually allowing domains with `relay -a` has no effect.
##### `VALIDATE_SIGNATURES`
This setting enforces checking HTTP signatures on incoming activities. It defaults to `true`.
##### `HTTPS`
Whether the current server is running on an HTTPS port or not. This is used for generating URLs to the currently running relay. By default it is set to `true`.
##### `PUBLISH_BLOCKS`
Whether or not to publish a list of blocked domains in the `nodeinfo` metadata for the server. It defaults to `false`.
##### `SLED_PATH`
Where to store the on-disk database of connected servers. This defaults to `./sled/db-0.34`.
##### `RUST_LOG`
The log level to print. Available levels are `ERROR`, `WARN`, `INFO`, `DEBUG`, and `TRACE`. You can also specify module paths to enable some logs but not others, such as `RUST_LOG=warn,tracing_actix_web=info,relay=info`. This defaults to `warn`.
##### `SOURCE_REPO`
The URL to the source code for the relay. This defaults to `https://git.asonix.dog/asonix/relay`, but should be changed if you're running a fork hosted elsewhere.
##### `REPOSITORY_COMMIT_BASE`
The base path of the repository commit hash reference. For example, `/src/commit/` for Gitea, `/tree/` for GitLab.
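For illustration, a fork hosted on a hypothetical Gitea instance might set the pair like this (the host, repository, and commit hash are placeholders):

```bash
# With these values, commit deadbeef would be linked as
# https://git.example.com/me/relay/src/commit/deadbeef
SOURCE_REPO=https://git.example.com/me/relay
REPOSITORY_COMMIT_BASE=/src/commit/
```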
##### `API_TOKEN`
The secret token used to access the admin APIs. This must be set for the command line to function.
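Since the admin CLI talks to the running server, a block issued from the command line might look like this sketch (the token and domain are placeholders, and the token is assumed to be supplied through the environment like every other setting):

```bash
# Must match the API_TOKEN the running relay was started with.
API_TOKEN=somepasswordishtoken ./relay -b spam.example.com
```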
##### `OPENTELEMETRY_URL`
A URL for exporting opentelemetry spans. This is mostly useful for debugging. There is no default, since most people probably don't run an opentelemetry collector.
##### `TELEGRAM_TOKEN`
A Telegram Bot Token for running the relay administration bot. There is no default.
##### `TELEGRAM_ADMIN_HANDLE`
The handle of the telegram user allowed to administer the relay. There is no default.
##### `TLS_KEY`
Optional - Set this if you are running the relay directly on the internet and have a TLS key to provide HTTPS for your relay.
##### `TLS_CERT`
Optional - Set this if you are running the relay directly on the internet and have a TLS certificate chain to provide HTTPS for your relay.
##### `FOOTER_BLURB`
Optional - Add custom notes in the footer of the page
##### `LOCAL_DOMAINS`
Optional - domains of mastodon servers run by the same admin as the relay
##### `LOCAL_BLURB`
Optional - description for the relay
##### `PROMETHEUS_ADDR`
Optional - Address to bind to for serving the prometheus scrape endpoint
##### `PROMETHEUS_PORT`
Optional - Port to bind to for serving the prometheus scrape endpoint
##### `CLIENT_TIMEOUT`
Optional - How long the relay will hold open a connection (in seconds) to a remote server during fetches and deliveries. This defaults to `10`.
##### `DELIVER_CONCURRENCY`
Optional - How many deliver requests the relay should allow to be in-flight per thread. The default is `8`.
##### `SIGNATURE_THREADS`
Optional - Override the number of threads used for signing and verifying requests. The default is `std::thread::available_parallelism()` (it tries to detect how many cores you have). If it cannot detect the correct number of cores, it falls back to 1.
##### `PROXY_URL`
Optional - URL of an HTTP proxy to forward outbound requests through
##### `PROXY_USERNAME`
Optional - username to provide to the HTTP proxy set with `PROXY_URL` through HTTP Basic Auth
##### `PROXY_PASSWORD`
Optional - password to provide to the HTTP proxy set with `PROXY_URL` through HTTP Basic Auth
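Taken together, a relay behind an authenticated forward proxy might be configured like this (hypothetical values; note that if only one of the username/password pair is set, the relay logs a warning and skips proxy auth, as seen in the `src/config.rs` changes below):
```env
PROXY_URL=http://proxy.internal:3128
PROXY_USERNAME=relay
PROXY_PASSWORD=changeme
```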
### Subscribing
Mastodon admins can subscribe to this relay by adding the `/inbox` route to their relay settings.
@ -68,10 +192,16 @@ example, if the server is `https://relay.my.tld`, the correct URL would be
- Follow Public, become a listener of the relay
- Undo Follow {self-actor}, stop listening on the relay, an Undo Follow will be sent back
- Undo Follow Public, stop listening on the relay
- Delete {anything}, the Delete {anything} is relayed verbatim to listening servers.
Note that this activity will likely be rejected by the listening servers unless it has been
signed with a JSON-LD signature
- Update {anything}, the Update {anything} is relayed verbatim to listening servers.
Note that this activity will likely be rejected by the listening servers unless it has been
signed with a JSON-LD signature
- Add {anything}, the Add {anything} is relayed verbatim to listening servers.
Note that this activity will likely be rejected by the listening servers unless it has been
signed with a JSON-LD signature
- Remove {anything}, the Remove {anything} is relayed verbatim to listening servers.
Note that this activity will likely be rejected by the listening servers unless it has been
signed with a JSON-LD signature
@ -79,48 +209,17 @@ example, if the server is `https://relay.my.tld`, the correct URL would be
- Webfinger
- NodeInfo
### Configuration
By default, all these values are set to development values. These are read from the environment, or
from the `.env` file in the working directory.
```env
HOSTNAME=localhost:8080
ADDR=127.0.0.1
PORT=8080
DEBUG=true
WHITELIST_MODE=false
VALIDATE_SIGNATURES=false
HTTPS=false
DATABASE_URL=
PRETTY_LOG=true
PUBLISH_BLOCKS=false
MAX_CONNECTIONS=4 # how many postgres connections should be made
```
To run this server in production, you'll likely want to set most of them
```env
HOSTNAME=relay.my.tld
ADDR=0.0.0.0
PORT=8080
DEBUG=false
WHITELIST_MODE=false
VALIDATE_SIGNATURES=true
HTTPS=true
DATABASE_URL=postgres://pg_user:pg_pass@pg_host:pg_port/pg_database
PRETTY_LOG=false
PUBLISH_BLOCKS=true
MAX_CONNECTIONS=16
```
### Known issues
Pleroma and Akkoma do not support validating JSON-LD signatures, meaning many activities such as Delete, Update, Add, and Remove will be rejected with a message similar to `WARN: Response from https://example.com/inbox, "Invalid HTTP Signature"`. This is normal and not an issue with the relay.
### Contributing
Unless otherwise stated, all contributions to this project will be licensed under the CSL with
the exceptions listed in the License section of this file.
Feel free to open issues for anything you find wrong. Please note that any contributed code will be licensed under the AGPLv3.
### License
This work is licensed under the Cooperative Software License. This is not a Free Software
License, but may be considered a "source-available License." For most hobbyists, self-employed
developers, worker-owned companies, and cooperatives, this software can be used in most
projects so long as this software is distributed under the terms of the CSL. For more
information, see the provided LICENSE file. If none exists, the license can be found online
[here](https://lynnesbian.space/csl/). If you are a free software project and wish to use this
software under the terms of the GNU Affero General Public License, please contact me at
[asonix@asonix.dog](mailto:asonix@asonix.dog) and we can sort that out. If you wish to use this
project under any other license, especially in proprietary software, the answer is likely no.
Copyright © 2022 Riley Trautman
AodeRelay is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
AodeRelay is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. This file is part of AodeRelay.
You should have received a copy of the GNU General Public License along with AodeRelay. If not, see [http://www.gnu.org/licenses/](http://www.gnu.org/licenses/).


@ -1,5 +0,0 @@
# For documentation on how to configure this file,
# see diesel.rs/guides/configuring-diesel-cli
[print_schema]
file = "src/schema.rs"

docker/forgejo/Dockerfile Normal file

@ -0,0 +1,24 @@
FROM alpine:3.19
ARG UID=991
ARG GID=991
ENV \
UID=${UID} \
GID=${GID}
USER root
RUN \
addgroup -g "${GID}" app && \
adduser -D -G app -u "${UID}" -g "" -h /opt/app app && \
apk add tini && \
chown -R app:app /mnt
COPY relay /usr/local/bin/relay
USER app
EXPOSE 6669
EXPOSE 8080
VOLUME /mnt
ENTRYPOINT ["/sbin/tini", "--"]
CMD ["/usr/local/bin/relay"]


@ -1,72 +0,0 @@
FROM rustembedded/cross:x86_64-unknown-linux-musl AS amd64-builder
ARG UID=991
ARG GID=991
ENV TOOLCHAIN=stable
ENV TARGET=x86_64-unknown-linux-musl
ENV TOOL=x86_64-linux-musl
RUN \
apt-get update && \
apt-get upgrade -y
RUN \
addgroup --gid "${GID}" build && \
adduser \
--disabled-password \
--gecos "" \
--ingroup build \
--uid "${UID}" \
--home /opt/build \
build
ADD https://sh.rustup.rs /opt/build/rustup.sh
RUN \
chown -R build:build /opt/build
USER build
WORKDIR /opt/build
ENV PATH="$PATH:/opt/build/.cargo/bin"
RUN \
chmod +x rustup.sh && \
./rustup.sh --default-toolchain $TOOLCHAIN --profile minimal -y && \
rustup target add $TARGET
FROM amd64-builder as builder
ARG TAG=master
ARG REPOSITORY=https://git.asonix.dog/asonix/ap-relay
ARG BINARY=relay
RUN \
git clone -b $TAG $REPOSITORY repo
WORKDIR /opt/build/repo
RUN \
cargo build --release --target $TARGET && \
$TOOL-strip target/$TARGET/release/$BINARY
FROM amd64/alpine:3.12
ARG UID=991
ARG GID=991
ARG BINARY=relay
RUN \
apk add tini && \
addgroup --gid $GID relay && \
adduser -D -G relay -u $UID -g "" -h /opt/relay relay && \
chown -R relay:relay /opt/relay
COPY --from=builder /opt/build/repo/target/x86_64-unknown-linux-musl/release/relay /usr/bin/relay
EXPOSE 8080
WORKDIR /opt/relay
USER relay
ENTRYPOINT ["/sbin/tini", "--"]
CMD ["relay"]


@ -1,72 +0,0 @@
FROM rustembedded/cross:arm-unknown-linux-musleabihf AS arm32v7-builder
ARG UID=991
ARG GID=991
ENV TOOLCHAIN=stable
ENV TARGET=arm-unknown-linux-musleabihf
ENV TOOL=arm-linux-musleabihf
RUN \
apt-get update && \
apt-get upgrade -y
RUN \
addgroup --gid "${GID}" build && \
adduser \
--disabled-password \
--gecos "" \
--ingroup build \
--uid "${UID}" \
--home /opt/build \
build
ADD https://sh.rustup.rs /opt/build/rustup.sh
RUN \
chown -R build:build /opt/build
USER build
WORKDIR /opt/build
ENV PATH="$PATH:/opt/build/.cargo/bin"
RUN \
chmod +x rustup.sh && \
./rustup.sh --default-toolchain $TOOLCHAIN --profile minimal -y && \
rustup target add $TARGET
FROM arm32v7-builder as builder
ARG TAG=master
ARG REPOSITORY=https://git.asonix.dog/asonix/ap-relay
ARG BINARY=relay
RUN \
git clone -b $TAG $REPOSITORY repo
WORKDIR /opt/build/repo
RUN \
cargo build --release --target $TARGET && \
$TOOL-strip target/$TARGET/release/$BINARY
FROM arm32v7/alpine:3.12
ARG UID=991
ARG GID=991
ARG BINARY=relay
RUN \
apk add tini && \
addgroup --gid $GID relay && \
adduser -D -G relay -u $UID -g "" -h /opt/relay relay && \
chown -R relay:relay /opt/relay
COPY --from=builder /opt/build/repo/target/arm-unknown-linux-musleabihf/release/relay /usr/bin/relay
EXPOSE 8080
WORKDIR /opt/relay
USER relay
ENTRYPOINT ["/sbin/tini", "--"]
CMD ["relay"]


@ -1,72 +0,0 @@
FROM rustembedded/cross:aarch64-unknown-linux-musl AS aarch64-builder
ARG UID=991
ARG GID=991
ENV TOOLCHAIN=stable
ENV TARGET=aarch64-unknown-linux-musl
ENV TOOL=aarch64-linux-musl
RUN \
apt-get update && \
apt-get upgrade -y
RUN \
addgroup --gid "${GID}" build && \
adduser \
--disabled-password \
--gecos "" \
--ingroup build \
--uid "${UID}" \
--home /opt/build \
build
ADD https://sh.rustup.rs /opt/build/rustup.sh
RUN \
chown -R build:build /opt/build
USER build
WORKDIR /opt/build
ENV PATH="PATH:/opt/build/.cargo/bin"
RUN \
chmod +x rustup.sh && \
./rustup.sh --default-toolchain $TOOLCHAIN --profile minimal -y && \
rustup target add $TARGET
FROM aarch64-builder as builder
ARG TAG=master
ARG REPOSITORY=https://git.asonix.dog/asonix/ap-relay
ARG BINARY=relay
RUN \
git clone -b $TAG $REPOSITORY repo
WORKDIR /opt/build/repo
RUN \
cargo build --release --target $TARGET && \
$TOOL-strip target/$TARGET/release/$BINARY
FROM arm64v8/alpine:3.12
ARG UID=991
ARG GID=991
ARG BINARY=relay
RUN \
apk add tini && \
addgroup --gid $GID relay && \
adduser -D -G relay -u $UID -g "" -h /opt/relay relay && \
chown -R relay:relay /opt/relay
COPY --from=builder /opt/build/repo/target/aarch64-unknown-linux-musl/release/relay /usr/bin/relay
EXPOSE 8080
WORKDIR /opt/relay
USER relay
ENTRYPOINT ["/sbin/tini", "--"]
CMD ["relay"]


@ -1,11 +0,0 @@
FROM asonix/diesel-cli:v1.4.0-r1-arm64v8
COPY migrations /migrations
USER root
RUN \
apt-get install -y tini && \
chown -R diesel:diesel /migrations
USER diesel
ENTRYPOINT ["/usr/bin/tini"]
CMD ["diesel", "migration", "run", "--migration-dir", "/migrations"]


@ -1,72 +0,0 @@
#!/usr/bin/env bash
TAG=$1
MIGRATIONS=$2
function require() {
if [ "$1" = "" ]; then
echo "input '$2' required"
print_help
exit 1
fi
}
function print_help() {
echo "build.sh"
echo ""
echo "Usage:"
echo " build.sh [tag] [migrations]"
echo ""
echo "Args:"
echo " tag: The git tag to create and publish"
echo " migrations: (optional) Whether to build the migrations container as well"
}
function build_image() {
repo=$1
tag=$2
arch=$3
docker build \
--pull \
--build-arg TAG="${tag}" \
-f "Dockerfile.${arch}" \
-t "${repo}:${tag}-${arch}" \
-t "${repo}:latest-${arch}" \
.
docker push "${repo}:${tag}-arm64v8"
docker push "${repo}:latest-arm64v8"
}
require "$TAG" "tag"
if ! docker run --rm -it arm64v8/ubuntu:19.10 /bin/bash -c 'echo "docker is configured correctly"'; then
echo "docker is not configured to run on qemu-emulated architectures, fixing will require sudo"
sudo docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
fi
set -xe
git checkout master
git commit -m "Version $TAG"
git tag $TAG
git push origin $TAG
git push
build_image "asonix/relay" "$TAG" "arm64v8"
build_image "asonix/relay" "$TAG" "arm32v7"
build_image "asonix/relay" "$TAG" "amd64"
./manifest.sh "asonix/relay" "$TAG"
./manifest.sh "asonix/relay" "latest"
if [ "${MIGRATIONS}" = "migrations" ]; then
build_image "asonix/relay-migrations" "$TAG" arm64v8
build_image "asonix/relay-migrations" "$TAG" arm32v7
build_image "asonix/relay-migrations" "$TAG" amd64
./manifest.sh "asonix/relay-migrations" "$TAG"
./manifest.sh "asonix/relay-migrations" "latest"
fi


@ -0,0 +1,20 @@
version: '3.3'
services:
relay:
image: asonix/relay:0.3.115
ports:
- "8079:8079"
restart: always
environment:
- HOSTNAME=relay.my.tld
- ADDR=0.0.0.0
- PORT=8080
- DEBUG=false
- RESTRICTED_MODE=false
- VALIDATE_SIGNATURES=true
- HTTPS=true
- SLED_PATH=/mnt/sled/db-0.34
- PRETTY_LOG=false
- PUBLISH_BLOCKS=true
- API_TOKEN=somepasswordishtoken


@ -1,43 +0,0 @@
#!/usr/bin/env bash
function require() {
if [ "$1" = "" ]; then
echo "input '$2' required"
print_help
exit 1
fi
}
function print_help() {
echo "deploy.sh"
echo ""
echo "Usage:"
echo " manifest.sh [tag]"
echo ""
echo "Args:"
echo " repo: The docker repository to push the manifest to"
echo " tag: The git tag to be applied to the image manifest"
}
repo=$1
tag=$2
require "$repo" "repo"
require "$tag" "tag"
set -xe
docker manifest create $repo:$tag \
-a $repo:arm64v8-$tag \
-a $repo:arm32v7-$tag \
-a $repo:amd64-$tag
docker manifest annotate $repo:$tag \
$repo:arm64v8-$tag --os linux --arch arm64 --variant v8
docker manifest annotate $repo:$tag \
$repo:arm32v7-$tag --os linux --arch arm --variant v7
docker manifest annotate $repo:$tag \
$repo:amd64-$tag --os linux --arch amd64
docker manifest push $repo:$tag --purge

flake.lock generated Normal file

@ -0,0 +1,61 @@
{
"nodes": {
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1710146030,
"narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1733550349,
"narHash": "sha256-NcGumB4Lr6KSDq+nIqXtNA8QwAQKDSZT7N9OTGWbTrs=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "e2605d0744c2417b09f8bf850dfca42fcf537d34",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-24.11",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

flake.nix Normal file

@ -0,0 +1,34 @@
{
description = "relay";
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.11";
flake-utils.url = "github:numtide/flake-utils";
};
outputs = { self, nixpkgs, flake-utils }:
flake-utils.lib.eachDefaultSystem (system:
let
pkgs = import nixpkgs {
inherit system;
};
in
{
packages = rec {
relay = pkgs.callPackage ./relay.nix { };
default = relay;
};
apps = rec {
dev = flake-utils.lib.mkApp { drv = self.packages.${system}.relay; };
default = dev;
};
devShell = with pkgs; mkShell {
nativeBuildInputs = [ cargo cargo-outdated cargo-zigbuild clippy gcc protobuf rust-analyzer rustc rustfmt ];
RUST_SRC_PATH = "${pkgs.rust.packages.stable.rustPlatform.rustLibSrc}";
};
});
}


@ -1,6 +0,0 @@
-- This file was automatically created by Diesel to setup helper functions
-- and other internal bookkeeping. This file is safe to edit, any future
-- changes will be added to existing projects as new migrations.
DROP FUNCTION IF EXISTS diesel_manage_updated_at(_tbl regclass);
DROP FUNCTION IF EXISTS diesel_set_updated_at();


@ -1,36 +0,0 @@
-- This file was automatically created by Diesel to setup helper functions
-- and other internal bookkeeping. This file is safe to edit, any future
-- changes will be added to existing projects as new migrations.
-- Sets up a trigger for the given table to automatically set a column called
-- `updated_at` whenever the row is modified (unless `updated_at` was included
-- in the modified columns)
--
-- # Example
--
-- ```sql
-- CREATE TABLE users (id SERIAL PRIMARY KEY, updated_at TIMESTAMP NOT NULL DEFAULT NOW());
--
-- SELECT diesel_manage_updated_at('users');
-- ```
CREATE OR REPLACE FUNCTION diesel_manage_updated_at(_tbl regclass) RETURNS VOID AS $$
BEGIN
EXECUTE format('CREATE TRIGGER set_updated_at BEFORE UPDATE ON %s
FOR EACH ROW EXECUTE PROCEDURE diesel_set_updated_at()', _tbl);
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE FUNCTION diesel_set_updated_at() RETURNS trigger AS $$
BEGIN
IF (
NEW IS DISTINCT FROM OLD AND
NEW.updated_at IS NOT DISTINCT FROM OLD.updated_at
) THEN
NEW.updated_at := current_timestamp;
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;


@ -1,3 +0,0 @@
-- This file should undo anything in `up.sql`
DROP INDEX listeners_actor_id_index;
DROP TABLE listeners;


@ -1,11 +0,0 @@
-- Your SQL goes here
CREATE TABLE listeners (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
actor_id TEXT UNIQUE NOT NULL,
created_at TIMESTAMP NOT NULL,
updated_at TIMESTAMP
);
CREATE INDEX listeners_actor_id_index ON listeners(actor_id);
SELECT diesel_manage_updated_at('listeners');


@ -1,3 +0,0 @@
-- This file should undo anything in `up.sql`
DROP INDEX blocks_domain_name_index;
DROP TABLE blocks;


@ -1,11 +0,0 @@
-- Your SQL goes here
CREATE TABLE blocks (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
domain_name TEXT UNIQUE NOT NULL,
created_at TIMESTAMP NOT NULL,
updated_at TIMESTAMP
);
CREATE INDEX blocks_domain_name_index ON blocks(domain_name);
SELECT diesel_manage_updated_at('blocks');


@ -1,3 +0,0 @@
-- This file should undo anything in `up.sql`
DROP INDEX whitelists_domain_name_index;
DROP TABLE whitelists;


@ -1,11 +0,0 @@
-- Your SQL goes here
CREATE TABLE whitelists (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
domain_name TEXT UNIQUE NOT NULL,
created_at TIMESTAMP NOT NULL,
updated_at TIMESTAMP
);
CREATE INDEX whitelists_domain_name_index ON whitelists(domain_name);
SELECT diesel_manage_updated_at('whitelists');


@ -1,3 +0,0 @@
-- This file should undo anything in `up.sql`
DROP INDEX settings_key_index;
DROP TABLE settings;


@ -1,12 +0,0 @@
-- Your SQL goes here
CREATE TABLE settings (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
key TEXT UNIQUE NOT NULL,
value TEXT NOT NULL,
created_at TIMESTAMP NOT NULL,
updated_at TIMESTAMP
);
CREATE INDEX settings_key_index ON settings(key);
SELECT diesel_manage_updated_at('settings');


@ -1,8 +0,0 @@
-- This file should undo anything in `up.sql`
DROP TRIGGER IF EXISTS whitelists_notify ON whitelists;
DROP TRIGGER IF EXISTS blocks_notify ON blocks;
DROP TRIGGER IF EXISTS listeners_notify ON listeners;
DROP FUNCTION IF EXISTS invoke_whitelists_trigger();
DROP FUNCTION IF EXISTS invoke_blocks_trigger();
DROP FUNCTION IF EXISTS invoke_listeners_trigger();


@ -1,99 +0,0 @@
-- Your SQL goes here
CREATE OR REPLACE FUNCTION invoke_listeners_trigger ()
RETURNS TRIGGER
LANGUAGE plpgsql
AS $$
DECLARE
rec RECORD;
channel TEXT;
payload TEXT;
BEGIN
case TG_OP
WHEN 'INSERT' THEN
rec := NEW;
channel := 'new_listeners';
payload := NEW.actor_id;
WHEN 'DELETE' THEN
rec := OLD;
channel := 'rm_listeners';
payload := OLD.actor_id;
ELSE
RAISE EXCEPTION 'Unknown TG_OP: "%". Should not occur!', TG_OP;
END CASE;
PERFORM pg_notify(channel, payload::TEXT);
RETURN rec;
END;
$$;
CREATE OR REPLACE FUNCTION invoke_blocks_trigger ()
RETURNS TRIGGER
LANGUAGE plpgsql
AS $$
DECLARE
rec RECORD;
channel TEXT;
payload TEXT;
BEGIN
case TG_OP
WHEN 'INSERT' THEN
rec := NEW;
channel := 'new_blocks';
payload := NEW.domain_name;
WHEN 'DELETE' THEN
rec := OLD;
channel := 'rm_blocks';
payload := OLD.domain_name;
ELSE
RAISE EXCEPTION 'Unknown TG_OP: "%". Should not occur!', TG_OP;
END CASE;
PERFORM pg_notify(channel, payload::TEXT);
RETURN NULL;
END;
$$;
CREATE OR REPLACE FUNCTION invoke_whitelists_trigger ()
RETURNS TRIGGER
LANGUAGE plpgsql
AS $$
DECLARE
rec RECORD;
channel TEXT;
payload TEXT;
BEGIN
case TG_OP
WHEN 'INSERT' THEN
rec := NEW;
channel := 'new_whitelists';
payload := NEW.domain_name;
WHEN 'DELETE' THEN
rec := OLD;
channel := 'rm_whitelists';
payload := OLD.domain_name;
ELSE
RAISE EXCEPTION 'Unknown TG_OP: "%". Should not occur!', TG_OP;
END CASE;
PERFORM pg_notify(channel, payload::TEXT);
RETURN rec;
END;
$$;
CREATE TRIGGER listeners_notify
AFTER INSERT OR UPDATE OR DELETE
ON listeners
FOR EACH ROW
EXECUTE PROCEDURE invoke_listeners_trigger();
CREATE TRIGGER blocks_notify
AFTER INSERT OR UPDATE OR DELETE
ON blocks
FOR EACH ROW
EXECUTE PROCEDURE invoke_blocks_trigger();
CREATE TRIGGER whitelists_notify
AFTER INSERT OR UPDATE OR DELETE
ON whitelists
FOR EACH ROW
EXECUTE PROCEDURE invoke_whitelists_trigger();


@ -1,3 +0,0 @@
-- This file should undo anything in `up.sql`
DROP INDEX jobs_queue_status_index;
DROP TABLE jobs;


@ -1,17 +0,0 @@
-- Your SQL goes here
CREATE TABLE jobs (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
job_id UUID UNIQUE NOT NULL,
job_queue TEXT NOT NULL,
job_timeout BIGINT NOT NULL,
job_updated TIMESTAMP NOT NULL,
job_status TEXT NOT NULL,
job_value JSONB NOT NULL,
job_next_run TIMESTAMP,
created_at TIMESTAMP NOT NULL,
updated_at TIMESTAMP NOT NULL DEFAULT NOW()
);
CREATE INDEX jobs_queue_status_index ON jobs(job_queue, job_status);
SELECT diesel_manage_updated_at('jobs');


@ -1,4 +0,0 @@
-- This file should undo anything in `up.sql`
DROP TRIGGER IF EXISTS actors_notify ON actors;
DROP FUNCTION IF EXISTS invoke_actors_trigger();
DROP TABLE actors;


@ -1,49 +0,0 @@
-- Your SQL goes here
CREATE TABLE actors (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
actor_id TEXT UNIQUE NOT NULL,
public_key TEXT NOT NULL,
public_key_id TEXT UNIQUE NOT NULL,
listener_id UUID NOT NULL REFERENCES listeners(id) ON DELETE CASCADE,
created_at TIMESTAMP NOT NULL,
updated_at TIMESTAMP NOT NULL DEFAULT NOW()
);
SELECT diesel_manage_updated_at('actors');
CREATE OR REPLACE FUNCTION invoke_actors_trigger ()
RETURNS TRIGGER
LANGUAGE plpgsql
AS $$
DECLARE
rec RECORD;
channel TEXT;
payload TEXT;
BEGIN
case TG_OP
WHEN 'INSERT' THEN
rec := NEW;
channel := 'new_actors';
payload := NEW.actor_id;
WHEN 'UPDATE' THEN
rec := NEW;
channel := 'new_actors';
payload := NEW.actor_id;
WHEN 'DELETE' THEN
rec := OLD;
channel := 'rm_actors';
payload := OLD.actor_id;
ELSE
RAISE EXCEPTION 'Unknown TG_OP: "%". Should not occur!', TG_OP;
END CASE;
PERFORM pg_notify(channel, payload::TEXT);
RETURN rec;
END;
$$;
CREATE TRIGGER actors_notify
AFTER INSERT OR UPDATE OR DELETE
ON actors
FOR EACH ROW
EXECUTE PROCEDURE invoke_actors_trigger();


@ -1,2 +0,0 @@
-- This file should undo anything in `up.sql`
DROP TABLE nodes;


@ -1,12 +0,0 @@
-- Your SQL goes here
CREATE TABLE nodes (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
listener_id UUID NOT NULL REFERENCES listeners(id) ON DELETE CASCADE,
nodeinfo JSONB,
instance JSONB,
contact JSONB,
created_at TIMESTAMP NOT NULL,
updated_at TIMESTAMP NOT NULL DEFAULT NOW()
);
SELECT diesel_manage_updated_at('nodes');


@ -1,3 +0,0 @@
-- This file should undo anything in `up.sql`
DROP TRIGGER IF EXISTS nodes_notify ON nodes;
DROP FUNCTION IF EXISTS invoke_nodes_trigger();


@ -1,37 +0,0 @@
-- Your SQL goes here
CREATE OR REPLACE FUNCTION invoke_nodes_trigger ()
RETURNS TRIGGER
LANGUAGE plpgsql
AS $$
DECLARE
rec RECORD;
channel TEXT;
payload TEXT;
BEGIN
case TG_OP
WHEN 'INSERT' THEN
rec := NEW;
channel := 'new_nodes';
payload := NEW.listener_id;
WHEN 'UPDATE' THEN
rec := NEW;
channel := 'new_nodes';
payload := NEW.listener_id;
WHEN 'DELETE' THEN
rec := OLD;
channel := 'rm_nodes';
payload := OLD.listener_id;
ELSE
RAISE EXCEPTION 'Unknown TG_OP: "%". Should not occur!', TG_OP;
END CASE;
PERFORM pg_notify(channel, payload::TEXT);
RETURN rec;
END;
$$;
CREATE TRIGGER nodes_notify
AFTER INSERT OR UPDATE OR DELETE
ON nodes
FOR EACH ROW
EXECUTE PROCEDURE invoke_nodes_trigger();


@ -1,2 +0,0 @@
-- This file should undo anything in `up.sql`
ALTER TABLE nodes DROP CONSTRAINT nodes_listener_ids_unique;


@ -1,2 +0,0 @@
-- Your SQL goes here
ALTER TABLE nodes ADD CONSTRAINT nodes_listener_ids_unique UNIQUE (listener_id);


@ -1,2 +0,0 @@
-- This file should undo anything in `up.sql`
DROP TABLE media;


@ -1,10 +0,0 @@
-- Your SQL goes here
CREATE TABLE media (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
media_id UUID UNIQUE NOT NULL,
url TEXT UNIQUE NOT NULL,
created_at TIMESTAMP NOT NULL,
updated_at TIMESTAMP NOT NULL DEFAULT NOW()
);
SELECT diesel_manage_updated_at('media');

relay.nix Normal file

@ -0,0 +1,23 @@
{ lib
, nixosTests
, rustPlatform
}:
rustPlatform.buildRustPackage {
pname = "relay";
version = "0.3.116";
src = ./.;
cargoLock.lockFile = ./Cargo.lock;
RUSTFLAGS = "--cfg tokio_unstable";
nativeBuildInputs = [ ];
passthru.tests = { inherit (nixosTests) relay; };
meta = with lib; {
description = "An ActivityPub relay";
homepage = "https://git.asonix.dog/asonix/relay";
license = with licenses; [ agpl3Plus ];
};
}


@ -20,7 +20,6 @@ body * {
}
header {
.header-text {
max-width: 700px;
margin: auto;
@ -42,7 +41,7 @@ header {
}
}
section {
article {
background-color: #fff;
color: #333;
border: 1px solid #e5e5e5;
@ -52,8 +51,16 @@ section {
max-width: 700px;
padding-bottom: 32px;
> p:first-child {
margin-top: 0;
section {
border-bottom: 1px solid #e5e5e5;
> h4:first-child,
> p:first-child {
margin-top: 0;
}
> p:last-child {
margin-bottom: 0;
}
}
h3 {
@ -68,18 +75,20 @@ section {
li {
padding-top: 36px;
border-bottom: 1px solid #e5e5e5;
}
.padded {
padding: 0 24px;
}
.local-explainer,
.joining {
padding: 24px;
}
a {
transition: color .2s cubic-bezier(.3,0,.5,1);
&,
&:focus,
&:active {
@ -160,6 +169,25 @@ footer {
.instance-admin {
margin: 24px 0;
}
.description .please-stay {
h3 {
padding: 0;
margin: 0;
border-bottom: none;
}
ul {
list-style: disc;
padding-left: 24px;
li {
padding: 0;
}
}
article section {
border-bottom: none;
}
}
}
a {
@ -208,7 +236,7 @@ footer {
padding: 24px;
}
section {
article {
border-left: none;
border-right: none;
border-radius: 0;
@ -238,3 +266,14 @@ footer {
}
}
}
@media(max-width: 360px) {
.admin {
flex-direction: column;
}
.right {
margin: 16px;
margin-top: 0;
}
}

src/admin.rs Normal file

@ -0,0 +1,32 @@
use activitystreams::iri_string::types::IriString;
use std::collections::{BTreeMap, BTreeSet};
use time::OffsetDateTime;
pub mod client;
pub mod routes;
#[derive(serde::Deserialize, serde::Serialize)]
pub(crate) struct Domains {
domains: Vec<String>,
}
#[derive(serde::Deserialize, serde::Serialize)]
pub(crate) struct AllowedDomains {
pub(crate) allowed_domains: Vec<String>,
}
#[derive(serde::Deserialize, serde::Serialize)]
pub(crate) struct BlockedDomains {
pub(crate) blocked_domains: Vec<String>,
}
#[derive(serde::Deserialize, serde::Serialize)]
pub(crate) struct ConnectedActors {
pub(crate) connected_actors: Vec<IriString>,
}
#[derive(serde::Deserialize, serde::Serialize)]
pub(crate) struct LastSeen {
pub(crate) last_seen: BTreeMap<OffsetDateTime, BTreeSet<String>>,
pub(crate) never: Vec<String>,
}

src/admin/client.rs Normal file

@ -0,0 +1,133 @@
use crate::{
admin::{AllowedDomains, BlockedDomains, ConnectedActors, Domains, LastSeen},
collector::Snapshot,
config::{AdminUrlKind, Config},
error::{Error, ErrorKind},
extractors::XApiToken,
};
use reqwest_middleware::ClientWithMiddleware;
use serde::de::DeserializeOwned;
pub(crate) async fn allow(
client: &ClientWithMiddleware,
config: &Config,
domains: Vec<String>,
) -> Result<(), Error> {
post_domains(client, config, domains, AdminUrlKind::Allow).await
}
pub(crate) async fn disallow(
client: &ClientWithMiddleware,
config: &Config,
domains: Vec<String>,
) -> Result<(), Error> {
post_domains(client, config, domains, AdminUrlKind::Disallow).await
}
pub(crate) async fn block(
client: &ClientWithMiddleware,
config: &Config,
domains: Vec<String>,
) -> Result<(), Error> {
post_domains(client, config, domains, AdminUrlKind::Block).await
}
pub(crate) async fn unblock(
client: &ClientWithMiddleware,
config: &Config,
domains: Vec<String>,
) -> Result<(), Error> {
post_domains(client, config, domains, AdminUrlKind::Unblock).await
}
pub(crate) async fn allowed(
client: &ClientWithMiddleware,
config: &Config,
) -> Result<AllowedDomains, Error> {
get_results(client, config, AdminUrlKind::Allowed).await
}
pub(crate) async fn blocked(
client: &ClientWithMiddleware,
config: &Config,
) -> Result<BlockedDomains, Error> {
get_results(client, config, AdminUrlKind::Blocked).await
}
pub(crate) async fn connected(
client: &ClientWithMiddleware,
config: &Config,
) -> Result<ConnectedActors, Error> {
get_results(client, config, AdminUrlKind::Connected).await
}
pub(crate) async fn stats(
client: &ClientWithMiddleware,
config: &Config,
) -> Result<Snapshot, Error> {
get_results(client, config, AdminUrlKind::Stats).await
}
pub(crate) async fn last_seen(
client: &ClientWithMiddleware,
config: &Config,
) -> Result<LastSeen, Error> {
get_results(client, config, AdminUrlKind::LastSeen).await
}
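// Shared GET helper for the admin API: authenticates with the X-Api-Token
// header and deserializes the endpoint's JSON response.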
async fn get_results<T: DeserializeOwned>(
client: &ClientWithMiddleware,
config: &Config,
url_kind: AdminUrlKind,
) -> Result<T, Error> {
let x_api_token = config.x_api_token().ok_or(ErrorKind::MissingApiToken)?;
let iri = config.generate_admin_url(url_kind);
let res = client
.get(iri.as_str())
.header(XApiToken::http1_name(), x_api_token.to_string())
.send()
.await
.map_err(|e| ErrorKind::SendRequest(iri.to_string(), e.to_string()))?;
if !res.status().is_success() {
return Err(ErrorKind::Status(
iri.to_string(),
crate::http1::status_to_http02(res.status()),
)
.into());
}
let t = res
.json()
.await
.map_err(|e| ErrorKind::ReceiveResponse(iri.to_string(), e.to_string()))?;
Ok(t)
}
async fn post_domains(
client: &ClientWithMiddleware,
config: &Config,
domains: Vec<String>,
url_kind: AdminUrlKind,
) -> Result<(), Error> {
let x_api_token = config.x_api_token().ok_or(ErrorKind::MissingApiToken)?;
let iri = config.generate_admin_url(url_kind);
let res = client
.post(iri.as_str())
.header(XApiToken::http1_name(), x_api_token.to_string())
.json(&Domains { domains })
.send()
.await
.map_err(|e| ErrorKind::SendRequest(iri.to_string(), e.to_string()))?;
if !res.status().is_success() {
tracing::warn!("Failed to allow domains");
}
Ok(())
}

src/admin/routes.rs Normal file

@ -0,0 +1,90 @@
use crate::{
admin::{AllowedDomains, BlockedDomains, ConnectedActors, Domains, LastSeen},
collector::{MemoryCollector, Snapshot},
error::Error,
extractors::Admin,
};
use actix_web::{
web::{Data, Json},
HttpResponse,
};
use std::collections::{BTreeMap, BTreeSet};
use time::OffsetDateTime;
pub(crate) async fn allow(
admin: Admin,
Json(Domains { domains }): Json<Domains>,
) -> Result<HttpResponse, Error> {
admin.db_ref().add_allows(domains).await?;
Ok(HttpResponse::NoContent().finish())
}
pub(crate) async fn disallow(
admin: Admin,
Json(Domains { domains }): Json<Domains>,
) -> Result<HttpResponse, Error> {
admin.db_ref().remove_allows(domains).await?;
Ok(HttpResponse::NoContent().finish())
}
pub(crate) async fn block(
admin: Admin,
Json(Domains { domains }): Json<Domains>,
) -> Result<HttpResponse, Error> {
admin.db_ref().add_blocks(domains).await?;
Ok(HttpResponse::NoContent().finish())
}
pub(crate) async fn unblock(
admin: Admin,
Json(Domains { domains }): Json<Domains>,
) -> Result<HttpResponse, Error> {
admin.db_ref().remove_blocks(domains).await?;
Ok(HttpResponse::NoContent().finish())
}
pub(crate) async fn allowed(admin: Admin) -> Result<Json<AllowedDomains>, Error> {
let allowed_domains = admin.db_ref().allows().await?;
Ok(Json(AllowedDomains { allowed_domains }))
}
pub(crate) async fn blocked(admin: Admin) -> Result<Json<BlockedDomains>, Error> {
let blocked_domains = admin.db_ref().blocks().await?;
Ok(Json(BlockedDomains { blocked_domains }))
}
pub(crate) async fn connected(admin: Admin) -> Result<Json<ConnectedActors>, Error> {
let connected_actors = admin.db_ref().connected_ids().await?;
Ok(Json(ConnectedActors { connected_actors }))
}
pub(crate) async fn stats(
_admin: Admin,
collector: Data<MemoryCollector>,
) -> Result<Json<Snapshot>, Error> {
Ok(Json(collector.snapshot()))
}
pub(crate) async fn last_seen(admin: Admin) -> Result<Json<LastSeen>, Error> {
let nodes = admin.db_ref().last_seen().await?;
let mut last_seen: BTreeMap<OffsetDateTime, BTreeSet<String>> = BTreeMap::new();
let mut never = Vec::new();
for (domain, datetime) in nodes {
if let Some(datetime) = datetime {
last_seen.entry(datetime).or_default().insert(domain);
} else {
never.push(domain);
}
}
Ok(Json(LastSeen { last_seen, never }))
}


@ -1,19 +1,29 @@
use activitystreams_ext::{Ext1, UnparsedExtension};
use activitystreams_new::{
use activitystreams::{
activity::ActorAndObject,
actor::{Actor, ApActor},
primitives::XsdAnyUri,
iri_string::types::IriString,
unparsed::UnparsedMutExt,
};
use activitystreams_ext::{Ext1, UnparsedExtension};
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
#[derive(Clone, serde::Deserialize, serde::Serialize)]
#[serde(rename_all = "camelCase")]
pub struct PublicKeyInner {
pub id: XsdAnyUri,
pub owner: XsdAnyUri,
pub id: IriString,
pub owner: IriString,
pub public_key_pem: String,
}
impl std::fmt::Debug for PublicKeyInner {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("PublicKeyInner")
.field("id", &self.id.to_string())
.field("owner", &self.owner.to_string())
.field("public_key_pem", &self.public_key_pem)
.finish()
}
}
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
#[serde(rename_all = "camelCase")]
pub struct PublicKey {
@ -24,11 +34,13 @@ pub struct PublicKey {
#[serde(rename_all = "PascalCase")]
pub enum ValidTypes {
Accept,
Add,
Announce,
Create,
Delete,
Follow,
Reject,
Remove,
Undo,
Update,
}


@ -1,54 +1,65 @@
use structopt::StructOpt;
use clap::Parser;
#[derive(Debug, StructOpt)]
#[derive(Debug, Parser)]
#[structopt(name = "relay", about = "An activitypub relay")]
pub struct Args {
#[structopt(short, help = "A list of domains that should be blocked")]
pub(crate) struct Args {
#[arg(short, help = "A list of domains that should be blocked")]
blocks: Vec<String>,
#[structopt(short, help = "A list of domains that should be whitelisted")]
whitelists: Vec<String>,
#[arg(short, help = "A list of domains that should be allowed")]
allowed: Vec<String>,
#[structopt(short, long, help = "Undo whitelisting or blocking domains")]
#[arg(short, long, help = "Undo allowing or blocking domains")]
undo: bool,
#[structopt(
short,
long,
help = "Only process background jobs, do not start the relay server"
)]
jobs_only: bool,
#[arg(short, long, help = "List allowed and blocked domains")]
list: bool,
#[structopt(
#[arg(short, long, help = "Get statistics from the server")]
stats: bool,
#[arg(
short,
long,
help = "Only run the relay server, do not process background jobs"
help = "List domains by when they were last succesfully contacted"
)]
no_jobs: bool,
contacted: bool,
}
impl Args {
pub fn new() -> Self {
Self::from_args()
pub(crate) fn any(&self) -> bool {
!self.blocks.is_empty()
|| !self.allowed.is_empty()
|| self.list
|| self.stats
|| self.contacted
}
pub fn blocks(&self) -> &[String] {
pub(crate) fn new() -> Self {
Self::parse()
}
pub(crate) fn blocks(&self) -> &[String] {
&self.blocks
}
pub fn whitelists(&self) -> &[String] {
&self.whitelists
pub(crate) fn allowed(&self) -> &[String] {
&self.allowed
}
pub fn undo(&self) -> bool {
pub(crate) fn undo(&self) -> bool {
self.undo
}
pub fn jobs_only(&self) -> bool {
self.jobs_only
pub(crate) fn list(&self) -> bool {
self.list
}
pub fn no_jobs(&self) -> bool {
self.no_jobs
pub(crate) fn stats(&self) -> bool {
self.stats
}
pub(crate) fn contacted(&self) -> bool {
self.contacted
}
}


@ -1,8 +1,53 @@
use ructe::Ructe;
use std::{fs::File, io::Read, path::Path, process::Command};
fn main() -> Result<(), anyhow::Error> {
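// Capture git metadata at build time and expose it to the crate via the
// GIT_HASH, GIT_SHORT_HASH, and GIT_BRANCH environment variables.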
fn git_info() {
if let Ok(output) = Command::new("git").args(["rev-parse", "HEAD"]).output() {
if output.status.success() {
let git_hash = String::from_utf8_lossy(&output.stdout);
println!("cargo:rustc-env=GIT_HASH={git_hash}");
println!("cargo:rustc-env=GIT_SHORT_HASH={}", &git_hash[..8])
}
}
if let Ok(output) = Command::new("git")
.args(["rev-parse", "--abbrev-ref", "HEAD"])
.output()
{
if output.status.success() {
let git_branch = String::from_utf8_lossy(&output.stdout);
println!("cargo:rustc-env=GIT_BRANCH={git_branch}");
}
}
}
fn version_info() -> color_eyre::Result<()> {
let cargo_toml = Path::new(&std::env::var("CARGO_MANIFEST_DIR")?).join("Cargo.toml");
let mut file = File::open(cargo_toml)?;
let mut cargo_data = String::new();
file.read_to_string(&mut cargo_data)?;
let data: toml::Value = toml::from_str(&cargo_data)?;
if let Some(version) = data["package"]["version"].as_str() {
println!("cargo:rustc-env=PKG_VERSION={version}");
}
if let Some(name) = data["package"]["name"].as_str() {
println!("cargo:rustc-env=PKG_NAME={name}");
}
Ok(())
}
fn main() -> color_eyre::Result<()> {
dotenv::dotenv().ok();
git_info();
version_info()?;
let mut ructe = Ructe::from_env()?;
let mut statics = ructe.statics()?;
statics.add_sass_file("scss/index.scss")?;

src/collector.rs Normal file

@ -0,0 +1,425 @@
use metrics::{Key, Metadata, Recorder, SetRecorderError};
use metrics_util::{
registry::{AtomicStorage, GenerationalStorage, Recency, Registry},
MetricKindMask, Summary,
};
use quanta::Clock;
use std::{
collections::{BTreeMap, HashMap},
sync::{atomic::Ordering, Arc, RwLock},
time::Duration,
};
const SECONDS: u64 = 1;
const MINUTES: u64 = 60 * SECONDS;
const HOURS: u64 = 60 * MINUTES;
const DAYS: u64 = 24 * HOURS;
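// Fold a usize into the u32 range so collection sizes can be recorded as gauges.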
pub(crate) fn recordable(len: usize) -> u32 {
((len as u64) % u64::from(u32::MAX)) as u32
}
type DistributionMap = BTreeMap<Vec<(String, String)>, Summary>;
#[derive(Clone)]
pub struct MemoryCollector {
inner: Arc<Inner>,
}
struct Inner {
descriptions: RwLock<HashMap<String, metrics::SharedString>>,
distributions: RwLock<HashMap<String, DistributionMap>>,
recency: Recency<Key>,
registry: Registry<Key, GenerationalStorage<AtomicStorage>>,
}
#[derive(Debug, serde::Deserialize, serde::Serialize)]
struct Counter {
labels: BTreeMap<String, String>,
value: u64,
}
impl std::fmt::Display for Counter {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let labels = self
.labels
.iter()
.map(|(k, v)| format!("{k}: {v}"))
.collect::<Vec<_>>()
.join(", ");
write!(f, "{labels} - {}", self.value)
}
}
#[derive(Debug, serde::Deserialize, serde::Serialize)]
struct Gauge {
labels: BTreeMap<String, String>,
value: f64,
}
impl std::fmt::Display for Gauge {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let labels = self
.labels
.iter()
.map(|(k, v)| format!("{k}: {v}"))
.collect::<Vec<_>>()
.join(", ");
write!(f, "{labels} - {}", self.value)
}
}
#[derive(Debug, serde::Deserialize, serde::Serialize)]
struct Histogram {
labels: BTreeMap<String, String>,
value: Vec<(f64, Option<f64>)>,
}
impl std::fmt::Display for Histogram {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let labels = self
.labels
.iter()
.map(|(k, v)| format!("{k}: {v}"))
.collect::<Vec<_>>()
.join(", ");
let value = self
.value
.iter()
.map(|(k, v)| {
if let Some(v) = v {
format!("{k}: {v:.6}")
} else {
format!("{k}: None,")
}
})
.collect::<Vec<_>>()
.join(", ");
write!(f, "{labels} - {value}")
}
}
#[derive(Debug, serde::Deserialize, serde::Serialize)]
pub(crate) struct Snapshot {
counters: HashMap<String, Vec<Counter>>,
gauges: HashMap<String, Vec<Gauge>>,
histograms: HashMap<String, Vec<Histogram>>,
}
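// Pairs of (started, finished) counters that are merged into a single
// "running" value when a snapshot is presented.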
const PAIRS: [((&str, &str), &str); 2] = [
(
(
"background-jobs.worker.started",
"background-jobs.worker.finished",
),
"background-jobs.worker.running",
),
(
(
"background-jobs.job.started",
"background-jobs.job.finished",
),
"background-jobs.job.running",
),
];
#[derive(Default)]
struct MergeCounter {
start: Option<Counter>,
finish: Option<Counter>,
}
impl MergeCounter {
fn merge(self) -> Option<Counter> {
match (self.start, self.finish) {
(Some(start), Some(end)) => Some(Counter {
labels: start.labels,
value: start.value.saturating_sub(end.value),
}),
(Some(only), None) => Some(only),
(None, Some(only)) => Some(Counter {
labels: only.labels,
value: 0,
}),
(None, None) => None,
}
}
}
impl Snapshot {
pub(crate) fn present(self) {
if !self.counters.is_empty() {
println!("Counters");
let mut merging = HashMap::new();
for (key, counters) in self.counters {
if let Some(((start, _), name)) = PAIRS
.iter()
.find(|((start, finish), _)| *start == key || *finish == key)
{
let entry = merging.entry(name).or_insert_with(HashMap::new);
for counter in counters {
let merge_counter = entry
.entry(counter.labels.clone())
.or_insert_with(MergeCounter::default);
if key == *start {
merge_counter.start = Some(counter);
} else {
merge_counter.finish = Some(counter);
}
}
continue;
}
println!("\t{key}");
for counter in counters {
println!("\t\t{counter}");
}
}
for (key, counters) in merging {
println!("\t{key}");
for (_, counter) in counters {
if let Some(counter) = counter.merge() {
println!("\t\t{counter}");
}
}
}
}
if !self.gauges.is_empty() {
println!("Gauges");
for (key, gauges) in self.gauges {
println!("\t{key}");
for gauge in gauges {
println!("\t\t{gauge}");
}
}
}
if !self.histograms.is_empty() {
println!("Histograms");
for (key, histograms) in self.histograms {
println!("\t{key}");
for histogram in histograms {
println!("\t\t{histogram}");
}
}
}
}
}
fn key_to_parts(key: &Key) -> (String, Vec<(String, String)>) {
let labels = key
.labels()
.map(|label| (label.key().to_string(), label.value().to_string()))
.collect();
let name = key.name().to_string();
(name, labels)
}
impl Inner {
fn snapshot_counters(&self) -> HashMap<String, Vec<Counter>> {
let mut counters = HashMap::new();
for (key, counter) in self.registry.get_counter_handles() {
let gen = counter.get_generation();
if !self.recency.should_store_counter(&key, gen, &self.registry) {
continue;
}
let (name, labels) = key_to_parts(&key);
let value = counter.get_inner().load(Ordering::Acquire);
counters.entry(name).or_insert_with(Vec::new).push(Counter {
labels: labels.into_iter().collect(),
value,
});
}
counters
}
fn snapshot_gauges(&self) -> HashMap<String, Vec<Gauge>> {
let mut gauges = HashMap::new();
for (key, gauge) in self.registry.get_gauge_handles() {
let gen = gauge.get_generation();
if !self.recency.should_store_gauge(&key, gen, &self.registry) {
continue;
}
let (name, labels) = key_to_parts(&key);
let value = f64::from_bits(gauge.get_inner().load(Ordering::Acquire));
gauges.entry(name).or_insert_with(Vec::new).push(Gauge {
labels: labels.into_iter().collect(),
value,
})
}
gauges
}
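// Drain histogram samples into rolling Summary sketches, evicting series
// (and their descriptions) that have not been updated recently.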
fn snapshot_histograms(&self) -> HashMap<String, Vec<Histogram>> {
for (key, histogram) in self.registry.get_histogram_handles() {
let gen = histogram.get_generation();
let (name, labels) = key_to_parts(&key);
if !self
.recency
.should_store_histogram(&key, gen, &self.registry)
{
let mut d = self.distributions.write().unwrap();
let delete_by_name = if let Some(by_name) = d.get_mut(&name) {
by_name.remove(&labels);
by_name.is_empty()
} else {
false
};
drop(d);
if delete_by_name {
self.descriptions.write().unwrap().remove(&name);
}
continue;
}
let mut d = self.distributions.write().unwrap();
let outer_entry = d.entry(name.clone()).or_default();
let entry = outer_entry
.entry(labels)
.or_insert_with(Summary::with_defaults);
histogram.get_inner().clear_with(|samples| {
for sample in samples {
entry.add(*sample);
}
});
let mut total_len = 0;
for dist_map in d.values() {
total_len += dist_map.len();
}
metrics::gauge!("relay.collector.distributions.size").set(recordable(total_len));
}
let d = self.distributions.read().unwrap().clone();
d.into_iter()
.map(|(key, value)| {
(
key,
value
.into_iter()
.map(|(labels, summary)| Histogram {
labels: labels.into_iter().collect(),
value: [0.001, 0.01, 0.05, 0.1, 0.5, 0.9, 0.99, 1.0]
.into_iter()
.map(|q| (q, summary.quantile(q)))
.collect(),
})
.collect(),
)
})
.collect()
}
fn snapshot(&self) -> Snapshot {
Snapshot {
counters: self.snapshot_counters(),
gauges: self.snapshot_gauges(),
histograms: self.snapshot_histograms(),
}
}
}
impl MemoryCollector {
pub(crate) fn new() -> Self {
MemoryCollector {
inner: Arc::new(Inner {
descriptions: Default::default(),
distributions: Default::default(),
recency: Recency::new(
Clock::new(),
MetricKindMask::ALL,
Some(Duration::from_secs(5 * DAYS)),
),
registry: Registry::new(GenerationalStorage::atomic()),
}),
}
}
pub(crate) fn snapshot(&self) -> Snapshot {
self.inner.snapshot()
}
fn add_description_if_missing(
&self,
key: &metrics::KeyName,
description: metrics::SharedString,
) {
let mut d = self.inner.descriptions.write().unwrap();
d.entry(key.as_str().to_owned()).or_insert(description);
metrics::gauge!("relay.collector.descriptions.size").set(recordable(d.len()));
}
pub(crate) fn install(&self) -> Result<(), SetRecorderError<Self>> {
metrics::set_global_recorder(self.clone())
}
}
impl Recorder for MemoryCollector {
fn describe_counter(
&self,
key: metrics::KeyName,
_: Option<metrics::Unit>,
description: metrics::SharedString,
) {
self.add_description_if_missing(&key, description)
}
fn describe_gauge(
&self,
key: metrics::KeyName,
_: Option<metrics::Unit>,
description: metrics::SharedString,
) {
self.add_description_if_missing(&key, description)
}
fn describe_histogram(
&self,
key: metrics::KeyName,
_: Option<metrics::Unit>,
description: metrics::SharedString,
) {
self.add_description_if_missing(&key, description)
}
fn register_counter(&self, key: &Key, _: &Metadata<'_>) -> metrics::Counter {
self.inner
.registry
.get_or_create_counter(key, |c| c.clone().into())
}
fn register_gauge(&self, key: &Key, _: &Metadata<'_>) -> metrics::Gauge {
self.inner
.registry
.get_or_create_gauge(key, |c| c.clone().into())
}
fn register_histogram(&self, key: &Key, _: &Metadata<'_>) -> metrics::Histogram {
self.inner
.registry
.get_or_create_histogram(key, |c| c.clone().into())
}
}


@ -1,41 +1,102 @@
use crate::{data::ActorCache, error::MyError, middleware::MyVerify, requests::Requests};
use activitystreams_new::{primitives::XsdAnyUri, uri};
use crate::{
error::Error,
extractors::{AdminConfig, XApiToken},
};
use activitystreams::{
iri,
iri_string::{
format::ToDedicatedString,
resolve::FixedBaseResolver,
types::{IriAbsoluteString, IriFragmentStr, IriRelativeStr, IriString},
},
};
use config::Environment;
use http_signature_normalization_actix::prelude::{VerifyDigest, VerifySignature};
use sha2::{Digest, Sha256};
use std::net::IpAddr;
use http_signature_normalization_actix::{digest::ring::Sha256, prelude::VerifyDigest};
use rustls::sign::CertifiedKey;
use std::{
net::{IpAddr, SocketAddr},
path::PathBuf,
};
use uuid::Uuid;
#[derive(Clone, Debug, serde::Deserialize)]
pub struct ParsedConfig {
pub(crate) struct ParsedConfig {
hostname: String,
addr: IpAddr,
port: u16,
debug: bool,
whitelist_mode: bool,
restricted_mode: bool,
validate_signatures: bool,
https: bool,
database_url: String,
pretty_log: bool,
publish_blocks: bool,
max_connections: usize,
sled_path: PathBuf,
source_repo: IriString,
repository_commit_base: String,
opentelemetry_url: Option<IriString>,
telegram_token: Option<String>,
telegram_admin_handle: Option<String>,
api_token: Option<String>,
tls_key: Option<PathBuf>,
tls_cert: Option<PathBuf>,
footer_blurb: Option<String>,
local_domains: Option<String>,
local_blurb: Option<String>,
prometheus_addr: Option<IpAddr>,
prometheus_port: Option<u16>,
deliver_concurrency: u64,
client_timeout: u64,
proxy_url: Option<IriString>,
proxy_username: Option<String>,
proxy_password: Option<String>,
signature_threads: Option<usize>,
}
#[derive(Clone, Debug)]
#[derive(Clone)]
pub struct Config {
hostname: String,
addr: IpAddr,
port: u16,
debug: bool,
whitelist_mode: bool,
restricted_mode: bool,
validate_signatures: bool,
database_url: String,
pretty_log: bool,
publish_blocks: bool,
max_connections: usize,
base_uri: XsdAnyUri,
base_uri: IriAbsoluteString,
sled_path: PathBuf,
source_repo: IriString,
opentelemetry_url: Option<IriString>,
telegram_token: Option<String>,
telegram_admin_handle: Option<String>,
api_token: Option<String>,
tls: Option<TlsConfig>,
footer_blurb: Option<String>,
local_domains: Vec<String>,
local_blurb: Option<String>,
prometheus_config: Option<PrometheusConfig>,
deliver_concurrency: u64,
client_timeout: u64,
proxy_config: Option<ProxyConfig>,
signature_threads: Option<usize>,
}
#[derive(Clone)]
struct TlsConfig {
key: PathBuf,
cert: PathBuf,
}
#[derive(Clone, Debug)]
struct PrometheusConfig {
addr: IpAddr,
port: u16,
}
#[derive(Clone, Debug)]
struct ProxyConfig {
url: IriString,
auth: Option<(String, String)>,
}
#[derive(Debug)]
pub enum UrlKind {
Activity,
Actor,
@ -49,55 +110,286 @@ pub enum UrlKind {
Outbox,
}
#[derive(Debug)]
pub enum AdminUrlKind {
Allow,
Disallow,
Block,
Unblock,
Allowed,
Blocked,
Connected,
Stats,
LastSeen,
}
impl std::fmt::Debug for Config {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Config")
.field("hostname", &self.hostname)
.field("addr", &self.addr)
.field("port", &self.port)
.field("debug", &self.debug)
.field("restricted_mode", &self.restricted_mode)
.field("validate_signatures", &self.validate_signatures)
.field("publish_blocks", &self.publish_blocks)
.field("base_uri", &self.base_uri.to_string())
.field("sled_path", &self.sled_path)
.field("source_repo", &self.source_repo.to_string())
.field(
"opentelemetry_url",
&self.opentelemetry_url.as_ref().map(|url| url.to_string()),
)
.field("telegram_token", &"[redacted]")
.field("telegram_admin_handle", &self.telegram_admin_handle)
.field("api_token", &"[redacted]")
.field("tls_key", &"[redacted]")
.field("tls_cert", &"[redacted]")
.field("footer_blurb", &self.footer_blurb)
.field("local_domains", &self.local_domains)
.field("local_blurb", &self.local_blurb)
.field("prometheus_config", &self.prometheus_config)
.field("deliver_concurrency", &self.deliver_concurrency)
.field("client_timeout", &self.client_timeout)
.field("proxy_config", &self.proxy_config)
.field("signature_threads", &self.signature_threads)
.finish()
}
}
impl Config {
pub fn build() -> Result<Self, MyError> {
let mut config = config::Config::new();
config
pub(crate) fn build() -> Result<Self, Error> {
let config = config::Config::builder()
.set_default("hostname", "localhost:8080")?
.set_default("addr", "127.0.0.1")?
.set_default("port", 8080)?
.set_default("port", 8080u64)?
.set_default("debug", true)?
.set_default("whitelist_mode", false)?
.set_default("validate_signatures", false)?
.set_default("https", false)?
.set_default("pretty_log", true)?
.set_default("restricted_mode", false)?
.set_default("validate_signatures", true)?
.set_default("https", true)?
.set_default("publish_blocks", false)?
.set_default("max_connections", 2)?
.merge(Environment::new())?;
.set_default("sled_path", "./sled/db-0-34")?
.set_default("source_repo", "https://git.asonix.dog/asonix/relay")?
.set_default("repository_commit_base", "/src/commit/")?
.set_default("opentelemetry_url", None as Option<&str>)?
.set_default("telegram_token", None as Option<&str>)?
.set_default("telegram_admin_handle", None as Option<&str>)?
.set_default("api_token", None as Option<&str>)?
.set_default("tls_key", None as Option<&str>)?
.set_default("tls_cert", None as Option<&str>)?
.set_default("footer_blurb", None as Option<&str>)?
.set_default("local_domains", None as Option<&str>)?
.set_default("local_blurb", None as Option<&str>)?
.set_default("prometheus_addr", None as Option<&str>)?
.set_default("prometheus_port", None as Option<u16>)?
.set_default("deliver_concurrency", 8u64)?
.set_default("client_timeout", 10u64)?
.set_default("proxy_url", None as Option<&str>)?
.set_default("proxy_username", None as Option<&str>)?
.set_default("proxy_password", None as Option<&str>)?
.set_default("signature_threads", None as Option<u64>)?
.add_source(Environment::default())
.build()?;
let config: ParsedConfig = config.try_into()?;
let config: ParsedConfig = config.try_deserialize()?;
let scheme = if config.https { "https" } else { "http" };
let base_uri = uri!(format!("{}://{}", scheme, config.hostname));
let base_uri = iri!(format!("{scheme}://{}", config.hostname)).into_absolute();
let tls = match (config.tls_key, config.tls_cert) {
(Some(key), Some(cert)) => Some(TlsConfig { key, cert }),
(Some(_), None) => {
tracing::warn!("TLS_KEY is set but TLS_CERT isn't , not building TLS config");
None
}
(None, Some(_)) => {
tracing::warn!("TLS_CERT is set but TLS_KEY isn't , not building TLS config");
None
}
(None, None) => None,
};
let local_domains = config
.local_domains
.iter()
.flat_map(|s| s.split(','))
.map(|d| d.to_string())
.collect();
let prometheus_config = match (config.prometheus_addr, config.prometheus_port) {
(Some(addr), Some(port)) => Some(PrometheusConfig { addr, port }),
(Some(_), None) => {
tracing::warn!("PROMETHEUS_ADDR is set but PROMETHEUS_PORT is not set, not building Prometheus config");
None
}
(None, Some(_)) => {
tracing::warn!("PROMETHEUS_PORT is set but PROMETHEUS_ADDR is not set, not building Prometheus config");
None
}
(None, None) => None,
};
let proxy_config = match (config.proxy_username, config.proxy_password) {
(Some(username), Some(password)) => config.proxy_url.map(|url| ProxyConfig {
url,
auth: Some((username, password)),
}),
(Some(_), None) => {
tracing::warn!(
"PROXY_USERNAME is set but PROXY_PASSWORD is not set, not setting Proxy Auth"
);
config.proxy_url.map(|url| ProxyConfig { url, auth: None })
}
(None, Some(_)) => {
tracing::warn!(
"PROXY_PASSWORD is set but PROXY_USERNAME is not set, not setting Proxy Auth"
);
config.proxy_url.map(|url| ProxyConfig { url, auth: None })
}
(None, None) => config.proxy_url.map(|url| ProxyConfig { url, auth: None }),
};
let source_url = match Self::git_hash() {
Some(hash) => format!(
"{}{}{hash}",
config.source_repo, config.repository_commit_base
)
.parse()
.expect("constructed source URL is valid"),
None => config.source_repo.clone(),
};
Ok(Config {
hostname: config.hostname,
addr: config.addr,
port: config.port,
debug: config.debug,
whitelist_mode: config.whitelist_mode,
restricted_mode: config.restricted_mode,
validate_signatures: config.validate_signatures,
database_url: config.database_url,
pretty_log: config.pretty_log,
publish_blocks: config.publish_blocks,
max_connections: config.max_connections,
base_uri,
sled_path: config.sled_path,
source_repo: source_url,
opentelemetry_url: config.opentelemetry_url,
telegram_token: config.telegram_token,
telegram_admin_handle: config.telegram_admin_handle,
api_token: config.api_token,
tls,
footer_blurb: config.footer_blurb,
local_domains,
local_blurb: config.local_blurb,
prometheus_config,
deliver_concurrency: config.deliver_concurrency,
client_timeout: config.client_timeout,
proxy_config,
signature_threads: config.signature_threads,
})
}
pub fn pretty_log(&self) -> bool {
self.pretty_log
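// Use the configured thread count when present; otherwise detect available
// parallelism, falling back to a single thread if detection fails.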
pub(crate) fn signature_threads(&self) -> usize {
self.signature_threads
.unwrap_or_else(|| {
std::thread::available_parallelism()
.map(usize::from)
.map_err(|e| tracing::warn!("Failed to get parallelism, {e}"))
.unwrap_or(1)
})
.max(1)
}
pub fn max_connections(&self) -> usize {
self.max_connections
pub(crate) fn client_timeout(&self) -> u64 {
self.client_timeout
}
pub fn validate_signatures(&self) -> bool {
pub(crate) fn deliver_concurrency(&self) -> u64 {
self.deliver_concurrency
}
pub(crate) fn prometheus_bind_address(&self) -> Option<SocketAddr> {
let config = self.prometheus_config.as_ref()?;
Some((config.addr, config.port).into())
}
pub(crate) async fn open_keys(&self) -> Result<Option<CertifiedKey>, Error> {
let tls = if let Some(tls) = &self.tls {
tls
} else {
tracing::info!("No TLS config present");
return Ok(None);
};
let certs_bytes = tokio::fs::read(&tls.cert).await?;
let certs =
rustls_pemfile::certs(&mut certs_bytes.as_slice()).collect::<Result<Vec<_>, _>>()?;
if certs.is_empty() {
tracing::warn!("No certs read from certificate file");
return Ok(None);
}
let key_bytes = tokio::fs::read(&tls.key).await?;
let key = if let Some(key) = rustls_pemfile::private_key(&mut key_bytes.as_slice())? {
key
} else {
tracing::warn!("Failed to read private key");
return Ok(None);
};
let key = rustls::crypto::ring::sign::any_supported_type(&key)?;
Ok(Some(CertifiedKey::new(certs, key)))
}
pub(crate) fn footer_blurb(&self) -> Option<crate::templates::Html<String>> {
if let Some(blurb) = &self.footer_blurb {
if !blurb.is_empty() {
return Some(crate::templates::Html(
ammonia::Builder::new()
.add_tag_attributes("a", &["rel"])
.add_tag_attributes("area", &["rel"])
.add_tag_attributes("link", &["rel"])
.link_rel(None)
.clean(blurb)
.to_string(),
));
}
}
None
}
pub(crate) fn local_blurb(&self) -> Option<crate::templates::Html<String>> {
if let Some(blurb) = &self.local_blurb {
if !blurb.is_empty() {
return Some(crate::templates::Html(
ammonia::Builder::new()
.add_tag_attributes("a", &["rel"])
.add_tag_attributes("area", &["rel"])
.add_tag_attributes("link", &["rel"])
.link_rel(None)
.clean(blurb)
.to_string(),
));
}
}
None
}
pub(crate) fn local_domains(&self) -> &[String] {
&self.local_domains
}
pub(crate) fn sled_path(&self) -> &PathBuf {
&self.sled_path
}
pub(crate) fn validate_signatures(&self) -> bool {
self.validate_signatures
}
pub fn digest_middleware(&self) -> VerifyDigest<Sha256> {
pub(crate) fn digest_middleware(&self) -> VerifyDigest<Sha256> {
if self.validate_signatures {
VerifyDigest::new(Sha256::new())
} else {
@@ -105,78 +397,187 @@ impl Config {
}
}
pub fn signature_middleware(
&self,
requests: Requests,
actors: ActorCache,
) -> VerifySignature<MyVerify> {
if self.validate_signatures {
VerifySignature::new(MyVerify(requests, actors), Default::default())
pub(crate) fn x_api_token(&self) -> Option<XApiToken> {
self.api_token.clone().map(XApiToken::new)
}
pub(crate) fn admin_config(&self) -> Option<actix_web::web::Data<AdminConfig>> {
if let Some(api_token) = &self.api_token {
match AdminConfig::build(api_token) {
Ok(conf) => Some(actix_web::web::Data::new(conf)),
Err(e) => {
tracing::error!("Error creating admin config: {e}");
None
}
}
} else {
VerifySignature::new(MyVerify(requests, actors), Default::default()).optional()
None
}
}
pub fn bind_address(&self) -> (IpAddr, u16) {
pub(crate) fn bind_address(&self) -> (IpAddr, u16) {
(self.addr, self.port)
}
pub fn debug(&self) -> bool {
pub(crate) fn debug(&self) -> bool {
self.debug
}
pub fn publish_blocks(&self) -> bool {
pub(crate) fn publish_blocks(&self) -> bool {
self.publish_blocks
}
pub fn whitelist_mode(&self) -> bool {
self.whitelist_mode
pub(crate) fn restricted_mode(&self) -> bool {
self.restricted_mode
}
pub fn database_url(&self) -> &str {
&self.database_url
}
pub fn hostname(&self) -> &str {
pub(crate) fn hostname(&self) -> &str {
&self.hostname
}
pub fn generate_resource(&self) -> String {
pub(crate) fn generate_resource(&self) -> String {
format!("relay@{}", self.hostname)
}
pub fn software_name(&self) -> String {
"AodeRelay".to_owned()
pub(crate) fn software_name() -> &'static str {
"AodeRelay"
}
pub fn software_version(&self) -> String {
"v0.1.0-master".to_owned()
pub(crate) fn software_version() -> String {
if let Some(git) = Self::git_version() {
return format!("v{}-{git}", Self::version());
}
format!("v{}", Self::version())
}
pub fn source_code(&self) -> String {
"https://git.asonix.dog/asonix/ap-relay".to_owned()
fn git_version() -> Option<String> {
let branch = Self::git_branch()?;
let hash = Self::git_short_hash()?;
Some(format!("{branch}-{hash}"))
}
pub fn generate_url(&self, kind: UrlKind) -> XsdAnyUri {
let mut uri = self.base_uri.clone();
let url = uri.as_url_mut();
fn name() -> &'static str {
env!("PKG_NAME")
}
match kind {
UrlKind::Activity => url.set_path(&format!("activity/{}", Uuid::new_v4())),
UrlKind::Actor => url.set_path("actor"),
UrlKind::Followers => url.set_path("followers"),
UrlKind::Following => url.set_path("following"),
UrlKind::Inbox => url.set_path("inbox"),
UrlKind::Index => (),
fn version() -> &'static str {
env!("PKG_VERSION")
}
fn git_branch() -> Option<&'static str> {
option_env!("GIT_BRANCH")
}
fn git_hash() -> Option<&'static str> {
option_env!("GIT_HASH")
}
fn git_short_hash() -> Option<&'static str> {
option_env!("GIT_SHORT_HASH")
}
pub(crate) fn user_agent(&self) -> String {
format!(
"{} ({}/{}; +{})",
Self::software_name(),
Self::name(),
Self::software_version(),
self.generate_url(UrlKind::Index),
)
}
pub(crate) fn proxy_config(&self) -> Option<(&IriString, Option<(&str, &str)>)> {
self.proxy_config.as_ref().map(|ProxyConfig { url, auth }| {
(url, auth.as_ref().map(|(u, p)| (u.as_str(), p.as_str())))
})
}
pub(crate) fn source_code(&self) -> &IriString {
&self.source_repo
}
pub(crate) fn opentelemetry_url(&self) -> Option<&IriString> {
self.opentelemetry_url.as_ref()
}
pub(crate) fn telegram_info(&self) -> Option<(&str, &str)> {
self.telegram_token.as_deref().and_then(|token| {
let handle = self.telegram_admin_handle.as_deref()?;
Some((token, handle))
})
}
pub(crate) fn generate_url(&self, kind: UrlKind) -> IriString {
self.do_generate_url(kind).expect("Generated valid IRI")
}
fn do_generate_url(&self, kind: UrlKind) -> Result<IriString, Error> {
let iri = match kind {
UrlKind::Activity => FixedBaseResolver::new(self.base_uri.as_ref())
.resolve(IriRelativeStr::new(&format!("activity/{}", Uuid::new_v4()))?.as_ref())
.try_to_dedicated_string()?,
UrlKind::Actor => FixedBaseResolver::new(self.base_uri.as_ref())
.resolve(IriRelativeStr::new("actor")?.as_ref())
.try_to_dedicated_string()?,
UrlKind::Followers => FixedBaseResolver::new(self.base_uri.as_ref())
.resolve(IriRelativeStr::new("followers")?.as_ref())
.try_to_dedicated_string()?,
UrlKind::Following => FixedBaseResolver::new(self.base_uri.as_ref())
.resolve(IriRelativeStr::new("following")?.as_ref())
.try_to_dedicated_string()?,
UrlKind::Inbox => FixedBaseResolver::new(self.base_uri.as_ref())
.resolve(IriRelativeStr::new("inbox")?.as_ref())
.try_to_dedicated_string()?,
UrlKind::Index => self.base_uri.clone().into(),
UrlKind::MainKey => {
url.set_path("actor");
url.set_fragment(Some("main-key"));
let actor = IriRelativeStr::new("actor")?;
let fragment = IriFragmentStr::new("main-key")?;
let mut resolved = FixedBaseResolver::new(self.base_uri.as_ref())
.resolve(actor.as_ref())
.try_to_dedicated_string()?;
resolved.set_fragment(Some(fragment));
resolved
}
UrlKind::Media(uuid) => url.set_path(&format!("media/{}", uuid)),
UrlKind::NodeInfo => url.set_path("nodeinfo/2.0.json"),
UrlKind::Outbox => url.set_path("outbox"),
UrlKind::Media(uuid) => FixedBaseResolver::new(self.base_uri.as_ref())
.resolve(IriRelativeStr::new(&format!("media/{uuid}"))?.as_ref())
.try_to_dedicated_string()?,
UrlKind::NodeInfo => FixedBaseResolver::new(self.base_uri.as_ref())
.resolve(IriRelativeStr::new("nodeinfo/2.0.json")?.as_ref())
.try_to_dedicated_string()?,
UrlKind::Outbox => FixedBaseResolver::new(self.base_uri.as_ref())
.resolve(IriRelativeStr::new("outbox")?.as_ref())
.try_to_dedicated_string()?,
};
uri
Ok(iri)
}
pub(crate) fn generate_admin_url(&self, kind: AdminUrlKind) -> IriString {
self.do_generate_admin_url(kind)
.expect("Generated valid IRI")
}
fn do_generate_admin_url(&self, kind: AdminUrlKind) -> Result<IriString, Error> {
let path = match kind {
AdminUrlKind::Allow => "api/v1/admin/allow",
AdminUrlKind::Disallow => "api/v1/admin/disallow",
AdminUrlKind::Block => "api/v1/admin/block",
AdminUrlKind::Unblock => "api/v1/admin/unblock",
AdminUrlKind::Allowed => "api/v1/admin/allowed",
AdminUrlKind::Blocked => "api/v1/admin/blocked",
AdminUrlKind::Connected => "api/v1/admin/connected",
AdminUrlKind::Stats => "api/v1/admin/stats",
AdminUrlKind::LastSeen => "api/v1/admin/last_seen",
};
let iri = FixedBaseResolver::new(self.base_uri.as_ref())
.resolve(IriRelativeStr::new(path)?.as_ref())
.try_to_dedicated_string()?;
Ok(iri)
}
}
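Every arm of do_generate_url and do_generate_admin_url repeats the same three-step resolution: wrap the path as a relative IRI, resolve it against the configured base, and materialize the result. A minimal standalone sketch of that step, reusing the iri-string calls shown in the diff; the base and path values are hypothetical, and the import path assumes the activitystreams re-export used elsewhere in this changeset:

use activitystreams::iri_string::{
    resolve::FixedBaseResolver,
    types::{IriAbsoluteStr, IriRelativeStr, IriString},
};

fn resolve_relative(base: &IriAbsoluteStr, path: &str) -> Result<IriString, Error> {
    // e.g. base = "https://relay.example.com/", path = "api/v1/admin/stats"
    let iri = FixedBaseResolver::new(base)
        .resolve(IriRelativeStr::new(path)?.as_ref())
        .try_to_dedicated_string()?;
    Ok(iri)
}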

src/data.rs Normal file

@@ -0,0 +1,11 @@
mod actor;
mod last_online;
mod media;
mod node;
mod state;
pub(crate) use actor::ActorCache;
pub(crate) use last_online::LastOnline;
pub(crate) use media::MediaCache;
pub(crate) use node::{Node, NodeCache};
pub(crate) use state::State;


@@ -1,10 +1,11 @@
use crate::{apub::AcceptedActors, db::Db, error::MyError, requests::Requests};
use activitystreams_new::{prelude::*, primitives::XsdAnyUri, uri};
use log::error;
use std::{collections::HashSet, sync::Arc, time::Duration};
use tokio::sync::RwLock;
use ttl_cache::TtlCache;
use uuid::Uuid;
use crate::{
apub::AcceptedActors,
db::{Actor, Db},
error::{Error, ErrorKind},
requests::{BreakerStrategy, Requests},
};
use activitystreams::{iri_string::types::IriString, prelude::*};
use std::time::{Duration, SystemTime};
const REFETCH_DURATION: Duration = Duration::from_secs(60 * 30);
@@ -15,105 +16,37 @@ pub enum MaybeCached<T> {
}
impl<T> MaybeCached<T> {
pub fn is_cached(&self) -> bool {
match self {
MaybeCached::Cached(_) => true,
_ => false,
}
pub(crate) fn is_cached(&self) -> bool {
matches!(self, MaybeCached::Cached(_))
}
pub fn into_inner(self) -> T {
pub(crate) fn into_inner(self) -> T {
match self {
MaybeCached::Cached(t) | MaybeCached::Fetched(t) => t,
}
}
}
#[derive(Clone)]
#[derive(Clone, Debug)]
pub struct ActorCache {
db: Db,
cache: Arc<RwLock<TtlCache<XsdAnyUri, Actor>>>,
following: Arc<RwLock<HashSet<XsdAnyUri>>>,
}
impl ActorCache {
pub fn new(db: Db) -> Self {
let cache = ActorCache {
db,
cache: Arc::new(RwLock::new(TtlCache::new(1024 * 8))),
following: Arc::new(RwLock::new(HashSet::new())),
};
cache.spawn_rehydrate();
cache
pub(crate) fn new(db: Db) -> Self {
ActorCache { db }
}
pub async fn is_following(&self, id: &XsdAnyUri) -> bool {
self.following.read().await.contains(id)
}
pub async fn get_no_cache(
#[tracing::instrument(level = "debug" name = "Get Actor", skip_all, fields(id = id.to_string().as_str()))]
pub(crate) async fn get(
&self,
id: &XsdAnyUri,
id: &IriString,
requests: &Requests,
) -> Result<Actor, MyError> {
let accepted_actor = requests.fetch::<AcceptedActors>(id.as_str()).await?;
let input_host = id.as_url().host();
let accepted_actor_id = accepted_actor.id().ok_or(MyError::MissingId)?;
let actor_host = accepted_actor_id.as_url().host();
let inbox_host = get_inbox(&accepted_actor).as_url().host();
if input_host != actor_host {
let input_host = input_host.map(|h| h.to_string()).unwrap_or_default();
let actor_host = actor_host.map(|h| h.to_string()).unwrap_or_default();
return Err(MyError::HostMismatch(input_host, actor_host));
}
if actor_host != inbox_host {
let actor_host = actor_host.map(|h| h.to_string()).unwrap_or_default();
let inbox_host = inbox_host.map(|h| h.to_string()).unwrap_or_default();
return Err(MyError::HostMismatch(actor_host, inbox_host));
}
let inbox = get_inbox(&accepted_actor).clone();
let actor = Actor {
id: accepted_actor_id.clone(),
public_key: accepted_actor.ext_one.public_key.public_key_pem,
public_key_id: accepted_actor.ext_one.public_key.id,
inbox,
};
self.cache
.write()
.await
.insert(id.clone(), actor.clone(), REFETCH_DURATION);
self.update(id, &actor.public_key, &actor.public_key_id)
.await?;
Ok(actor)
}
pub async fn get(
&self,
id: &XsdAnyUri,
requests: &Requests,
) -> Result<MaybeCached<Actor>, MyError> {
if let Some(actor) = self.cache.read().await.get(id) {
return Ok(MaybeCached::Cached(actor.clone()));
}
if let Some(actor) = self.lookup(id).await? {
self.cache
.write()
.await
.insert(id.clone(), actor.clone(), REFETCH_DURATION);
return Ok(MaybeCached::Cached(actor));
) -> Result<MaybeCached<Actor>, Error> {
if let Some(actor) = self.db.actor(id.clone()).await? {
if actor.saved_at + REFETCH_DURATION > SystemTime::now() {
return Ok(MaybeCached::Cached(actor));
}
}
self.get_no_cache(id, requests)
@@ -121,231 +54,51 @@ impl ActorCache {
.map(MaybeCached::Fetched)
}
pub async fn follower(&self, actor: &Actor) -> Result<(), MyError> {
self.save(actor.clone()).await
#[tracing::instrument(level = "debug", name = "Add Connection", skip(self))]
pub(crate) async fn add_connection(&self, actor: Actor) -> Result<(), Error> {
self.db.add_connection(actor.id.clone()).await?;
self.db.save_actor(actor).await
}
pub async fn cache_follower(&self, id: XsdAnyUri) {
self.following.write().await.insert(id);
#[tracing::instrument(level = "debug", name = "Remove Connection", skip(self))]
pub(crate) async fn remove_connection(&self, actor: &Actor) -> Result<(), Error> {
self.db.remove_connection(actor.id.clone()).await
}
pub async fn bust_follower(&self, id: &XsdAnyUri) {
self.following.write().await.remove(id);
}
pub async fn unfollower(&self, actor: &Actor) -> Result<Option<Uuid>, MyError> {
let row_opt = self
.db
.pool()
.get()
.await?
.query_opt(
"DELETE FROM actors
WHERE actor_id = $1::TEXT
RETURNING listener_id;",
&[&actor.id.as_str()],
)
.await?;
let row = if let Some(row) = row_opt {
row
} else {
return Ok(None);
};
let listener_id: Uuid = row.try_get(0)?;
let row_opt = self
.db
.pool()
.get()
.await?
.query_opt(
"SELECT FROM actors
WHERE listener_id = $1::UUID;",
&[&listener_id],
)
.await?;
if row_opt.is_none() {
return Ok(Some(listener_id));
}
Ok(None)
}
async fn lookup(&self, id: &XsdAnyUri) -> Result<Option<Actor>, MyError> {
let row_opt = self
.db
.pool()
.get()
.await?
.query_opt(
"SELECT listeners.actor_id, actors.public_key, actors.public_key_id
FROM listeners
INNER JOIN actors ON actors.listener_id = listeners.id
WHERE
actors.actor_id = $1::TEXT
AND
actors.updated_at + INTERVAL '120 seconds' < NOW()
LIMIT 1;",
&[&id.as_str()],
)
.await?;
let row = if let Some(row) = row_opt {
row
} else {
return Ok(None);
};
let inbox: String = row.try_get(0)?;
let public_key_id: String = row.try_get(2)?;
Ok(Some(Actor {
id: id.clone(),
inbox: uri!(inbox),
public_key: row.try_get(1)?,
public_key_id: uri!(public_key_id),
}))
}
async fn save(&self, actor: Actor) -> Result<(), MyError> {
let row_opt = self
.db
.pool()
.get()
.await?
.query_opt(
"SELECT id FROM listeners WHERE actor_id = $1::TEXT LIMIT 1;",
&[&actor.inbox.as_str()],
)
.await?;
let row = if let Some(row) = row_opt {
row
} else {
return Err(MyError::NotSubscribed(actor.id.as_str().to_owned()));
};
let listener_id: Uuid = row.try_get(0)?;
self.db
.pool()
.get()
.await?
.execute(
"INSERT INTO actors (
actor_id,
public_key,
public_key_id,
listener_id,
created_at,
updated_at
) VALUES (
$1::TEXT,
$2::TEXT,
$3::TEXT,
$4::UUID,
'now',
'now'
) ON CONFLICT (actor_id)
DO UPDATE SET public_key = $2::TEXT;",
&[
&actor.id.as_str(),
&actor.public_key,
&actor.public_key_id.as_str(),
&listener_id,
],
)
.await?;
Ok(())
}
async fn update(
#[tracing::instrument(level = "debug", name = "Fetch remote actor", skip_all, fields(id = id.to_string().as_str()))]
pub(crate) async fn get_no_cache(
&self,
id: &XsdAnyUri,
public_key: &str,
public_key_id: &XsdAnyUri,
) -> Result<(), MyError> {
self.db
.pool()
.get()
.await?
.execute(
"UPDATE actors
SET public_key = $2::TEXT, public_key_id = $3::TEXT
WHERE actor_id = $1::TEXT;",
&[&id.as_str(), &public_key, &public_key_id.as_str()],
)
id: &IriString,
requests: &Requests,
) -> Result<Actor, Error> {
let accepted_actor = requests
.fetch::<AcceptedActors>(id, BreakerStrategy::Require2XX)
.await?;
Ok(())
}
let input_authority = id.authority_components().ok_or(ErrorKind::MissingDomain)?;
let accepted_actor_id = accepted_actor
.id(input_authority.host(), input_authority.port())?
.ok_or(ErrorKind::MissingId)?;
fn spawn_rehydrate(&self) {
use actix_rt::time::{interval_at, Instant};
let inbox = get_inbox(&accepted_actor)?.clone();
let this = self.clone();
actix_rt::spawn(async move {
let mut interval = interval_at(Instant::now(), Duration::from_secs(60 * 10));
let actor = Actor {
id: accepted_actor_id.clone(),
public_key: accepted_actor.ext_one.public_key.public_key_pem,
public_key_id: accepted_actor.ext_one.public_key.id,
inbox,
saved_at: SystemTime::now(),
};
loop {
if let Err(e) = this.rehydrate().await {
error!("Error rehydrating follows, {}", e);
}
self.db.save_actor(actor.clone()).await?;
interval.tick().await;
}
});
}
async fn rehydrate(&self) -> Result<(), MyError> {
let rows = self
.db
.pool()
.get()
.await?
.query("SELECT actor_id FROM actors;", &[])
.await?;
let actor_ids = rows
.into_iter()
.filter_map(|row| match row.try_get(0) {
Ok(s) => {
let s: String = s;
match s.parse() {
Ok(s) => Some(s),
Err(e) => {
error!("Error parsing actor id, {}", e);
None
}
}
}
Err(e) => {
error!("Error getting actor id from row, {}", e);
None
}
})
.collect();
let mut write_guard = self.following.write().await;
*write_guard = actor_ids;
Ok(())
Ok(actor)
}
}
fn get_inbox(actor: &AcceptedActors) -> &XsdAnyUri {
actor
.endpoints()
fn get_inbox(actor: &AcceptedActors) -> Result<&IriString, Error> {
Ok(actor
.endpoints()?
.and_then(|e| e.shared_inbox.as_ref())
.unwrap_or(actor.inbox())
}
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct Actor {
pub id: XsdAnyUri,
pub public_key: String,
pub public_key_id: XsdAnyUri,
pub inbox: XsdAnyUri,
.unwrap_or(actor.inbox()?))
}
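With the TtlCache and rehydrate loop gone, get() resolves in two steps against the db: return the stored actor if it was saved within REFETCH_DURATION, otherwise refetch over HTTP and re-save. A hypothetical call site, with cache, id, and requests assumed in scope:

let actor = match cache.get(&id, &requests).await? {
    MaybeCached::Cached(actor) => actor,  // db copy saved within the last 30 minutes
    MaybeCached::Fetched(actor) => actor, // stale or missing: refetched and re-saved
};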

src/data/last_online.rs Normal file

@@ -0,0 +1,28 @@
use activitystreams::iri_string::types::IriStr;
use std::{collections::HashMap, sync::Mutex};
use time::OffsetDateTime;
pub(crate) struct LastOnline {
domains: Mutex<HashMap<String, OffsetDateTime>>,
}
impl LastOnline {
pub(crate) fn mark_seen(&self, iri: &IriStr) {
if let Some(authority) = iri.authority_str() {
let mut guard = self.domains.lock().unwrap();
guard.insert(authority.to_string(), OffsetDateTime::now_utc());
metrics::gauge!("relay.last-online.size")
.set(crate::collector::recordable(guard.len()));
}
}
pub(crate) fn take(&self) -> HashMap<String, OffsetDateTime> {
std::mem::take(&mut *self.domains.lock().unwrap())
}
pub(crate) fn empty() -> Self {
Self {
domains: Mutex::new(HashMap::default()),
}
}
}
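mark_seen keeps only the IRI's authority, so hot request paths pay one mutex lock plus a gauge update, while take() swaps the whole map out, presumably for the RecordLastOnline job scheduled in src/jobs.rs to flush every ten minutes. A small usage sketch; the actor IRI is hypothetical:

let last_online = LastOnline::empty();
if let Ok(iri) = IriStr::new("https://example.com/actor") {
    last_online.mark_seen(iri); // records "example.com" with the current timestamp
}
let snapshot = last_online.take(); // leaves an empty map for the next window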


@@ -1,171 +1,33 @@
use crate::{db::Db, error::MyError};
use activitystreams_new::primitives::XsdAnyUri;
use async_mutex::Mutex;
use bytes::Bytes;
use futures::join;
use lru::LruCache;
use std::{collections::HashMap, sync::Arc, time::Duration};
use tokio::sync::RwLock;
use ttl_cache::TtlCache;
use crate::{db::Db, error::Error};
use activitystreams::iri_string::types::IriString;
use uuid::Uuid;
static MEDIA_DURATION: Duration = Duration::from_secs(60 * 60 * 24 * 2);
#[derive(Clone)]
pub struct Media {
#[derive(Clone, Debug)]
pub struct MediaCache {
db: Db,
inverse: Arc<Mutex<HashMap<XsdAnyUri, Uuid>>>,
url_cache: Arc<Mutex<LruCache<Uuid, XsdAnyUri>>>,
byte_cache: Arc<RwLock<TtlCache<Uuid, (String, Bytes)>>>,
}
impl Media {
pub fn new(db: Db) -> Self {
Media {
db,
inverse: Arc::new(Mutex::new(HashMap::new())),
url_cache: Arc::new(Mutex::new(LruCache::new(128))),
byte_cache: Arc::new(RwLock::new(TtlCache::new(128))),
}
impl MediaCache {
pub(crate) fn new(db: Db) -> Self {
MediaCache { db }
}
pub async fn get_uuid(&self, url: &XsdAnyUri) -> Result<Option<Uuid>, MyError> {
let res = self.inverse.lock().await.get(url).cloned();
let uuid = match res {
Some(uuid) => uuid,
_ => {
let row_opt = self
.db
.pool()
.get()
.await?
.query_opt(
"SELECT media_id
FROM media
WHERE url = $1::TEXT
LIMIT 1;",
&[&url.as_str()],
)
.await?;
if let Some(row) = row_opt {
let uuid: Uuid = row.try_get(0)?;
self.inverse.lock().await.insert(url.clone(), uuid);
uuid
} else {
return Ok(None);
}
}
};
if self.url_cache.lock().await.contains(&uuid) {
return Ok(Some(uuid));
}
let row_opt = self
.db
.pool()
.get()
.await?
.query_opt(
"SELECT id
FROM media
WHERE
url = $1::TEXT
AND
media_id = $2::UUID
LIMIT 1;",
&[&url.as_str(), &uuid],
)
.await?;
if row_opt.is_some() {
self.url_cache.lock().await.put(uuid, url.clone());
return Ok(Some(uuid));
}
self.inverse.lock().await.remove(url);
Ok(None)
#[tracing::instrument(level = "debug", name = "Get media uuid", skip_all, fields(url = url.to_string().as_str()))]
pub(crate) async fn get_uuid(&self, url: IriString) -> Result<Option<Uuid>, Error> {
self.db.media_id(url).await
}
pub async fn get_url(&self, uuid: Uuid) -> Result<Option<XsdAnyUri>, MyError> {
if let Some(url) = self.url_cache.lock().await.get(&uuid).cloned() {
return Ok(Some(url));
}
let row_opt = self
.db
.pool()
.get()
.await?
.query_opt(
"SELECT url
FROM media
WHERE media_id = $1::UUID
LIMIT 1;",
&[&uuid],
)
.await?;
if let Some(row) = row_opt {
let url: String = row.try_get(0)?;
let url: XsdAnyUri = url.parse()?;
return Ok(Some(url));
}
Ok(None)
#[tracing::instrument(level = "debug", name = "Get media url", skip(self))]
pub(crate) async fn get_url(&self, uuid: Uuid) -> Result<Option<IriString>, Error> {
self.db.media_url(uuid).await
}
pub async fn get_bytes(&self, uuid: Uuid) -> Option<(String, Bytes)> {
self.byte_cache.read().await.get(&uuid).cloned()
}
pub async fn store_url(&self, url: &XsdAnyUri) -> Result<Uuid, MyError> {
#[tracing::instrument(name = "Store media url", skip_all, fields(url = url.to_string().as_str()))]
pub(crate) async fn store_url(&self, url: IriString) -> Result<Uuid, Error> {
let uuid = Uuid::new_v4();
let (_, _, res) = join!(
async {
self.inverse.lock().await.insert(url.clone(), uuid);
},
async {
self.url_cache.lock().await.put(uuid, url.clone());
},
async {
self.db
.pool()
.get()
.await?
.execute(
"INSERT INTO media (
media_id,
url,
created_at,
updated_at
) VALUES (
$1::UUID,
$2::TEXT,
'now',
'now'
) ON CONFLICT (media_id)
DO UPDATE SET url = $2::TEXT;",
&[&uuid, &url.as_str()],
)
.await?;
Ok(()) as Result<(), MyError>
}
);
res?;
self.db.save_url(url, uuid).await?;
Ok(uuid)
}
pub async fn store_bytes(&self, uuid: Uuid, content_type: String, bytes: Bytes) {
self.byte_cache
.write()
.await
.insert(uuid, (content_type, bytes), MEDIA_DURATION);
}
}
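With the inverse map, LRU, and byte cache removed, MediaCache is a thin pass-through to Db, so the url/uuid mapping survives restarts for free. A hypothetical round trip, with media and url assumed in scope:

let uuid = media.store_url(url.clone()).await?;
assert_eq!(media.get_url(uuid).await?, Some(url.clone()));
assert_eq!(media.get_uuid(url).await?, Some(uuid));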


@@ -1,11 +0,0 @@
mod actor;
mod media;
mod node;
mod state;
pub use self::{
actor::{Actor, ActorCache},
media::Media,
node::{Contact, Info, Instance, Node, NodeCache},
state::State,
};


@@ -1,444 +1,229 @@
use crate::{db::Db, error::MyError};
use activitystreams_new::{primitives::XsdAnyUri, uri};
use log::{debug, error};
use std::{
collections::{HashMap, HashSet},
sync::Arc,
time::{Duration, SystemTime},
use crate::{
db::{Contact, Db, Info, Instance},
error::{Error, ErrorKind},
};
use tokio::sync::RwLock;
use tokio_postgres::types::Json;
use uuid::Uuid;
use activitystreams::{iri, iri_string::types::IriString};
use std::time::{Duration, SystemTime};
pub type ListenersCache = Arc<RwLock<HashSet<XsdAnyUri>>>;
#[derive(Clone)]
#[derive(Clone, Debug)]
pub struct NodeCache {
db: Db,
listeners: ListenersCache,
nodes: Arc<RwLock<HashMap<XsdAnyUri, Node>>>,
}
#[derive(Clone, serde::Deserialize, serde::Serialize)]
pub struct Node {
pub(crate) base: IriString,
pub(crate) info: Option<Info>,
pub(crate) instance: Option<Instance>,
pub(crate) contact: Option<Contact>,
}
impl std::fmt::Debug for Node {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Node")
.field("base", &self.base.to_string())
.field("info", &self.info)
.field("instance", &self.instance)
.field("contact", &self.contact)
.finish()
}
}
impl NodeCache {
pub fn new(db: Db, listeners: ListenersCache) -> Self {
NodeCache {
db,
listeners,
nodes: Arc::new(RwLock::new(HashMap::new())),
}
pub(crate) fn new(db: Db) -> Self {
NodeCache { db }
}
pub async fn nodes(&self) -> Vec<Node> {
let listeners: HashSet<_> = self.listeners.read().await.clone();
#[tracing::instrument(level = "debug", name = "Get nodes", skip(self))]
pub(crate) async fn nodes(&self) -> Result<Vec<Node>, Error> {
let infos = self.db.connected_info().await?;
let instances = self.db.connected_instance().await?;
let contacts = self.db.connected_contact().await?;
self.nodes
.read()
.await
.iter()
.filter_map(|(k, v)| {
if listeners.contains(k) {
Some(v.clone())
} else {
None
}
let vec = self
.db
.connected_ids()
.await?
.into_iter()
.map(move |actor_id| {
let info = infos.get(&actor_id).cloned();
let instance = instances.get(&actor_id).cloned();
let contact = contacts.get(&actor_id).cloned();
Node::new(actor_id).map(|node| node.info(info).instance(instance).contact(contact))
})
.collect()
.collect::<Result<Vec<Node>, Error>>()?;
Ok(vec)
}
pub async fn is_nodeinfo_outdated(&self, listener: &XsdAnyUri) -> bool {
let read_guard = self.nodes.read().await;
let node = match read_guard.get(listener) {
None => {
debug!("No node for listener {}", listener);
return true;
}
Some(node) => node,
};
match node.info.as_ref() {
Some(nodeinfo) => nodeinfo.outdated(),
None => {
debug!("No info for node {}", node.base);
true
}
}
#[tracing::instrument(level = "debug", name = "Is NodeInfo Outdated", skip_all, fields(actor_id = actor_id.to_string().as_str()))]
pub(crate) async fn is_nodeinfo_outdated(&self, actor_id: IriString) -> bool {
self.db
.info(actor_id)
.await
.map(|opt| opt.map(|info| info.outdated()).unwrap_or(true))
.unwrap_or(true)
}
pub async fn is_contact_outdated(&self, listener: &XsdAnyUri) -> bool {
let read_guard = self.nodes.read().await;
let node = match read_guard.get(listener) {
None => {
debug!("No node for listener {}", listener);
return true;
}
Some(node) => node,
};
match node.contact.as_ref() {
Some(contact) => contact.outdated(),
None => {
debug!("No contact for node {}", node.base);
true
}
}
#[tracing::instrument(level = "debug", name = "Is Contact Outdated", skip_all, fields(actor_id = actor_id.to_string().as_str()))]
pub(crate) async fn is_contact_outdated(&self, actor_id: IriString) -> bool {
self.db
.contact(actor_id)
.await
.map(|opt| opt.map(|contact| contact.outdated()).unwrap_or(true))
.unwrap_or(true)
}
pub async fn is_instance_outdated(&self, listener: &XsdAnyUri) -> bool {
let read_guard = self.nodes.read().await;
let node = match read_guard.get(listener) {
None => {
debug!("No node for listener {}", listener);
return true;
}
Some(node) => node,
};
match node.instance.as_ref() {
Some(instance) => instance.outdated(),
None => {
debug!("No instance for node {}", node.base);
true
}
}
#[tracing::instrument(level = "debug", name = "Is Instance Outdated", skip_all, fields(actor_id = actor_id.to_string().as_str()))]
pub(crate) async fn is_instance_outdated(&self, actor_id: IriString) -> bool {
self.db
.instance(actor_id)
.await
.map(|opt| opt.map(|instance| instance.outdated()).unwrap_or(true))
.unwrap_or(true)
}
pub async fn cache_by_id(&self, id: Uuid) {
if let Err(e) = self.do_cache_by_id(id).await {
error!("Error loading node into cache, {}", e);
}
}
pub async fn bust_by_id(&self, id: Uuid) {
if let Err(e) = self.do_bust_by_id(id).await {
error!("Error busting node cache, {}", e);
}
}
async fn do_bust_by_id(&self, id: Uuid) -> Result<(), MyError> {
let row_opt = self
.db
.pool()
.get()
.await?
.query_opt(
"SELECT ls.actor_id
FROM listeners AS ls
INNER JOIN nodes AS nd ON nd.listener_id = ls.id
WHERE nd.id = $1::UUID
LIMIT 1;",
&[&id],
)
.await?;
let row = if let Some(row) = row_opt {
row
} else {
return Ok(());
};
let listener: String = row.try_get(0)?;
self.nodes.write().await.remove(&uri!(listener));
Ok(())
}
async fn do_cache_by_id(&self, id: Uuid) -> Result<(), MyError> {
let row_opt = self
.db
.pool()
.get()
.await?
.query_opt(
"SELECT ls.actor_id, nd.nodeinfo, nd.instance, nd.contact
FROM nodes AS nd
INNER JOIN listeners AS ls ON nd.listener_id = ls.id
WHERE nd.id = $1::UUID
LIMIT 1;",
&[&id],
)
.await?;
let row = if let Some(row) = row_opt {
row
} else {
return Ok(());
};
let listener: String = row.try_get(0)?;
let listener = uri!(listener);
let info: Option<Json<Info>> = row.try_get(1)?;
let instance: Option<Json<Instance>> = row.try_get(2)?;
let contact: Option<Json<Contact>> = row.try_get(3)?;
{
let mut write_guard = self.nodes.write().await;
let node = write_guard
.entry(listener.clone())
.or_insert_with(|| Node::new(listener));
if let Some(info) = info {
node.info = Some(info.0);
}
if let Some(instance) = instance {
node.instance = Some(instance.0);
}
if let Some(contact) = contact {
node.contact = Some(contact.0);
}
}
Ok(())
}
pub async fn set_info(
#[tracing::instrument(level = "debug", name = "Save node info", skip_all, fields(actor_id = actor_id.to_string().as_str(), software, version, reg))]
pub(crate) async fn set_info(
&self,
listener: &XsdAnyUri,
actor_id: IriString,
software: String,
version: String,
reg: bool,
) -> Result<(), MyError> {
if !self.listeners.read().await.contains(listener) {
let mut nodes = self.nodes.write().await;
nodes.remove(listener);
return Ok(());
}
let node = {
let mut write_guard = self.nodes.write().await;
let node = write_guard
.entry(listener.clone())
.or_insert_with(|| Node::new(listener.clone()));
node.set_info(software, version, reg);
node.clone()
};
self.save(listener, &node).await?;
Ok(())
}
pub async fn set_instance(
&self,
listener: &XsdAnyUri,
title: String,
description: String,
version: String,
reg: bool,
requires_approval: bool,
) -> Result<(), MyError> {
if !self.listeners.read().await.contains(listener) {
let mut nodes = self.nodes.write().await;
nodes.remove(listener);
return Ok(());
}
let node = {
let mut write_guard = self.nodes.write().await;
let node = write_guard
.entry(listener.clone())
.or_insert_with(|| Node::new(listener.clone()));
node.set_instance(title, description, version, reg, requires_approval);
node.clone()
};
self.save(listener, &node).await?;
Ok(())
}
pub async fn set_contact(
&self,
listener: &XsdAnyUri,
username: String,
display_name: String,
url: XsdAnyUri,
avatar: XsdAnyUri,
) -> Result<(), MyError> {
if !self.listeners.read().await.contains(listener) {
let mut nodes = self.nodes.write().await;
nodes.remove(listener);
return Ok(());
}
let node = {
let mut write_guard = self.nodes.write().await;
let node = write_guard
.entry(listener.clone())
.or_insert_with(|| Node::new(listener.clone()));
node.set_contact(username, display_name, url, avatar);
node.clone()
};
self.save(listener, &node).await?;
Ok(())
}
pub async fn save(&self, listener: &XsdAnyUri, node: &Node) -> Result<(), MyError> {
let row_opt = self
.db
.pool()
.get()
.await?
.query_opt(
"SELECT id FROM listeners WHERE actor_id = $1::TEXT LIMIT 1;",
&[&listener.as_str()],
)
.await?;
let id: Uuid = if let Some(row) = row_opt {
row.try_get(0)?
} else {
return Err(MyError::NotSubscribed(listener.as_str().to_owned()));
};
) -> Result<(), Error> {
self.db
.pool()
.get()
.await?
.execute(
"INSERT INTO nodes (
listener_id,
nodeinfo,
instance,
contact,
created_at,
updated_at
) VALUES (
$1::UUID,
$2::JSONB,
$3::JSONB,
$4::JSONB,
'now',
'now'
) ON CONFLICT (listener_id)
DO UPDATE SET
nodeinfo = $2::JSONB,
instance = $3::JSONB,
contact = $4::JSONB;",
&[
&id,
&Json(&node.info),
&Json(&node.instance),
&Json(&node.contact),
],
.save_info(
actor_id,
Info {
software,
version,
reg,
updated: SystemTime::now(),
},
)
.await?;
Ok(())
}
}
#[derive(Clone, Debug)]
pub struct Node {
pub base: XsdAnyUri,
pub info: Option<Info>,
pub instance: Option<Instance>,
pub contact: Option<Contact>,
}
impl Node {
pub fn new(mut uri: XsdAnyUri) -> Self {
let url = uri.as_mut();
url.set_fragment(None);
url.set_query(None);
url.set_path("");
Node {
base: uri,
info: None,
instance: None,
contact: None,
}
.await
}
fn set_info(&mut self, software: String, version: String, reg: bool) -> &mut Self {
self.info = Some(Info {
software,
version,
reg,
updated: SystemTime::now(),
});
self
}
fn set_instance(
&mut self,
title: String,
description: String,
version: String,
reg: bool,
requires_approval: bool,
) -> &mut Self {
self.instance = Some(Instance {
#[tracing::instrument(
level = "debug",
name = "Save instance info",
skip_all,
fields(
actor_id = actor_id.to_string().as_str(),
title,
description,
version,
reg,
requires_approval,
updated: SystemTime::now(),
});
self
requires_approval
)
)]
pub(crate) async fn set_instance(
&self,
actor_id: IriString,
title: String,
description: String,
version: String,
reg: bool,
requires_approval: bool,
) -> Result<(), Error> {
self.db
.save_instance(
actor_id,
Instance {
title,
description,
version,
reg,
requires_approval,
updated: SystemTime::now(),
},
)
.await
}
fn set_contact(
&mut self,
username: String,
display_name: String,
url: XsdAnyUri,
avatar: XsdAnyUri,
) -> &mut Self {
self.contact = Some(Contact {
#[tracing::instrument(
level = "debug",
name = "Save contact info",
skip_all,
fields(
actor_id = actor_id.to_string().as_str(),
username,
display_name,
url,
avatar,
updated: SystemTime::now(),
});
self
url = url.to_string().as_str(),
avatar = avatar.to_string().as_str()
)
)]
pub(crate) async fn set_contact(
&self,
actor_id: IriString,
username: String,
display_name: String,
url: IriString,
avatar: IriString,
) -> Result<(), Error> {
self.db
.save_contact(
actor_id,
Contact {
username,
display_name,
url,
avatar,
updated: SystemTime::now(),
},
)
.await
}
}
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct Info {
pub software: String,
pub version: String,
pub reg: bool,
pub updated: SystemTime,
}
impl Node {
fn new(url: IriString) -> Result<Self, Error> {
let authority = url.authority_str().ok_or(ErrorKind::MissingDomain)?;
let scheme = url.scheme_str();
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct Instance {
pub title: String,
pub description: String,
pub version: String,
pub reg: bool,
pub requires_approval: bool,
pub updated: SystemTime,
}
let base = iri!(format!("{scheme}://{authority}"));
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct Contact {
pub username: String,
pub display_name: String,
pub url: XsdAnyUri,
pub avatar: XsdAnyUri,
pub updated: SystemTime,
Ok(Node {
base,
info: None,
instance: None,
contact: None,
})
}
fn info(mut self, info: Option<Info>) -> Self {
self.info = info;
self
}
fn instance(mut self, instance: Option<Instance>) -> Self {
self.instance = instance;
self
}
fn contact(mut self, contact: Option<Contact>) -> Self {
self.contact = contact;
self
}
}
static TEN_MINUTES: Duration = Duration::from_secs(60 * 10);
impl Info {
pub fn outdated(&self) -> bool {
pub(crate) fn outdated(&self) -> bool {
self.updated + TEN_MINUTES < SystemTime::now()
}
}
impl Instance {
pub fn outdated(&self) -> bool {
pub(crate) fn outdated(&self) -> bool {
self.updated + TEN_MINUTES < SystemTime::now()
}
}
impl Contact {
pub fn outdated(&self) -> bool {
pub(crate) fn outdated(&self) -> bool {
self.updated + TEN_MINUTES < SystemTime::now()
}
}
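Node::new now derives the base from just the scheme and authority, so any path, query, or fragment on the stored actor id is dropped, and all three record types share the same ten-minute wall-clock TTL. A sketch of the base derivation (module-internal, with a hypothetical id):

let node = Node::new(iri!("https://example.com/users/relay?page=1"))?;
// scheme + authority only: node.base is "https://example.com"
assert!(node.info.is_none() && node.instance.is_none() && node.contact.is_none());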


@@ -1,215 +1,136 @@
use crate::{
config::{Config, UrlKind},
data::NodeCache,
db::Db,
error::MyError,
requests::Requests,
};
use activitystreams_new::primitives::XsdAnyUri;
use actix_rt::{
spawn,
time::{interval_at, Instant},
error::Error,
requests::{Breakers, Requests},
spawner::Spawner,
};
use activitystreams::iri_string::types::IriString;
use actix_web::web;
use futures::{join, try_join};
use log::{error, info};
use lru::LruCache;
use rand::thread_rng;
use rsa::{RSAPrivateKey, RSAPublicKey};
use std::{collections::HashSet, sync::Arc, time::Duration};
use tokio::sync::RwLock;
use reqwest_middleware::ClientWithMiddleware;
use rsa::{RsaPrivateKey, RsaPublicKey};
use std::sync::{Arc, RwLock};
use super::LastOnline;
#[derive(Clone)]
pub struct State {
pub public_key: RSAPublicKey,
private_key: RSAPrivateKey,
config: Config,
actor_id_cache: Arc<RwLock<LruCache<XsdAnyUri, XsdAnyUri>>>,
blocks: Arc<RwLock<HashSet<String>>>,
whitelists: Arc<RwLock<HashSet<String>>>,
listeners: Arc<RwLock<HashSet<XsdAnyUri>>>,
node_cache: NodeCache,
pub(crate) requests: Requests,
pub(crate) public_key: RsaPublicKey,
object_cache: Arc<RwLock<LruCache<IriString, IriString>>>,
pub(crate) node_cache: NodeCache,
breakers: Breakers,
pub(crate) last_online: Arc<LastOnline>,
pub(crate) db: Db,
}
impl std::fmt::Debug for State {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("State")
.field("node_cache", &self.node_cache)
.field("breakers", &self.breakers)
.field("db", &self.db)
.finish()
}
}
impl State {
pub fn node_cache(&self) -> NodeCache {
self.node_cache.clone()
}
pub fn requests(&self) -> Requests {
Requests::new(
self.config.generate_url(UrlKind::MainKey).to_string(),
self.private_key.clone(),
format!(
"Actix Web 3.0.0-alpha.1 ({}/{}; +{})",
self.config.software_name(),
self.config.software_version(),
self.config.generate_url(UrlKind::Index),
),
#[tracing::instrument(
level = "debug",
name = "Get inboxes for other domains",
skip_all,
fields(
existing_inbox = existing_inbox.to_string().as_str(),
authority
)
}
pub async fn bust_whitelist(&self, whitelist: &str) {
self.whitelists.write().await.remove(whitelist);
}
pub async fn bust_block(&self, block: &str) {
self.blocks.write().await.remove(block);
}
pub async fn bust_listener(&self, inbox: &XsdAnyUri) {
self.listeners.write().await.remove(inbox);
}
pub async fn listeners(&self) -> Vec<XsdAnyUri> {
self.listeners.read().await.iter().cloned().collect()
}
pub async fn blocks(&self) -> Vec<String> {
self.blocks.read().await.iter().cloned().collect()
}
pub async fn listeners_without(&self, inbox: &XsdAnyUri, domain: &str) -> Vec<XsdAnyUri> {
self.listeners
.read()
.await
)]
pub(crate) async fn inboxes_without(
&self,
existing_inbox: &IriString,
authority: &str,
) -> Result<Vec<IriString>, Error> {
Ok(self
.db
.inboxes()
.await?
.iter()
.filter_map(|listener| {
if let Some(dom) = listener.as_url().domain() {
if listener != inbox && dom != domain {
return Some(listener.clone());
.filter_map(|inbox| {
if let Some(authority_str) = inbox.authority_str() {
if inbox != existing_inbox && authority_str != authority {
return Some(inbox.clone());
}
}
None
})
.collect()
.collect())
}
pub async fn is_whitelisted(&self, actor_id: &XsdAnyUri) -> bool {
if !self.config.whitelist_mode() {
return true;
}
if let Some(domain) = actor_id.as_url().domain() {
return self.whitelists.read().await.contains(domain);
}
false
pub(crate) fn is_cached(&self, object_id: &IriString) -> bool {
self.object_cache.read().unwrap().contains(object_id)
}
pub async fn is_blocked(&self, actor_id: &XsdAnyUri) -> bool {
if let Some(domain) = actor_id.as_url().domain() {
return self.blocks.read().await.contains(domain);
}
true
pub(crate) fn cache(&self, object_id: IriString, actor_id: IriString) {
let mut guard = self.object_cache.write().unwrap();
guard.put(object_id, actor_id);
metrics::gauge!("relay.object-cache.size").set(crate::collector::recordable(guard.len()));
}
pub async fn is_listener(&self, actor_id: &XsdAnyUri) -> bool {
self.listeners.read().await.contains(actor_id)
pub(crate) fn is_connected(&self, iri: &IriString) -> bool {
self.breakers.should_try(iri)
}
pub async fn is_cached(&self, object_id: &XsdAnyUri) -> bool {
self.actor_id_cache.read().await.contains(object_id)
}
#[tracing::instrument(level = "debug", name = "Building state", skip_all)]
pub(crate) async fn build(
db: Db,
key_id: String,
spawner: Spawner,
client: ClientWithMiddleware,
) -> Result<Self, Error> {
let private_key = if let Ok(Some(key)) = db.private_key().await {
tracing::debug!("Using existing key");
key
} else {
tracing::info!("Generating new keys");
let key = web::block(move || {
let mut rng = thread_rng();
RsaPrivateKey::new(&mut rng, 4096)
})
.await??;
pub async fn cache(&self, object_id: XsdAnyUri, actor_id: XsdAnyUri) {
self.actor_id_cache.write().await.put(object_id, actor_id);
}
db.update_private_key(&key).await?;
pub async fn cache_block(&self, host: String) {
self.blocks.write().await.insert(host);
}
pub async fn cache_whitelist(&self, host: String) {
self.whitelists.write().await.insert(host);
}
pub async fn cache_listener(&self, listener: XsdAnyUri) {
self.listeners.write().await.insert(listener);
}
pub async fn rehydrate(&self, db: &Db) -> Result<(), MyError> {
let f1 = db.hydrate_blocks();
let f2 = db.hydrate_whitelists();
let f3 = db.hydrate_listeners();
let (blocks, whitelists, listeners) = try_join!(f1, f2, f3)?;
join!(
async move {
*self.listeners.write().await = listeners;
},
async move {
*self.whitelists.write().await = whitelists;
},
async move {
*self.blocks.write().await = blocks;
}
);
Ok(())
}
pub async fn hydrate(config: Config, db: &Db) -> Result<Self, MyError> {
let f1 = db.hydrate_blocks();
let f2 = db.hydrate_whitelists();
let f3 = db.hydrate_listeners();
let f4 = async move {
if let Ok(Some(key)) = db.hydrate_private_key().await {
Ok(key)
} else {
info!("Generating new keys");
let key = web::block(move || {
let mut rng = thread_rng();
RSAPrivateKey::new(&mut rng, 4096)
})
.await?;
db.update_private_key(&key).await?;
Ok(key)
}
key
};
let (blocks, whitelists, listeners, private_key) = try_join!(f1, f2, f3, f4)?;
let public_key = private_key.to_public_key();
let listeners = Arc::new(RwLock::new(listeners));
let breakers = Breakers::default();
let last_online = Arc::new(LastOnline::empty());
let requests = Requests::new(
key_id,
private_key,
breakers.clone(),
last_online.clone(),
spawner,
client,
);
let state = State {
requests,
public_key,
private_key,
config,
actor_id_cache: Arc::new(RwLock::new(LruCache::new(1024 * 8))),
blocks: Arc::new(RwLock::new(blocks)),
whitelists: Arc::new(RwLock::new(whitelists)),
listeners: listeners.clone(),
node_cache: NodeCache::new(db.clone(), listeners),
object_cache: Arc::new(RwLock::new(LruCache::new(
(1024 * 8).try_into().expect("nonzero"),
))),
node_cache: NodeCache::new(db.clone()),
breakers,
db,
last_online,
};
state.spawn_rehydrate(db.clone());
Ok(state)
}
fn spawn_rehydrate(&self, db: Db) {
let state = self.clone();
spawn(async move {
let start = Instant::now();
let duration = Duration::from_secs(60 * 10);
let mut interval = interval_at(start, duration);
loop {
interval.tick().await;
if let Err(e) = state.rehydrate(&db).await {
error!("Error rehydrating, {}", e);
}
}
});
}
}
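State's object cache is now a std RwLock around an LruCache, so is_cached and cache are synchronous and the lock is held only for the lookup or insert. A sketch of the dedupe flow it supports; object_id and actor_id are hypothetical:

if !state.is_cached(&object_id) {
    // first sighting: remember which actor announced this object
    state.cache(object_id.clone(), actor_id.clone());
}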

src/db.rs
File diff suppressed because it is too large


@@ -1,43 +1,169 @@
use activitystreams_new::primitives::XsdAnyUriError;
use activitystreams::checked::CheckError;
use actix_web::{
error::{BlockingError, ResponseError},
http::StatusCode,
HttpResponse,
};
use deadpool::managed::{PoolError, TimeoutType};
use http_signature_normalization_actix::PrepareSignError;
use log::error;
use rsa_pem::KeyError;
use std::{convert::Infallible, fmt::Debug, io::Error};
use background_jobs::BoxError;
use color_eyre::eyre::Error as Report;
use http_signature_normalization_reqwest::SignError;
use std::{convert::Infallible, io, sync::Arc};
use tokio::task::JoinError;
#[derive(Clone)]
struct ArcKind {
kind: Arc<ErrorKind>,
}
impl std::fmt::Debug for ArcKind {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.kind.fmt(f)
}
}
impl std::fmt::Display for ArcKind {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.kind.fmt(f)
}
}
impl std::error::Error for ArcKind {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
self.kind.source()
}
}
pub(crate) struct Error {
kind: ArcKind,
display: Box<str>,
debug: Box<str>,
}
impl Error {
fn kind(&self) -> &ErrorKind {
&self.kind.kind
}
pub(crate) fn is_breaker(&self) -> bool {
matches!(self.kind(), ErrorKind::Breaker)
}
pub(crate) fn is_not_found(&self) -> bool {
matches!(self.kind(), ErrorKind::Status(_, StatusCode::NOT_FOUND))
}
pub(crate) fn is_bad_request(&self) -> bool {
matches!(self.kind(), ErrorKind::Status(_, StatusCode::BAD_REQUEST))
}
pub(crate) fn is_gone(&self) -> bool {
matches!(self.kind(), ErrorKind::Status(_, StatusCode::GONE))
}
pub(crate) fn is_malformed_json(&self) -> bool {
matches!(self.kind(), ErrorKind::Json(_))
}
}
impl std::fmt::Debug for Error {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str(&self.debug)
}
}
impl std::fmt::Display for Error {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str(&self.display)
}
}
impl std::error::Error for Error {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
self.kind().source()
}
}
impl<T> From<T> for Error
where
ErrorKind: From<T>,
{
fn from(error: T) -> Self {
let kind = ArcKind {
kind: Arc::new(ErrorKind::from(error)),
};
let report = Report::new(kind.clone());
let display = format!("{report}");
let debug = format!("{report:?}");
Error {
kind,
display: Box::from(display),
debug: Box::from(debug),
}
}
}
#[derive(Debug, thiserror::Error)]
pub enum MyError {
#[error("Error queueing job, {0}")]
Queue(anyhow::Error),
pub(crate) enum ErrorKind {
#[error("Error in extractor")]
Extractor(#[from] crate::extractors::ErrorKind),
#[error("Error in configuration, {0}")]
#[error("Error queueing job")]
Queue(#[from] BoxError),
#[error("Error in configuration")]
Config(#[from] config::ConfigError),
#[error("Error in db, {0}")]
DbError(#[from] tokio_postgres::error::Error),
#[error("Couldn't parse key")]
Pkcs8(#[from] rsa::pkcs8::Error),
#[error("Couldn't parse key, {0}")]
Key(#[from] KeyError),
#[error("Couldn't encode public key")]
Spki(#[from] rsa::pkcs8::spki::Error),
#[error("Couldn't parse URI, {0}")]
Uri(#[from] XsdAnyUriError),
#[error("Couldn't sign request")]
SignRequest,
#[error("Couldn't perform IO, {0}")]
Io(#[from] Error),
#[error("Response body from server exceeded limits")]
BodyTooLarge,
#[error("Couldn't make request")]
Reqwest(#[from] reqwest::Error),
#[error("Couldn't make request")]
ReqwestMiddleware(#[from] reqwest_middleware::Error),
#[error("Couldn't parse IRI")]
ParseIri(#[from] activitystreams::iri_string::validate::Error),
#[error("Couldn't normalize IRI")]
NormalizeIri(#[from] std::collections::TryReserveError),
#[error("Couldn't perform IO")]
Io(#[from] io::Error),
#[error("Couldn't sign string, {0}")]
Rsa(rsa::errors::Error),
#[error("Couldn't use db")]
Sled(#[from] sled::Error),
#[error("Couldn't do the json thing")]
Json(#[from] serde_json::Error),
#[error("Couldn't build signing string, {0}")]
PrepareSign(#[from] PrepareSignError),
#[error("Couldn't sign request")]
Sign(#[from] SignError),
#[error("Couldn't sign digest")]
Signature(#[from] rsa::signature::Error),
#[error("Couldn't prepare TLS private key")]
PrepareKey(#[from] rustls::Error),
#[error("Couldn't verify signature")]
VerifySignature,
#[error("Failed to encode key der")]
DerEncode,
#[error("Couldn't parse the signature header")]
HeaderValidation(#[from] actix_web::http::header::InvalidHeaderValue),
@@ -48,11 +174,8 @@ pub enum MyError {
#[error("Actor ({0}), or Actor's server, is not subscribed")]
NotSubscribed(String),
#[error("Actor is blocked, {0}")]
Blocked(String),
#[error("Actor is not whitelisted, {0}")]
Whitelist(String),
#[error("Actor is not allowed, {0}")]
NotAllowed(String),
#[error("Cannot make decisions for foreign actor, {0}")]
WrongActor(String),
@@ -61,26 +184,20 @@
BadActor(String, String),
#[error("Signature verification is required, but no signature was given")]
NoSignature(String),
NoSignature(Option<String>),
#[error("Wrong ActivityPub kind, {0}")]
Kind(String),
#[error("Too many CPUs, {0}")]
#[error("Too many CPUs")]
CpuCount(#[from] std::num::TryFromIntError),
#[error("Hosts don't match, {0}, {1}")]
HostMismatch(String, String),
#[error("Invalid or missing content type")]
ContentType,
#[error("Host mismatch")]
HostMismatch(#[from] CheckError),
#[error("Couldn't flush buffer")]
FlushBuffer,
#[error("Timed out while waiting on db pool, {0:?}")]
DbTimeout(TimeoutType),
#[error("Invalid algorithm provided to verifier, {0}")]
Algorithm(String),
@@ -93,8 +210,8 @@ pub enum MyError {
#[error("Couldn't receive request response from {0}, {1}")]
ReceiveResponse(String, String),
#[error("Response has invalid status code, {0}")]
Status(StatusCode),
#[error("Response from {0} has invalid status code, {1}")]
Status(String, StatusCode),
#[error("Expected an Object, found something else")]
ObjectFormat,
@@ -108,70 +225,86 @@ pub enum MyError {
#[error("Input is missing a 'id' field")]
MissingId,
#[error("IriString is missing a domain")]
MissingDomain,
#[error("URI is missing domain field")]
Domain,
#[error("Blocking operation was canceled")]
Canceled,
#[error("Not trying request due to failed breaker")]
Breaker,
#[error("Failed to extract fields from {0}")]
Extract(&'static str),
#[error("No API Token supplied")]
MissingApiToken,
}
impl ResponseError for MyError {
impl ResponseError for Error {
fn status_code(&self) -> StatusCode {
match self {
MyError::Blocked(_)
| MyError::Whitelist(_)
| MyError::WrongActor(_)
| MyError::BadActor(_, _) => StatusCode::FORBIDDEN,
MyError::NotSubscribed(_) => StatusCode::UNAUTHORIZED,
MyError::Duplicate => StatusCode::ACCEPTED,
MyError::Kind(_) | MyError::MissingKind | MyError::MissingId | MyError::ObjectCount => {
StatusCode::BAD_REQUEST
match self.kind() {
ErrorKind::NotAllowed(_) | ErrorKind::WrongActor(_) | ErrorKind::BadActor(_, _) => {
StatusCode::FORBIDDEN
}
ErrorKind::NotSubscribed(_) => StatusCode::UNAUTHORIZED,
ErrorKind::Duplicate => StatusCode::ACCEPTED,
ErrorKind::Kind(_)
| ErrorKind::MissingKind
| ErrorKind::MissingId
| ErrorKind::ObjectCount
| ErrorKind::NoSignature(_) => StatusCode::BAD_REQUEST,
_ => StatusCode::INTERNAL_SERVER_ERROR,
}
}
fn error_response(&self) -> HttpResponse {
HttpResponse::build(self.status_code())
.header("Content-Type", "application/activity+json")
.json(serde_json::json!({
"error": self.to_string(),
}))
.insert_header(("Content-Type", "application/activity+json"))
.body(
serde_json::to_string(&serde_json::json!({
"error": self.kind().to_string(),
}))
.unwrap_or_else(|_| "{}".to_string()),
)
}
}
impl<T> From<BlockingError<T>> for MyError
where
T: Into<MyError> + Debug,
{
fn from(e: BlockingError<T>) -> Self {
match e {
BlockingError::Error(e) => e.into(),
BlockingError::Canceled => MyError::Canceled,
}
impl From<BlockingError> for ErrorKind {
fn from(_: BlockingError) -> Self {
ErrorKind::Canceled
}
}
impl<T> From<PoolError<T>> for MyError
where
T: Into<MyError>,
{
fn from(e: PoolError<T>) -> Self {
match e {
PoolError::Backend(e) => e.into(),
PoolError::Timeout(t) => MyError::DbTimeout(t),
}
impl From<JoinError> for ErrorKind {
fn from(_: JoinError) -> Self {
ErrorKind::Canceled
}
}
impl From<Infallible> for MyError {
impl From<Infallible> for ErrorKind {
fn from(i: Infallible) -> Self {
match i {}
}
}
impl From<rsa::errors::Error> for MyError {
impl From<rsa::errors::Error> for ErrorKind {
fn from(e: rsa::errors::Error) -> Self {
MyError::Rsa(e)
ErrorKind::Rsa(e)
}
}
impl From<http_signature_normalization_actix::Canceled> for ErrorKind {
fn from(_: http_signature_normalization_actix::Canceled) -> Self {
Self::Canceled
}
}
impl From<http_signature_normalization_reqwest::Canceled> for ErrorKind {
fn from(_: http_signature_normalization_reqwest::Canceled) -> Self {
Self::Canceled
}
}
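The blanket From impl means any source that converts into ErrorKind becomes an Error whose Display and Debug text are rendered once through color-eyre's Report, and the Arc'd kind keeps clones cheap. A sketch, assuming the module's types as defined above:

fn parse(input: &str) -> Result<serde_json::Value, Error> {
    // serde_json::Error -> ErrorKind::Json (#[from]) -> Error (blanket impl)
    Ok(serde_json::from_str(input)?)
}
// parse("{").unwrap_err().is_malformed_json() evaluates to true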

src/extractors.rs Normal file

@@ -0,0 +1,202 @@
use actix_web::{
dev::Payload,
error::ParseError,
http::header::{from_one_raw_str, Header, HeaderName, HeaderValue, TryIntoHeaderValue},
web::Data,
FromRequest, HttpMessage, HttpRequest,
};
use bcrypt::{BcryptError, DEFAULT_COST};
use http_signature_normalization_actix::{prelude::InvalidHeaderValue, Canceled, Spawn};
use std::{convert::Infallible, str::FromStr, time::Instant};
use crate::{db::Db, error::Error, future::LocalBoxFuture, spawner::Spawner};
#[derive(Clone)]
pub(crate) struct AdminConfig {
hashed_api_token: String,
}
impl AdminConfig {
pub(crate) fn build(api_token: &str) -> Result<Self, Error> {
Ok(AdminConfig {
hashed_api_token: bcrypt::hash(api_token, DEFAULT_COST).map_err(Error::bcrypt_hash)?,
})
}
fn verify(&self, token: XApiToken) -> Result<bool, Error> {
bcrypt::verify(token.0, &self.hashed_api_token).map_err(Error::bcrypt_verify)
}
}
pub(crate) struct Admin {
db: Data<Db>,
}
type PrepareTuple = (Data<Db>, Data<AdminConfig>, Data<Spawner>, XApiToken);
impl Admin {
fn prepare_verify(req: &HttpRequest) -> Result<PrepareTuple, Error> {
let hashed_api_token = req
.app_data::<Data<AdminConfig>>()
.ok_or_else(Error::missing_config)?
.clone();
let x_api_token = XApiToken::parse(req).map_err(Error::parse_header)?;
let db = req
.app_data::<Data<Db>>()
.ok_or_else(Error::missing_db)?
.clone();
let spawner = req
.app_data::<Data<Spawner>>()
.ok_or_else(Error::missing_spawner)?
.clone();
Ok((db, hashed_api_token, spawner, x_api_token))
}
#[tracing::instrument(level = "debug", skip_all)]
async fn verify(
hashed_api_token: Data<AdminConfig>,
spawner: Data<Spawner>,
x_api_token: XApiToken,
) -> Result<(), Error> {
let span = tracing::Span::current();
if spawner
.spawn_blocking(move || span.in_scope(|| hashed_api_token.verify(x_api_token)))
.await
.map_err(Error::canceled)??
{
return Ok(());
}
Err(Error::invalid())
}
pub(crate) fn db_ref(&self) -> &Db {
&self.db
}
}
impl Error {
fn invalid() -> Self {
Error::from(ErrorKind::Invalid)
}
fn missing_config() -> Self {
Error::from(ErrorKind::MissingConfig)
}
fn missing_db() -> Self {
Error::from(ErrorKind::MissingDb)
}
fn missing_spawner() -> Self {
Error::from(ErrorKind::MissingSpawner)
}
fn bcrypt_verify(e: BcryptError) -> Self {
Error::from(ErrorKind::BCryptVerify(e))
}
fn bcrypt_hash(e: BcryptError) -> Self {
Error::from(ErrorKind::BCryptHash(e))
}
fn parse_header(e: ParseError) -> Self {
Error::from(ErrorKind::ParseHeader(e))
}
fn canceled(_: Canceled) -> Self {
Error::from(ErrorKind::Canceled)
}
}
#[derive(Debug, thiserror::Error)]
pub(crate) enum ErrorKind {
#[error("Invalid API Token")]
Invalid,
#[error("Missing Config")]
MissingConfig,
#[error("Missing Db")]
MissingDb,
#[error("Missing Spawner")]
MissingSpawner,
#[error("Panic in verify")]
Canceled,
#[error("Verifying")]
BCryptVerify(#[source] BcryptError),
#[error("Hashing")]
BCryptHash(#[source] BcryptError),
#[error("Parse Header")]
ParseHeader(#[source] ParseError),
}
impl FromRequest for Admin {
type Error = Error;
type Future = LocalBoxFuture<'static, Result<Self, Self::Error>>;
fn from_request(req: &HttpRequest, _: &mut Payload) -> Self::Future {
let now = Instant::now();
let res = Self::prepare_verify(req);
Box::pin(async move {
let (db, c, s, t) = res?;
Self::verify(c, s, t).await?;
metrics::histogram!("relay.admin.verify")
.record(now.elapsed().as_micros() as f64 / 1_000_000_f64);
Ok(Admin { db })
})
}
}
pub(crate) struct XApiToken(String);
impl XApiToken {
pub(crate) fn new(token: String) -> Self {
Self(token)
}
pub(crate) const fn http1_name() -> reqwest::header::HeaderName {
reqwest::header::HeaderName::from_static("x-api-token")
}
}
impl Header for XApiToken {
fn name() -> HeaderName {
HeaderName::from_static("x-api-token")
}
fn parse<M: HttpMessage>(msg: &M) -> Result<Self, ParseError> {
from_one_raw_str(msg.headers().get(Self::name()))
}
}
impl TryIntoHeaderValue for XApiToken {
type Error = InvalidHeaderValue;
fn try_into_value(self) -> Result<HeaderValue, Self::Error> {
HeaderValue::from_str(&self.0)
}
}
impl FromStr for XApiToken {
type Err = Infallible;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(XApiToken(s.to_string()))
}
}
impl std::fmt::Display for XApiToken {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.0.fmt(f)
}
}
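Because Admin implements FromRequest, any handler that names it as a parameter gets the bcrypt check (run on the Spawner's blocking pool) before its body executes, plus a ready Db handle on success. A hypothetical admin endpoint; route registration is assumed elsewhere:

async fn stats(admin: Admin) -> Result<actix_web::HttpResponse, crate::error::Error> {
    let _db = admin.db_ref(); // only verified requests reach this point
    Ok(actix_web::HttpResponse::Ok().finish())
}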

src/future.rs Normal file

@@ -0,0 +1,4 @@
use std::{future::Future, pin::Pin};
pub(crate) type LocalBoxFuture<'a, T> = Pin<Box<dyn Future<Output = T> + 'a>>;
pub(crate) type BoxFuture<'a, T> = Pin<Box<dyn Future<Output = T> + Send + 'a>>;
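Both aliases just name pinned, boxed futures; LocalBoxFuture drops the Send bound, which is what lets the actix extractor in src/extractors.rs hold non-Send data. A minimal sketch:

fn answer() -> LocalBoxFuture<'static, u32> {
    Box::pin(async { 42 })
}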

src/http1.rs Normal file

@@ -0,0 +1,18 @@
pub(crate) fn name_to_http02(
name: &reqwest::header::HeaderName,
) -> actix_web::http::header::HeaderName {
actix_web::http::header::HeaderName::from_bytes(name.as_ref())
.expect("headername conversions always work")
}
pub(crate) fn value_to_http02(
value: &reqwest::header::HeaderValue,
) -> actix_web::http::header::HeaderValue {
actix_web::http::header::HeaderValue::from_bytes(value.as_bytes())
.expect("headervalue conversions always work")
}
pub(crate) fn status_to_http02(status: reqwest::StatusCode) -> actix_web::http::StatusCode {
actix_web::http::StatusCode::from_u16(status.as_u16())
.expect("statuscode conversions always work")
}
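These converters bridge the reqwest (http 1.x) and actix-web (http 0.2.x) header types byte for byte, which is why the expect calls are safe: a name or value that was valid on one side is valid on the other. A sketch of copying a response across the boundary, where res is a hypothetical reqwest::Response:

let mut builder = actix_web::HttpResponse::build(status_to_http02(res.status()));
for (name, value) in res.headers() {
    builder.insert_header((name_to_http02(name), value_to_http02(value)));
}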

src/jobs.rs Normal file

@@ -0,0 +1,201 @@
pub mod apub;
mod contact;
mod deliver;
mod deliver_many;
mod instance;
mod nodeinfo;
mod process_listeners;
mod record_last_online;
pub(crate) use self::{
contact::QueryContact, deliver::Deliver, deliver_many::DeliverMany, instance::QueryInstance,
nodeinfo::QueryNodeinfo,
};
use crate::{
config::Config,
data::{ActorCache, MediaCache, State},
error::{Error, ErrorKind},
jobs::{process_listeners::Listeners, record_last_online::RecordLastOnline},
};
use background_jobs::{
memory_storage::{Storage, TokioTimer},
metrics::MetricsStorage,
tokio::{QueueHandle, WorkerConfig},
Job,
};
use std::time::Duration;
fn debug_object(activity: &serde_json::Value) -> &serde_json::Value {
let mut object = &activity["object"]["type"];
if object.is_null() {
object = &activity["object"]["id"];
}
if object.is_null() {
object = &activity["object"];
}
object
}
pub(crate) fn build_storage() -> MetricsStorage<Storage<TokioTimer>> {
MetricsStorage::wrap(Storage::new(TokioTimer))
}
pub(crate) fn create_workers(
storage: MetricsStorage<Storage<TokioTimer>>,
state: State,
actors: ActorCache,
media: MediaCache,
config: Config,
) -> std::io::Result<JobServer> {
let deliver_concurrency = config.deliver_concurrency();
let queue_handle = WorkerConfig::new(storage, move |queue_handle| {
JobState::new(
state.clone(),
actors.clone(),
JobServer::new(queue_handle),
media.clone(),
config.clone(),
)
})
.register::<Deliver>()
.register::<DeliverMany>()
.register::<QueryNodeinfo>()
.register::<QueryInstance>()
.register::<Listeners>()
.register::<QueryContact>()
.register::<RecordLastOnline>()
.register::<apub::Announce>()
.register::<apub::Follow>()
.register::<apub::Forward>()
.register::<apub::Reject>()
.register::<apub::Undo>()
.set_worker_count("maintenance", 2)
.set_worker_count("apub", 2)
.set_worker_count("deliver", deliver_concurrency)
.start()?;
queue_handle.every(Duration::from_secs(60 * 5), Listeners)?;
queue_handle.every(Duration::from_secs(60 * 10), RecordLastOnline)?;
Ok(JobServer::new(queue_handle))
}
#[derive(Clone, Debug)]
pub(crate) struct JobState {
state: State,
actors: ActorCache,
config: Config,
media: MediaCache,
job_server: JobServer,
}
#[derive(Clone)]
pub(crate) struct JobServer {
remote: QueueHandle,
}
impl std::fmt::Debug for JobServer {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("JobServer")
.field("queue_handle", &"QueueHandle")
.finish()
}
}
impl JobState {
fn new(
state: State,
actors: ActorCache,
job_server: JobServer,
media: MediaCache,
config: Config,
) -> Self {
JobState {
state,
actors,
config,
media,
job_server,
}
}
}
impl JobServer {
fn new(remote_handle: QueueHandle) -> Self {
JobServer {
remote: remote_handle,
}
}
pub(crate) async fn queue<J>(&self, job: J) -> Result<(), Error>
where
J: Job,
{
self.remote
.queue(job)
.await
.map_err(ErrorKind::Queue)
.map_err(Into::into)
}
}
struct Boolish {
inner: bool,
}
impl std::ops::Deref for Boolish {
type Target = bool;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl<'de> serde::Deserialize<'de> for Boolish {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
#[derive(serde::Deserialize)]
#[serde(untagged)]
enum BoolThing {
Bool(bool),
String(String),
}
let thing: BoolThing = serde::Deserialize::deserialize(deserializer)?;
match thing {
BoolThing::Bool(inner) => Ok(Boolish { inner }),
BoolThing::String(s) if s.to_lowercase() == "false" => Ok(Boolish { inner: false }),
BoolThing::String(_) => Ok(Boolish { inner: true }),
}
}
}
#[cfg(test)]
mod tests {
use super::Boolish;
#[test]
fn boolish_works() {
const CASES: &[(&str, bool)] = &[
("false", false),
("\"false\"", false),
("\"FALSE\"", false),
("true", true),
("\"true\"", true),
("\"anything else\"", true),
];
for (case, output) in CASES {
let b: Boolish = serde_json::from_str(case).unwrap();
assert_eq!(*b, *output);
}
}
}

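`Boolish` pairs lenient deserialization with `Deref`, so callers can write `*value` as though it held a plain `bool`. A standalone sketch of that newtype pattern (the `Flag` type is illustrative, not from this changeset):

// Illustrative newtype: deserialize leniently, read like a plain bool.
struct Flag(bool);

impl std::ops::Deref for Flag {
    type Target = bool;
    fn deref(&self) -> &bool {
        &self.0
    }
}

fn main() {
    let debug = Flag(false);
    // Deref coercion lets callers branch on the wrapper directly.
    if !*debug {
        println!("debug output disabled");
    }
}
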
@@ -1,13 +1,14 @@
use crate::{
config::{Config, UrlKind},
data::{Actor, State},
error::MyError,
data::State,
db::Actor,
error::{Error, ErrorKind},
};
use activitystreams_new::{
use activitystreams::{
activity::{Follow as AsFollow, Undo as AsUndo},
context,
iri_string::types::IriString,
prelude::*,
primitives::XsdAnyUri,
security,
};
use std::convert::TryInto;
@@ -18,30 +19,31 @@ mod forward;
mod reject;
mod undo;
pub use self::{announce::Announce, follow::Follow, forward::Forward, reject::Reject, undo::Undo};
pub(crate) use self::{
announce::Announce, follow::Follow, forward::Forward, reject::Reject, undo::Undo,
};
async fn get_inboxes(
state: &State,
actor: &Actor,
object_id: &XsdAnyUri,
) -> Result<Vec<XsdAnyUri>, MyError> {
let domain = object_id
.as_url()
.host()
.ok_or(MyError::Domain)?
object_id: &IriString,
) -> Result<Vec<IriString>, Error> {
let authority = object_id
.authority_str()
.ok_or(ErrorKind::Domain)?
.to_string();
Ok(state.listeners_without(&actor.inbox, &domain).await)
state.inboxes_without(&actor.inbox, &authority).await
}
fn prepare_activity<T, U, V, Kind>(
fn prepare_activity<T, U, V>(
mut t: T,
id: impl TryInto<XsdAnyUri, Error = U>,
to: impl TryInto<XsdAnyUri, Error = V>,
) -> Result<T, MyError>
id: impl TryInto<IriString, Error = U>,
to: impl TryInto<IriString, Error = V>,
) -> Result<T, Error>
where
T: ObjectExt<Kind> + BaseExt<Kind>,
MyError: From<U> + From<V>,
T: ObjectExt + BaseExt,
Error: From<U> + From<V>,
{
t.set_id(id.try_into()?)
.set_many_tos(vec![to.try_into()?])
@@ -52,9 +54,9 @@ where
// Generate a type that says "I want to stop following you"
fn generate_undo_follow(
config: &Config,
actor_id: &XsdAnyUri,
my_id: &XsdAnyUri,
) -> Result<AsUndo, MyError> {
actor_id: &IriString,
my_id: &IriString,
) -> Result<AsUndo, Error> {
let mut follow = AsFollow::new(my_id.clone(), actor_id.clone());
follow.set_id(config.generate_url(UrlKind::Activity));

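All of the activity builders in this module funnel through `prepare_activity`, which stamps a generated id and the addressing onto any type implementing `ObjectExt + BaseExt`. A hedged usage sketch with the `Follow` type from this diff; the literal IRIs are placeholders, and in the real code `prepare_activity` derives them from config-generated URLs:

use activitystreams::{activity::Follow, iri, prelude::*};

// Sketch: Follow::new(actor, object) says "the relay wants to follow
// the remote actor"; set_id attaches the generated activity URL.
fn example_follow() -> Follow {
    let mut follow = Follow::new(
        iri!("https://relay.example/actor"),
        iri!("https://remote.example/actor"),
    );
    follow.set_id(iri!("https://relay.example/activities/1"));
    follow
}
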
@@ -1,37 +1,48 @@
use crate::{
config::{Config, UrlKind},
data::Actor,
error::MyError,
db::Actor,
error::Error,
future::BoxFuture,
jobs::{
apub::{get_inboxes, prepare_activity},
DeliverMany, JobState,
},
};
use activitystreams_new::{activity::Announce as AsAnnounce, primitives::XsdAnyUri};
use background_jobs::ActixJob;
use std::{future::Future, pin::Pin};
use activitystreams::{activity::Announce as AsAnnounce, iri_string::types::IriString};
use background_jobs::Job;
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct Announce {
object_id: XsdAnyUri,
#[derive(Clone, serde::Deserialize, serde::Serialize)]
pub(crate) struct Announce {
object_id: IriString,
actor: Actor,
}
impl std::fmt::Debug for Announce {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Announce")
.field("object_id", &self.object_id.to_string())
.field("actor_id", &self.actor.id)
.finish()
}
}
impl Announce {
pub fn new(object_id: XsdAnyUri, actor: Actor) -> Self {
pub fn new(object_id: IriString, actor: Actor) -> Self {
Announce { object_id, actor }
}
async fn perform(self, state: JobState) -> Result<(), anyhow::Error> {
#[tracing::instrument(name = "Announce", skip(state))]
async fn perform(self, state: JobState) -> Result<(), Error> {
let activity_id = state.config.generate_url(UrlKind::Activity);
let announce = generate_announce(&state.config, &activity_id, &self.object_id)?;
let inboxes = get_inboxes(&state.state, &self.actor, &self.object_id).await?;
state
.job_server
.queue(DeliverMany::new(inboxes, announce)?)?;
.queue(DeliverMany::new(inboxes, announce)?)
.await?;
state.state.cache(self.object_id, activity_id).await;
state.state.cache(self.object_id, activity_id);
Ok(())
}
}
@@ -39,9 +50,9 @@ impl Announce {
// Generate a type that says "Look at this object"
fn generate_announce(
config: &Config,
activity_id: &XsdAnyUri,
object_id: &XsdAnyUri,
) -> Result<AsAnnounce, MyError> {
activity_id: &IriString,
object_id: &IriString,
) -> Result<AsAnnounce, Error> {
let announce = AsAnnounce::new(config.generate_url(UrlKind::Actor), object_id.clone());
prepare_activity(
@@ -51,11 +62,13 @@ fn generate_announce(
)
}
impl ActixJob for Announce {
impl Job for Announce {
type State = JobState;
type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
type Error = Error;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
const NAME: &'static str = "relay::jobs::apub::Announce";
const QUEUE: &'static str = "apub";
fn run(self, state: Self::State) -> Self::Future {
Box::pin(self.perform(state))

@@ -1,60 +1,77 @@
use crate::{
apub::AcceptedActivities,
config::{Config, UrlKind},
data::Actor,
error::MyError,
jobs::{apub::prepare_activity, Deliver, JobState},
db::Actor,
error::{Error, ErrorKind},
future::BoxFuture,
jobs::{apub::prepare_activity, Deliver, JobState, QueryInstance, QueryNodeinfo},
};
use activitystreams_new::{
use activitystreams::{
activity::{Accept as AsAccept, Follow as AsFollow},
iri_string::types::IriString,
prelude::*,
primitives::XsdAnyUri,
};
use background_jobs::ActixJob;
use std::{future::Future, pin::Pin};
use background_jobs::Job;
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct Follow {
is_listener: bool,
#[derive(Clone, serde::Deserialize, serde::Serialize)]
pub(crate) struct Follow {
input: AcceptedActivities,
actor: Actor,
}
impl std::fmt::Debug for Follow {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Follow")
.field("input", &self.input.id_unchecked())
.field("actor", &self.actor.id)
.finish()
}
}
impl Follow {
pub fn new(is_listener: bool, input: AcceptedActivities, actor: Actor) -> Self {
Follow {
is_listener,
input,
actor,
}
pub fn new(input: AcceptedActivities, actor: Actor) -> Self {
Follow { input, actor }
}
async fn perform(self, state: JobState) -> Result<(), anyhow::Error> {
if !self.is_listener {
state.db.add_listener(self.actor.inbox.clone()).await?;
}
#[tracing::instrument(name = "Follow", skip(state))]
async fn perform(self, state: JobState) -> Result<(), Error> {
let my_id = state.config.generate_url(UrlKind::Actor);
// if following relay directly, not just following 'public', followback
if self.input.object_is(&my_id) && !state.actors.is_following(&self.actor.id).await {
if self.input.object_is(&my_id)
&& !state.state.db.is_connected(self.actor.id.clone()).await?
{
let follow = generate_follow(&state.config, &self.actor.id, &my_id)?;
state
.job_server
.queue(Deliver::new(self.actor.inbox.clone(), follow)?)?;
.queue(Deliver::new(self.actor.inbox.clone(), follow)?)
.await?;
}
state.actors.follower(&self.actor).await?;
state.actors.add_connection(self.actor.clone()).await?;
let accept = generate_accept_follow(
&state.config,
&self.actor.id,
self.input.id().ok_or(MyError::MissingId)?,
self.input.id_unchecked().ok_or(ErrorKind::MissingId)?,
&my_id,
)?;
state
.job_server
.queue(Deliver::new(self.actor.inbox, accept)?)?;
.queue(Deliver::new(self.actor.inbox, accept)?)
.await?;
state
.job_server
.queue(QueryInstance::new(self.actor.id.clone()))
.await?;
state
.job_server
.queue(QueryNodeinfo::new(self.actor.id))
.await?;
Ok(())
}
}
@@ -62,9 +79,9 @@ impl Follow {
// Generate a type that says "I want to follow you"
fn generate_follow(
config: &Config,
actor_id: &XsdAnyUri,
my_id: &XsdAnyUri,
) -> Result<AsFollow, MyError> {
actor_id: &IriString,
my_id: &IriString,
) -> Result<AsFollow, Error> {
let follow = AsFollow::new(my_id.clone(), actor_id.clone());
prepare_activity(
@@ -77,10 +94,10 @@ fn generate_follow(
// Generate a type that says "I accept your follow request"
fn generate_accept_follow(
config: &Config,
actor_id: &XsdAnyUri,
input_id: &XsdAnyUri,
my_id: &XsdAnyUri,
) -> Result<AsAccept, MyError> {
actor_id: &IriString,
input_id: &IriString,
my_id: &IriString,
) -> Result<AsAccept, Error> {
let mut follow = AsFollow::new(actor_id.clone(), my_id.clone());
follow.set_id(input_id.clone());
@@ -94,11 +111,13 @@ fn generate_accept_follow(
)
}
impl ActixJob for Follow {
impl Job for Follow {
type State = JobState;
type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
type Error = Error;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
const NAME: &'static str = "relay::jobs::apub::Follow";
const QUEUE: &'static str = "apub";
fn run(self, state: Self::State) -> Self::Future {
Box::pin(self.perform(state))

@@ -1,46 +1,59 @@
use crate::{
apub::AcceptedActivities,
data::Actor,
error::MyError,
db::Actor,
error::{Error, ErrorKind},
future::BoxFuture,
jobs::{apub::get_inboxes, DeliverMany, JobState},
};
use activitystreams_new::prelude::*;
use background_jobs::ActixJob;
use std::{future::Future, pin::Pin};
use activitystreams::prelude::*;
use background_jobs::Job;
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct Forward {
#[derive(Clone, serde::Deserialize, serde::Serialize)]
pub(crate) struct Forward {
input: AcceptedActivities,
actor: Actor,
}
impl std::fmt::Debug for Forward {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Forward")
.field("input", &self.input.id_unchecked())
.field("actor", &self.actor.id)
.finish()
}
}
impl Forward {
pub fn new(input: AcceptedActivities, actor: Actor) -> Self {
Forward { input, actor }
}
async fn perform(self, state: JobState) -> Result<(), anyhow::Error> {
#[tracing::instrument(name = "Forward", skip(state))]
async fn perform(self, state: JobState) -> Result<(), Error> {
let object_id = self
.input
.object()
.object_unchecked()
.as_single_id()
.ok_or(MyError::MissingId)?;
.ok_or(ErrorKind::MissingId)?;
let inboxes = get_inboxes(&state.state, &self.actor, object_id).await?;
state
.job_server
.queue(DeliverMany::new(inboxes, self.input)?)?;
.queue(DeliverMany::new(inboxes, self.input)?)
.await?;
Ok(())
}
}
impl ActixJob for Forward {
impl Job for Forward {
type State = JobState;
type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
type Error = Error;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
const NAME: &'static str = "relay::jobs::apub::Forward";
const QUEUE: &'static str = "apub";
fn run(self, state: Self::State) -> Self::Future {
Box::pin(self.perform(state))

@@ -1,34 +1,45 @@
use crate::{
config::UrlKind,
data::Actor,
db::Actor,
error::Error,
future::BoxFuture,
jobs::{apub::generate_undo_follow, Deliver, JobState},
};
use background_jobs::ActixJob;
use std::{future::Future, pin::Pin};
use background_jobs::Job;
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct Reject(pub Actor);
#[derive(Clone, serde::Deserialize, serde::Serialize)]
pub(crate) struct Reject(pub(crate) Actor);
impl std::fmt::Debug for Reject {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Reject").field("actor", &self.0.id).finish()
}
}
impl Reject {
async fn perform(self, state: JobState) -> Result<(), anyhow::Error> {
if state.actors.unfollower(&self.0).await?.is_some() {
state.db.remove_listener(self.0.inbox.clone()).await?;
}
#[tracing::instrument(name = "Reject", skip(state))]
async fn perform(self, state: JobState) -> Result<(), Error> {
state.actors.remove_connection(&self.0).await?;
let my_id = state.config.generate_url(UrlKind::Actor);
let undo = generate_undo_follow(&state.config, &self.0.id, &my_id)?;
state.job_server.queue(Deliver::new(self.0.inbox, undo)?)?;
state
.job_server
.queue(Deliver::new(self.0.inbox, undo)?)
.await?;
Ok(())
}
}
impl ActixJob for Reject {
impl Job for Reject {
type State = JobState;
type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
type Error = Error;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
const NAME: &'static str = "relay::jobs::apub::Reject";
const QUEUE: &'static str = "apub";
fn run(self, state: Self::State) -> Self::Future {
Box::pin(self.perform(state))

@@ -1,47 +1,60 @@
use crate::{
apub::AcceptedActivities,
config::UrlKind,
data::Actor,
db::Actor,
error::Error,
future::BoxFuture,
jobs::{apub::generate_undo_follow, Deliver, JobState},
};
use background_jobs::ActixJob;
use std::{future::Future, pin::Pin};
use activitystreams::prelude::BaseExt;
use background_jobs::Job;
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct Undo {
#[derive(Clone, serde::Deserialize, serde::Serialize)]
pub(crate) struct Undo {
input: AcceptedActivities,
actor: Actor,
}
impl std::fmt::Debug for Undo {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Undo")
.field("input", &self.input.id_unchecked())
.field("actor", &self.actor.id)
.finish()
}
}
impl Undo {
pub fn new(input: AcceptedActivities, actor: Actor) -> Self {
pub(crate) fn new(input: AcceptedActivities, actor: Actor) -> Self {
Undo { input, actor }
}
async fn perform(self, state: JobState) -> Result<(), anyhow::Error> {
let was_following = state.actors.is_following(&self.actor.id).await;
#[tracing::instrument(name = "Undo", skip(state))]
async fn perform(self, state: JobState) -> Result<(), Error> {
let was_following = state.state.db.is_connected(self.actor.id.clone()).await?;
if state.actors.unfollower(&self.actor).await?.is_some() {
state.db.remove_listener(self.actor.inbox.clone()).await?;
}
state.actors.remove_connection(&self.actor).await?;
if was_following {
let my_id = state.config.generate_url(UrlKind::Actor);
let undo = generate_undo_follow(&state.config, &self.actor.id, &my_id)?;
state
.job_server
.queue(Deliver::new(self.actor.inbox, undo)?)?;
.queue(Deliver::new(self.actor.inbox, undo)?)
.await?;
}
Ok(())
}
}
impl ActixJob for Undo {
impl Job for Undo {
type State = JobState;
type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
type Error = Error;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
const NAME: &'static str = "relay::jobs::apub::Undo";
const QUEUE: &'static str = "apub";
fn run(self, state: Self::State) -> Self::Future {
Box::pin(self.perform(state))

@@ -1,44 +0,0 @@
use crate::jobs::JobState;
use anyhow::Error;
use background_jobs::ActixJob;
use std::{future::Future, pin::Pin};
use uuid::Uuid;
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct CacheMedia {
uuid: Uuid,
}
impl CacheMedia {
pub fn new(uuid: Uuid) -> Self {
CacheMedia { uuid }
}
async fn perform(self, state: JobState) -> Result<(), Error> {
if state.media.get_bytes(self.uuid).await.is_some() {
return Ok(());
}
if let Some(url) = state.media.get_url(self.uuid).await? {
let (content_type, bytes) = state.requests.fetch_bytes(url.as_str()).await?;
state
.media
.store_bytes(self.uuid, content_type, bytes)
.await;
}
Ok(())
}
}
impl ActixJob for CacheMedia {
type State = JobState;
type Future = Pin<Box<dyn Future<Output = Result<(), Error>>>>;
const NAME: &'static str = "relay::jobs::CacheMedia";
fn run(self, state: Self::State) -> Self::Future {
Box::pin(self.perform(state))
}
}

src/jobs/contact.rs (new file)

@@ -0,0 +1,112 @@
use crate::{
apub::AcceptedActors,
error::{Error, ErrorKind},
future::BoxFuture,
jobs::JobState,
requests::BreakerStrategy,
};
use activitystreams::{iri_string::types::IriString, object::Image, prelude::*};
use background_jobs::Job;
#[derive(Clone, serde::Deserialize, serde::Serialize)]
pub(crate) struct QueryContact {
actor_id: IriString,
contact_id: IriString,
}
impl std::fmt::Debug for QueryContact {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("QueryContact")
.field("actor_id", &self.actor_id.to_string())
.field("contact_id", &self.contact_id.to_string())
.finish()
}
}
impl QueryContact {
pub(crate) fn new(actor_id: IriString, contact_id: IriString) -> Self {
QueryContact {
actor_id,
contact_id,
}
}
async fn perform(self, state: JobState) -> Result<(), Error> {
let contact_outdated = state
.state
.node_cache
.is_contact_outdated(self.actor_id.clone())
.await;
if !contact_outdated {
return Ok(());
}
let contact = match state
.state
.requests
.fetch::<AcceptedActors>(&self.contact_id, BreakerStrategy::Allow404AndBelow)
.await
{
Ok(contact) => contact,
Err(e) if e.is_breaker() => {
tracing::debug!("Not retrying due to failed breaker");
return Ok(());
}
Err(e) => return Err(e),
};
let (username, display_name, url, avatar) =
to_contact(contact).ok_or(ErrorKind::Extract("contact"))?;
state
.state
.node_cache
.set_contact(self.actor_id, username, display_name, url, avatar)
.await?;
Ok(())
}
}
fn to_contact(contact: AcceptedActors) -> Option<(String, String, IriString, IriString)> {
let username = contact.preferred_username()?.to_owned();
let display_name = contact.name()?.as_one()?.as_xsd_string()?.to_owned();
let url = contact.url()?.as_single_id()?.to_owned();
let any_base = contact.icon()?.as_one()?;
let avatar = Image::from_any_base(any_base.clone())
.ok()??
.url()?
.as_single_id()?
.to_owned();
Some((username, display_name, url, avatar))
}
impl Job for QueryContact {
type State = JobState;
type Error = Error;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
const NAME: &'static str = "relay::jobs::QueryContact";
const QUEUE: &'static str = "maintenance";
fn run(self, state: Self::State) -> Self::Future {
Box::pin(self.perform(state))
}
}
#[cfg(test)]
mod tests {
use super::to_contact;
const HYNET_ADMIN: &str = r#"{"@context":["https://www.w3.org/ns/activitystreams","https://soc.hyena.network/schemas/litepub-0.1.jsonld",{"@language":"und"}],"alsoKnownAs":[],"attachment":[{"name":"Website","type":"PropertyValue","value":"https://hyena.network/"},{"name":"Services","type":"PropertyValue","value":"Pleroma, Invidious, SearX, XMPP"},{"name":"CW","type":"PropertyValue","value":"all long posts"}],"capabilities":{"acceptsChatMessages":true},"discoverable":true,"endpoints":{"oauthAuthorizationEndpoint":"https://soc.hyena.network/oauth/authorize","oauthRegistrationEndpoint":"https://soc.hyena.network/api/v1/apps","oauthTokenEndpoint":"https://soc.hyena.network/oauth/token","sharedInbox":"https://soc.hyena.network/inbox","uploadMedia":"https://soc.hyena.network/api/ap/upload_media"},"followers":"https://soc.hyena.network/users/HyNET/followers","following":"https://soc.hyena.network/users/HyNET/following","icon":{"type":"Image","url":"https://soc.hyena.network/media/ab149b1e0196ffdbecc6830c7f6f1a14dd8d8408ec7db0f1e8ad9d40e600ea73.gif"},"id":"https://soc.hyena.network/users/HyNET","image":{"type":"Image","url":"https://soc.hyena.network/media/12ba78d3015e13aa65ac4e106e574dd7bf959614585f10ce85de40e0148da677.png"},"inbox":"https://soc.hyena.network/users/HyNET/inbox","manuallyApprovesFollowers":false,"name":"HyNET Announcement System :glider:","outbox":"https://soc.hyena.network/users/HyNET/outbox","preferredUsername":"HyNET","publicKey":{"id":"https://soc.hyena.network/users/HyNET#main-key","owner":"https://soc.hyena.network/users/HyNET","publicKeyPem":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyF74womumWRhR7RW4Q6a\n2+Av/Ue8QHiKwjQARJEakbKnKgkI5FRFVVOfMiYVJp/juNt4GLgK15panBqJa9Yt\nWACiHQjBd2yVI5tIHiae0uBj5SdUVuduoycVLG0lpJsg12p8m/vL1oaeLqehTqa6\nsYplQh1GCLet0cUdn/66Cj2pAPD3V7Bz3VnG+oyXIsGQbBB8RHnWhFH8b0qQOyur\nJRAB8aye6QAL2sQbfISM2lycWzNeIHkqsUb7FdqdhQ+Ze0rETRGDkOO2Qvpg0hQm\n6owMsHnHA/DzyOHLy6Yf+I3OUlBC/P1SSAKwORsifFDXL322AEqoDi5ZpwzG9m5z\nAQIDAQAB\n-----END PUBLIC KEY-----\n\n"},"summary":"Ran by <span class=\"h-card\"><a class=\"u-url mention\" data-user=\"9s8j4AHGt3ED0P0b6e\" href=\"https://soc.hyena.network/users/mel\" rel=\"ugc\">@<span>mel</span></a></span> :adm1::adm2: <br/>For direct help with the service, send <span class=\"h-card\"><a class=\"u-url mention\" data-user=\"9s8j4AHGt3ED0P0b6e\" href=\"https://soc.hyena.network/users/mel\" rel=\"ugc\">@<span>mel</span></a></span> a message.","tag":[{"icon":{"type":"Image","url":"https://soc.hyena.network/emoji/Signs/adm1.png"},"id":"https://soc.hyena.network/emoji/Signs/adm1.png","name":":adm1:","type":"Emoji","updated":"1970-01-01T00:00:00Z"},{"icon":{"type":"Image","url":"https://soc.hyena.network/emoji/Signs/adm2.png"},"id":"https://soc.hyena.network/emoji/Signs/adm2.png","name":":adm2:","type":"Emoji","updated":"1970-01-01T00:00:00Z"},{"icon":{"type":"Image","url":"https://soc.hyena.network/emoji/misc/glider.png"},"id":"https://soc.hyena.network/emoji/misc/glider.png","name":":glider:","type":"Emoji","updated":"1970-01-01T00:00:00Z"}],"type":"Service","url":"https://soc.hyena.network/users/HyNET"}"#;
#[test]
fn parse_hynet() {
let actor = serde_json::from_str(HYNET_ADMIN).unwrap();
to_contact(actor).unwrap();
}
}

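`to_contact` leans on the `?` operator over `Option`, so any missing profile field aborts the whole extraction instead of producing a partial contact. The same shape in standalone form, using serde_json directly (field names mirror the ActivityPub actor JSON in the test above):

// Standalone sketch: each `?` short-circuits to None on a missing field.
fn to_contact(actor: &serde_json::Value) -> Option<(String, String)> {
    let username = actor.get("preferredUsername")?.as_str()?.to_owned();
    let display_name = actor.get("name")?.as_str()?.to_owned();
    Some((username, display_name))
}

fn main() {
    let actor = serde_json::json!({
        "preferredUsername": "HyNET",
        "name": "HyNET Announcement System"
    });
    assert_eq!(
        to_contact(&actor),
        Some(("HyNET".into(), "HyNET Announcement System".into()))
    );
}
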
@@ -1,17 +1,30 @@
use crate::{error::MyError, jobs::JobState};
use activitystreams_new::primitives::XsdAnyUri;
use anyhow::Error;
use background_jobs::{ActixJob, Backoff};
use std::{future::Future, pin::Pin};
use crate::{
error::Error,
future::BoxFuture,
jobs::{debug_object, JobState},
requests::BreakerStrategy,
};
use activitystreams::iri_string::types::IriString;
use background_jobs::{Backoff, Job};
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct Deliver {
to: XsdAnyUri,
#[derive(Clone, serde::Deserialize, serde::Serialize)]
pub(crate) struct Deliver {
to: IriString,
data: serde_json::Value,
}
impl std::fmt::Debug for Deliver {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Deliver")
.field("to", &self.to.to_string())
.field("activity", &self.data["type"])
.field("object", debug_object(&self.data))
.finish()
}
}
impl Deliver {
pub fn new<T>(to: XsdAnyUri, data: T) -> Result<Self, MyError>
pub(crate) fn new<T>(to: IriString, data: T) -> Result<Self, Error>
where
T: serde::ser::Serialize,
{
@@ -20,20 +33,39 @@ impl Deliver {
data: serde_json::to_value(data)?,
})
}
#[tracing::instrument(name = "Deliver", skip(state))]
async fn perform(self, state: JobState) -> Result<(), Error> {
if let Err(e) = state
.state
.requests
.deliver(&self.to, &self.data, BreakerStrategy::Allow401AndBelow)
.await
{
if e.is_breaker() {
tracing::debug!("Not trying due to failed breaker");
return Ok(());
}
if e.is_bad_request() {
tracing::debug!("Server didn't understand the activity");
return Ok(());
}
return Err(e);
}
Ok(())
}
}
impl ActixJob for Deliver {
impl Job for Deliver {
type State = JobState;
type Future = Pin<Box<dyn Future<Output = Result<(), Error>>>>;
type Error = Error;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
const NAME: &'static str = "relay::jobs::Deliver";
const QUEUE: &'static str = "deliver";
const BACKOFF: Backoff = Backoff::Exponential(8);
fn run(self, state: Self::State) -> Self::Future {
Box::pin(async move {
state.requests.deliver(self.to, &self.data).await?;
Ok(())
})
Box::pin(self.perform(state))
}
}

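The triage in `Deliver::perform` is the interesting part: breaker-open and bad-request failures return `Ok(())` so the job is dropped, while anything else propagates and triggers the exponential backoff declared on the impl. A standalone sketch of that decision (the error type is hypothetical, standing in for the crate's `Error` probes):

// Hypothetical stand-in for the crate's Error with the two probes used above.
#[allow(dead_code)]
enum DeliverError {
    BreakerOpen,
    BadRequest,
    Transient(String),
}

fn triage(res: Result<(), DeliverError>) -> Result<(), DeliverError> {
    match res {
        // Known-dead host or rejected payload: retrying cannot help,
        // so report success and let the job be discarded.
        Err(DeliverError::BreakerOpen) | Err(DeliverError::BadRequest) => Ok(()),
        // Everything else is retried, here with Backoff::Exponential(8).
        other => other,
    }
}
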
@@ -1,20 +1,28 @@
use crate::{
error::MyError,
jobs::{Deliver, JobState},
error::Error,
future::BoxFuture,
jobs::{debug_object, Deliver, JobState},
};
use activitystreams_new::primitives::XsdAnyUri;
use anyhow::Error;
use background_jobs::ActixJob;
use futures::future::{ready, Ready};
use activitystreams::iri_string::types::IriString;
use background_jobs::Job;
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct DeliverMany {
to: Vec<XsdAnyUri>,
#[derive(Clone, serde::Deserialize, serde::Serialize)]
pub(crate) struct DeliverMany {
to: Vec<IriString>,
data: serde_json::Value,
}
impl std::fmt::Debug for DeliverMany {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("DeliverMany")
.field("activity", &self.data["type"])
.field("object", debug_object(&self.data))
.finish()
}
}
impl DeliverMany {
pub fn new<T>(to: Vec<XsdAnyUri>, data: T) -> Result<Self, MyError>
pub(crate) fn new<T>(to: Vec<IriString>, data: T) -> Result<Self, Error>
where
T: serde::ser::Serialize,
{
@@ -24,24 +32,28 @@ impl DeliverMany {
})
}
fn perform(self, state: JobState) -> Result<(), Error> {
#[tracing::instrument(name = "Deliver many", skip(state))]
async fn perform(self, state: JobState) -> Result<(), Error> {
for inbox in self.to {
state
.job_server
.queue(Deliver::new(inbox, self.data.clone())?)?;
.queue(Deliver::new(inbox, self.data.clone())?)
.await?;
}
Ok(())
}
}
impl ActixJob for DeliverMany {
impl Job for DeliverMany {
type State = JobState;
type Future = Ready<Result<(), Error>>;
type Error = Error;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
const NAME: &'static str = "relay::jobs::DeliverMany";
const QUEUE: &'static str = "deliver";
fn run(self, state: Self::State) -> Self::Future {
ready(self.perform(state))
Box::pin(self.perform(state))
}
}

File diff suppressed because one or more lines are too long

@@ -1,121 +0,0 @@
pub mod apub;
mod cache_media;
mod deliver;
mod deliver_many;
mod instance;
mod nodeinfo;
mod process_listeners;
mod storage;
pub use self::{
cache_media::CacheMedia, deliver::Deliver, deliver_many::DeliverMany, instance::QueryInstance,
nodeinfo::QueryNodeinfo,
};
use crate::{
config::Config,
data::{ActorCache, Media, NodeCache, State},
db::Db,
error::MyError,
jobs::{process_listeners::Listeners, storage::Storage},
requests::Requests,
};
use background_jobs::{Job, QueueHandle, WorkerConfig};
use std::time::Duration;
pub fn create_server(db: Db) -> JobServer {
let shared = background_jobs::create_server(Storage::new(db));
shared.every(Duration::from_secs(60 * 5), Listeners);
JobServer::new(shared)
}
pub fn create_workers(
db: Db,
state: State,
actors: ActorCache,
job_server: JobServer,
media: Media,
config: Config,
) {
let remote_handle = job_server.remote.clone();
WorkerConfig::new(move || {
JobState::new(
db.clone(),
state.clone(),
actors.clone(),
job_server.clone(),
media.clone(),
config.clone(),
)
})
.register::<Deliver>()
.register::<DeliverMany>()
.register::<QueryNodeinfo>()
.register::<QueryInstance>()
.register::<Listeners>()
.register::<CacheMedia>()
.register::<apub::Announce>()
.register::<apub::Follow>()
.register::<apub::Forward>()
.register::<apub::Reject>()
.register::<apub::Undo>()
.set_worker_count("default", 4)
.start(remote_handle);
}
#[derive(Clone)]
pub struct JobState {
db: Db,
requests: Requests,
state: State,
actors: ActorCache,
config: Config,
media: Media,
node_cache: NodeCache,
job_server: JobServer,
}
#[derive(Clone)]
pub struct JobServer {
remote: QueueHandle,
}
impl JobState {
fn new(
db: Db,
state: State,
actors: ActorCache,
job_server: JobServer,
media: Media,
config: Config,
) -> Self {
JobState {
requests: state.requests(),
node_cache: state.node_cache(),
db,
actors,
config,
media,
state,
job_server,
}
}
}
impl JobServer {
fn new(remote_handle: QueueHandle) -> Self {
JobServer {
remote: remote_handle,
}
}
pub fn queue<J>(&self, job: J) -> Result<(), MyError>
where
J: Job,
{
self.remote.queue(job).map_err(MyError::Queue)
}
}

File diff suppressed because one or more lines are too long

@@ -1,29 +1,35 @@
use crate::jobs::{instance::QueryInstance, nodeinfo::QueryNodeinfo, JobState};
use anyhow::Error;
use background_jobs::ActixJob;
use std::{future::Future, pin::Pin};
use crate::{
error::Error,
future::BoxFuture,
jobs::{instance::QueryInstance, nodeinfo::QueryNodeinfo, JobState},
};
use background_jobs::Job;
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct Listeners;
pub(crate) struct Listeners;
impl Listeners {
#[tracing::instrument(name = "Spawn query instances", skip(state))]
async fn perform(self, state: JobState) -> Result<(), Error> {
for listener in state.state.listeners().await {
for actor_id in state.state.db.connected_ids().await? {
state
.job_server
.queue(QueryInstance::new(listener.clone()))?;
state.job_server.queue(QueryNodeinfo::new(listener))?;
.queue(QueryInstance::new(actor_id.clone()))
.await?;
state.job_server.queue(QueryNodeinfo::new(actor_id)).await?;
}
Ok(())
}
}
impl ActixJob for Listeners {
impl Job for Listeners {
type State = JobState;
type Future = Pin<Box<dyn Future<Output = Result<(), Error>>>>;
type Error = Error;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
const NAME: &'static str = "relay::jobs::Listeners";
const QUEUE: &'static str = "maintenance";
fn run(self, state: Self::State) -> Self::Future {
Box::pin(self.perform(state))

@@ -0,0 +1,28 @@
use crate::{error::Error, future::BoxFuture, jobs::JobState};
use background_jobs::{Backoff, Job};
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub(crate) struct RecordLastOnline;
impl RecordLastOnline {
#[tracing::instrument(skip(state))]
async fn perform(self, state: JobState) -> Result<(), Error> {
let nodes = state.state.last_online.take();
state.state.db.mark_last_seen(nodes).await
}
}
impl Job for RecordLastOnline {
type State = JobState;
type Error = Error;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
const NAME: &'static str = "relay::jobs::RecordLastOnline";
const QUEUE: &'static str = "maintenance";
const BACKOFF: Backoff = Backoff::Linear(1);
fn run(self, state: Self::State) -> Self::Future {
Box::pin(self.perform(state))
}
}

@@ -1,171 +0,0 @@
use crate::{db::Db, error::MyError};
use background_jobs::{dev::JobInfo, Stats};
use log::debug;
use tokio_postgres::types::Json;
use uuid::Uuid;
#[derive(Clone)]
pub struct Storage {
db: Db,
}
impl Storage {
pub fn new(db: Db) -> Self {
Storage { db }
}
}
#[async_trait::async_trait]
impl background_jobs::dev::Storage for Storage {
type Error = MyError;
async fn generate_id(&self) -> Result<Uuid, MyError> {
// TODO: Ensure unique job id
Ok(Uuid::new_v4())
}
async fn save_job(&self, job: JobInfo) -> Result<(), MyError> {
debug!(
"Inserting job {} status {} for queue {}",
job.id(),
job.status(),
job.queue()
);
self.db.pool().get().await?.execute(
"INSERT INTO jobs
(job_id, job_queue, job_timeout, job_updated, job_status, job_next_run, job_value, created_at)
VALUES
($1::UUID, $2::TEXT, $3::BIGINT, $4::TIMESTAMP, $5::TEXT, $6::TIMESTAMP, $7::JSONB, 'now')
ON CONFLICT (job_id)
DO UPDATE SET
job_updated = $4::TIMESTAMP,
job_status = $5::TEXT,
job_next_run = $6::TIMESTAMP,
job_value = $7::JSONB;",
&[&job.id(), &job.queue(), &job.timeout(), &job.updated_at().naive_utc(), &job.status().to_string(), &job.next_queue().map(|q| q.naive_utc()), &Json(&job)],
)
.await?;
Ok(())
}
async fn fetch_job(&self, id: Uuid) -> Result<Option<JobInfo>, MyError> {
debug!(
"SELECT job_value FROM jobs WHERE job_id = $1::UUID LIMIT 1; [{}]",
id
);
let row_opt = self
.db
.pool()
.get()
.await?
.query_opt(
"SELECT job_value
FROM jobs
WHERE job_id = $1::UUID
LIMIT 1;",
&[&id],
)
.await?;
let row = if let Some(row) = row_opt {
row
} else {
return Ok(None);
};
let value: Json<JobInfo> = row.try_get(0)?;
Ok(Some(value.0))
}
async fn fetch_job_from_queue(&self, queue: &str) -> Result<Option<JobInfo>, MyError> {
let row_opt = self
.db
.pool()
.get()
.await?
.query_opt(
"UPDATE jobs
SET
job_status = 'Running',
job_updated = 'now'
WHERE
job_id = (
SELECT job_id
FROM jobs
WHERE
job_queue = $1::TEXT
AND
(
job_next_run IS NULL
OR
job_next_run < now()
)
AND
(
job_status = 'Pending'
OR
(
job_status = 'Running'
AND
NOW() > (INTERVAL '1 millisecond' * job_timeout + job_updated)
)
)
LIMIT 1
FOR UPDATE SKIP LOCKED
)
RETURNING job_value;",
&[&queue],
)
.await?;
let row = if let Some(row) = row_opt {
row
} else {
return Ok(None);
};
let value: Json<JobInfo> = row.try_get(0)?;
let job = value.0;
debug!("Found job {} in queue {}", job.id(), queue);
Ok(Some(job))
}
async fn queue_job(&self, _queue: &str, _id: Uuid) -> Result<(), MyError> {
// Queue Job is a no-op, since jobs are always in their queue
Ok(())
}
async fn run_job(&self, _id: Uuid, _runner_id: Uuid) -> Result<(), MyError> {
// Run Job is a no-op, since jobs are marked running at fetch
Ok(())
}
async fn delete_job(&self, id: Uuid) -> Result<(), MyError> {
debug!("Deleting job {}", id);
self.db
.pool()
.get()
.await?
.execute("DELETE FROM jobs WHERE job_id = $1::UUID;", &[&id])
.await?;
Ok(())
}
async fn get_stats(&self) -> Result<Stats, MyError> {
// TODO: Stats are unimplemented
Ok(Stats::default())
}
async fn update_stats<F>(&self, _f: F) -> Result<(), MyError>
where
F: Fn(Stats) -> Stats + Send,
{
// TODO: Stats are unimplemented
Ok(())
}
}

@@ -1,139 +1,376 @@
use actix_rt::Arbiter;
use actix_web::{
middleware::{Compress, Logger},
web, App, HttpServer,
};
// need this for ructe
#![allow(clippy::needless_borrow)]
use std::time::Duration;
use activitystreams::iri_string::types::IriString;
use actix_web::{middleware::Compress, web, App, HttpServer};
use collector::MemoryCollector;
#[cfg(feature = "console")]
use console_subscriber::ConsoleLayer;
use error::Error;
use http_signature_normalization_actix::middleware::VerifySignature;
use metrics_exporter_prometheus::PrometheusBuilder;
use metrics_util::layers::FanoutBuilder;
use opentelemetry::{trace::TracerProvider, KeyValue};
use opentelemetry_otlp::WithExportConfig;
use opentelemetry_sdk::Resource;
use reqwest_middleware::ClientWithMiddleware;
use rustls::ServerConfig;
use tokio::task::JoinHandle;
use tracing_actix_web::TracingLogger;
use tracing_error::ErrorLayer;
use tracing_log::LogTracer;
use tracing_subscriber::{filter::Targets, layer::SubscriberExt, Layer};
mod admin;
mod apub;
mod args;
mod collector;
mod config;
mod data;
mod db;
mod error;
mod extractors;
mod future;
mod http1;
mod jobs;
mod middleware;
mod notify;
mod requests;
mod routes;
mod spawner;
mod stream;
mod telegram;
use crate::config::UrlKind;
use self::{
args::Args,
config::Config,
data::{ActorCache, Media, State},
data::{ActorCache, MediaCache, State},
db::Db,
jobs::{create_server, create_workers},
middleware::RelayResolver,
routes::{actor, inbox, index, nodeinfo, nodeinfo_meta, statics},
jobs::create_workers,
middleware::{DebugPayload, MyVerify, RelayResolver, Timings},
routes::{actor, healthz, inbox, index, nodeinfo, nodeinfo_meta, statics},
spawner::Spawner,
};
#[actix_rt::main]
async fn main() -> Result<(), anyhow::Error> {
fn init_subscriber(
software_name: &'static str,
opentelemetry_url: Option<&IriString>,
) -> color_eyre::Result<()> {
LogTracer::init()?;
color_eyre::install()?;
let targets: Targets = std::env::var("RUST_LOG")
.unwrap_or_else(|_| "info".into())
.parse()?;
let format_layer = tracing_subscriber::fmt::layer().with_filter(targets.clone());
#[cfg(feature = "console")]
let console_layer = ConsoleLayer::builder()
.with_default_env()
.server_addr(([0, 0, 0, 0], 6669))
.event_buffer_capacity(1024 * 1024)
.spawn();
let subscriber = tracing_subscriber::Registry::default()
.with(format_layer)
.with(ErrorLayer::default());
#[cfg(feature = "console")]
let subscriber = subscriber.with(console_layer);
if let Some(url) = opentelemetry_url {
let exporter = opentelemetry_otlp::SpanExporter::builder()
.with_tonic()
.with_endpoint(url.as_str())
.build()?;
let tracer_provider = opentelemetry_sdk::trace::TracerProvider::builder()
.with_resource(Resource::new(vec![KeyValue::new(
"service.name",
software_name,
)]))
.with_batch_exporter(exporter, opentelemetry_sdk::runtime::Tokio)
.build();
let otel_layer = tracing_opentelemetry::layer()
.with_tracer(tracer_provider.tracer(software_name))
.with_filter(targets);
let subscriber = subscriber.with(otel_layer);
tracing::subscriber::set_global_default(subscriber)?;
} else {
tracing::subscriber::set_global_default(subscriber)?;
}
Ok(())
}
fn build_client(
user_agent: &str,
timeout_seconds: u64,
proxy: Option<(&IriString, Option<(&str, &str)>)>,
) -> Result<ClientWithMiddleware, Error> {
let builder = reqwest::Client::builder().user_agent(user_agent.to_string());
let builder = if let Some((url, auth)) = proxy {
let proxy = reqwest::Proxy::all(url.as_str())?;
let proxy = if let Some((username, password)) = auth {
proxy.basic_auth(username, password)
} else {
proxy
};
builder.proxy(proxy)
} else {
builder
};
let client = builder
.timeout(Duration::from_secs(timeout_seconds))
.build()?;
let client_with_middleware = reqwest_middleware::ClientBuilder::new(client)
.with(reqwest_tracing::TracingMiddleware::default())
.build();
Ok(client_with_middleware)
}
#[tokio::main]
async fn main() -> color_eyre::Result<()> {
dotenv::dotenv().ok();
let config = Config::build()?;
if config.debug() {
std::env::set_var(
"RUST_LOG",
"debug,tokio_postgres=info,h2=info,trust_dns_resolver=info,trust_dns_proto=info,rustls=info,html5ever=info",
)
} else {
std::env::set_var("RUST_LOG", "info")
}
if config.pretty_log() {
pretty_env_logger::init();
} else {
env_logger::init();
}
let db = Db::build(&config)?;
init_subscriber(Config::software_name(), config.opentelemetry_url())?;
let args = Args::new();
if args.jobs_only() && args.no_jobs() {
return Err(anyhow::Error::msg(
"Either the server or the jobs must be run",
));
}
if !args.blocks().is_empty() || !args.whitelists().is_empty() {
if args.undo() {
db.remove_blocks(args.blocks()).await?;
db.remove_whitelists(args.whitelists()).await?;
} else {
db.add_blocks(args.blocks()).await?;
db.add_whitelists(args.whitelists()).await?;
}
if args.any() {
client_main(config, args).await??;
return Ok(());
}
let media = Media::new(db.clone());
let state = State::hydrate(config.clone(), &db).await?;
let collector = MemoryCollector::new();
if let Some(bind_addr) = config.prometheus_bind_address() {
let (recorder, exporter) = PrometheusBuilder::new()
.with_http_listener(bind_addr)
.build()?;
tokio::spawn(exporter);
let recorder = FanoutBuilder::default()
.add_recorder(recorder)
.add_recorder(collector.clone())
.build();
metrics::set_global_recorder(recorder).map_err(|e| color_eyre::eyre::eyre!("{e}"))?;
} else {
collector.install()?;
}
tracing::info!("Opening DB");
let db = Db::build(&config)?;
tracing::info!("Building caches");
let actors = ActorCache::new(db.clone());
let job_server = create_server(db.clone());
let media = MediaCache::new(db.clone());
notify::Notifier::new(config.database_url().parse()?)
.register(notify::NewBlocks(state.clone()))
.register(notify::NewWhitelists(state.clone()))
.register(notify::NewListeners(state.clone(), job_server.clone()))
.register(notify::NewActors(actors.clone()))
.register(notify::NewNodes(state.node_cache()))
.register(notify::RmBlocks(state.clone()))
.register(notify::RmWhitelists(state.clone()))
.register(notify::RmListeners(state.clone()))
.register(notify::RmActors(actors.clone()))
.register(notify::RmNodes(state.node_cache()))
.start();
server_main(db, actors, media, collector, config).await?;
if args.jobs_only() {
for _ in 0..num_cpus::get() {
let state = state.clone();
let actors = actors.clone();
let job_server = job_server.clone();
let media = media.clone();
let config = config.clone();
let db = db.clone();
tracing::info!("Application exit");
Arbiter::new().exec_fn(move || {
create_workers(db, state, actors, job_server, media, config);
});
Ok(())
}
fn client_main(config: Config, args: Args) -> JoinHandle<color_eyre::Result<()>> {
tokio::spawn(do_client_main(config, args))
}
async fn do_client_main(config: Config, args: Args) -> color_eyre::Result<()> {
let client = build_client(
&config.user_agent(),
config.client_timeout(),
config.proxy_config(),
)?;
if !args.blocks().is_empty() || !args.allowed().is_empty() {
if args.undo() {
admin::client::unblock(&client, &config, args.blocks().to_vec()).await?;
admin::client::disallow(&client, &config, args.allowed().to_vec()).await?;
} else {
admin::client::block(&client, &config, args.blocks().to_vec()).await?;
admin::client::allow(&client, &config, args.allowed().to_vec()).await?;
}
actix_rt::signal::ctrl_c().await?;
return Ok(());
println!("Updated lists");
}
let no_jobs = args.no_jobs();
if args.contacted() {
let last_seen = admin::client::last_seen(&client, &config).await?;
let mut report = String::from("Contacted:");
if !last_seen.never.is_empty() {
report += "\nNever seen:\n";
}
for domain in last_seen.never {
report += "\t";
report += &domain;
report += "\n";
}
if !last_seen.last_seen.is_empty() {
report += "\nSeen:\n";
}
for (datetime, domains) in last_seen.last_seen {
for domain in domains {
report += "\t";
report += &datetime.to_string();
report += " - ";
report += &domain;
report += "\n";
}
}
report += "\n";
println!("{report}");
}
if args.list() {
let (blocked, allowed, connected) = tokio::try_join!(
admin::client::blocked(&client, &config),
admin::client::allowed(&client, &config),
admin::client::connected(&client, &config)
)?;
let mut report = String::from("Report:\n");
if !allowed.allowed_domains.is_empty() {
report += "\nAllowed\n\t";
report += &allowed.allowed_domains.join("\n\t");
}
if !blocked.blocked_domains.is_empty() {
report += "\n\nBlocked\n\t";
report += &blocked.blocked_domains.join("\n\t");
}
if !connected.connected_actors.is_empty() {
report += "\n\nConnected\n\t";
report += &connected.connected_actors.join("\n\t");
}
report += "\n";
println!("{report}");
}
if args.stats() {
let stats = admin::client::stats(&client, &config).await?;
stats.present();
}
Ok(())
}
const VERIFY_RATIO: usize = 7;
async fn server_main(
db: Db,
actors: ActorCache,
media: MediaCache,
collector: MemoryCollector,
config: Config,
) -> color_eyre::Result<()> {
let client = build_client(
&config.user_agent(),
config.client_timeout(),
config.proxy_config(),
)?;
tracing::info!("Creating state");
let (signature_threads, verify_threads) = match config.signature_threads() {
0 | 1 => (1, 1),
n if n <= VERIFY_RATIO => (n, 1),
n => {
let verify_threads = (n / VERIFY_RATIO).max(1);
let signature_threads = n.saturating_sub(verify_threads).max(VERIFY_RATIO);
(signature_threads, verify_threads)
}
};
let verify_spawner = Spawner::build("verify-cpu", verify_threads.try_into()?)?;
let sign_spawner = Spawner::build("sign-cpu", signature_threads.try_into()?)?;
let key_id = config.generate_url(UrlKind::MainKey).to_string();
let state = State::build(db.clone(), key_id, sign_spawner.clone(), client).await?;
if let Some((token, admin_handle)) = config.telegram_info() {
tracing::info!("Creating telegram handler");
telegram::start(admin_handle.to_owned(), db.clone(), token);
}
let cert_resolver = config
.open_keys()
.await?
.map(rustls_channel_resolver::channel::<32>);
let bind_address = config.bind_address();
HttpServer::new(move || {
if !no_jobs {
create_workers(
db.clone(),
state.clone(),
actors.clone(),
job_server.clone(),
media.clone(),
config.clone(),
);
}
let sign_spawner2 = sign_spawner.clone();
let verify_spawner2 = verify_spawner.clone();
let config2 = config.clone();
let job_store = jobs::build_storage();
let server = HttpServer::new(move || {
let job_server = create_workers(
job_store.clone(),
state.clone(),
actors.clone(),
media.clone(),
config.clone(),
)
.expect("Failed to create job server");
App::new()
.wrap(Compress::default())
.wrap(Logger::default())
.data(db.clone())
.data(state.clone())
.data(state.requests())
.data(actors.clone())
.data(config.clone())
.data(job_server.clone())
.data(media.clone())
let app = App::new()
.app_data(web::Data::new(db.clone()))
.app_data(web::Data::new(state.clone()))
.app_data(web::Data::new(
state.requests.clone().spawner(verify_spawner.clone()),
))
.app_data(web::Data::new(actors.clone()))
.app_data(web::Data::new(config.clone()))
.app_data(web::Data::new(job_server))
.app_data(web::Data::new(media.clone()))
.app_data(web::Data::new(collector.clone()))
.app_data(web::Data::new(verify_spawner.clone()));
let app = if let Some(data) = config.admin_config() {
app.app_data(data)
} else {
app
};
app.wrap(Compress::default())
.wrap(TracingLogger::default())
.wrap(Timings)
.route("/healthz", web::get().to(healthz))
.service(web::resource("/").route(web::get().to(index)))
.service(web::resource("/media/{path}").route(web::get().to(routes::media)))
.service(
web::resource("/inbox")
.wrap(config.digest_middleware())
.wrap(config.signature_middleware(state.requests(), actors.clone()))
.wrap(config.digest_middleware().spawner(verify_spawner.clone()))
.wrap(VerifySignature::new(
MyVerify(
state.requests.clone().spawner(verify_spawner.clone()),
actors.clone(),
state.clone(),
verify_spawner.clone(),
),
http_signature_normalization_actix::Config::new(),
))
.wrap(DebugPayload(config.debug()))
.route(web::post().to(inbox)),
)
.service(web::resource("/actor").route(web::get().to(actor)))
@@ -144,10 +381,59 @@ async fn main() -> Result<(), anyhow::Error> {
.service(web::resource("/nodeinfo").route(web::get().to(nodeinfo_meta))),
)
.service(web::resource("/static/{filename}").route(web::get().to(statics)))
})
.bind(bind_address)?
.run()
.await?;
.service(
web::scope("/api/v1").service(
web::scope("/admin")
.route("/allow", web::post().to(admin::routes::allow))
.route("/disallow", web::post().to(admin::routes::disallow))
.route("/block", web::post().to(admin::routes::block))
.route("/unblock", web::post().to(admin::routes::unblock))
.route("/allowed", web::get().to(admin::routes::allowed))
.route("/blocked", web::get().to(admin::routes::blocked))
.route("/connected", web::get().to(admin::routes::connected))
.route("/stats", web::get().to(admin::routes::stats))
.route("/last_seen", web::get().to(admin::routes::last_seen)),
),
)
});
if let Some((cert_tx, cert_rx)) = cert_resolver {
let handle = tokio::spawn(async move {
let mut interval = tokio::time::interval(Duration::from_secs(30));
interval.tick().await;
loop {
interval.tick().await;
match config2.open_keys().await {
Ok(Some(key)) => cert_tx.update(key),
Ok(None) => tracing::warn!("Missing TLS keys"),
Err(e) => tracing::error!("Failed to read TLS keys {e}"),
}
}
});
tracing::info!("Binding to {}:{} with TLS", bind_address.0, bind_address.1);
let server_config = ServerConfig::builder()
.with_no_client_auth()
.with_cert_resolver(cert_rx);
server
.bind_rustls_0_23(bind_address, server_config)?
.run()
.await?;
handle.abort();
let _ = handle.await;
} else {
tracing::info!("Binding to {}:{}", bind_address.0, bind_address.1);
server.bind(bind_address)?.run().await?;
}
sign_spawner2.close().await;
verify_spawner2.close().await;
tracing::info!("Server closed");
Ok(())
}

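The signature-thread split in `server_main` deserves a worked example: with `signature_threads = 16`, verification gets `16 / 7 = 2` threads and signing gets `max(16 - 2, 7) = 14`, so signing stays heavily favored. A standalone check of that arithmetic, mirroring the match above:

const VERIFY_RATIO: usize = 7;

// Mirror of the match in server_main: small pools keep one verifier,
// large pools split roughly 7:1 in favor of signing.
fn split(n: usize) -> (usize, usize) {
    match n {
        0 | 1 => (1, 1),
        n if n <= VERIFY_RATIO => (n, 1),
        n => {
            let verify = (n / VERIFY_RATIO).max(1);
            (n.saturating_sub(verify).max(VERIFY_RATIO), verify)
        }
    }
}

fn main() {
    assert_eq!(split(1), (1, 1));
    assert_eq!(split(4), (4, 1));
    assert_eq!(split(16), (14, 2));
}
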
src/middleware.rs (new file)

@@ -0,0 +1,9 @@
mod payload;
mod timings;
mod verifier;
mod webfinger;
pub(crate) use payload::DebugPayload;
pub(crate) use timings::Timings;
pub(crate) use verifier::MyVerify;
pub(crate) use webfinger::RelayResolver;

@@ -1,5 +0,0 @@
mod verifier;
mod webfinger;
pub use verifier::MyVerify;
pub use webfinger::RelayResolver;

src/middleware/payload.rs (new file)

@@ -0,0 +1,77 @@
use actix_web::{
dev::{Payload, Service, ServiceRequest, Transform},
http::Method,
web::BytesMut,
HttpMessage,
};
use std::{
future::{ready, Ready},
task::{Context, Poll},
};
use streem::IntoStreamer;
#[derive(Clone, Debug)]
pub(crate) struct DebugPayload(pub bool);
#[doc(hidden)]
#[derive(Clone, Debug)]
pub(crate) struct DebugPayloadMiddleware<S>(bool, S);
impl<S> Transform<S, ServiceRequest> for DebugPayload
where
S: Service<ServiceRequest, Error = actix_web::Error>,
S::Future: 'static,
S::Error: 'static,
{
type Response = S::Response;
type Error = S::Error;
type InitError = ();
type Transform = DebugPayloadMiddleware<S>;
type Future = Ready<Result<Self::Transform, Self::InitError>>;
fn new_transform(&self, service: S) -> Self::Future {
ready(Ok(DebugPayloadMiddleware(self.0, service)))
}
}
impl<S> Service<ServiceRequest> for DebugPayloadMiddleware<S>
where
S: Service<ServiceRequest, Error = actix_web::Error>,
S::Future: 'static,
S::Error: 'static,
{
type Response = S::Response;
type Error = S::Error;
type Future = S::Future;
fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.1.poll_ready(cx)
}
fn call(&self, mut req: ServiceRequest) -> Self::Future {
if self.0 && req.method() == Method::POST {
let mut pl = req.take_payload().into_streamer();
req.set_payload(Payload::Stream {
payload: Box::pin(streem::try_from_fn(|yielder| async move {
let mut buf = BytesMut::new();
while let Some(bytes) = pl.try_next().await? {
buf.extend(bytes);
}
let bytes = buf.freeze();
tracing::info!("{}", String::from_utf8_lossy(&bytes));
yielder.yield_ok(bytes).await;
Ok(())
})),
});
self.1.call(req)
} else {
self.1.call(req)
}
}
}

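`DebugPayload` only intercepts POST bodies, and only when debug mode is on: it drains the request stream into one buffer, logs the whole body, then re-emits the frozen bytes as a single-chunk stream so downstream extractors see the same payload. The core idea stripped of the actix and streem types, with plain `Vec<u8>` chunks standing in for the payload stream:

// Drain chunked input, log the whole body, hand back equivalent bytes.
fn buffer_and_log(chunks: Vec<Vec<u8>>) -> Vec<u8> {
    let body: Vec<u8> = chunks.into_iter().flatten().collect();
    tracing::info!("{}", String::from_utf8_lossy(&body));
    body // downstream consumes the same bytes it would have streamed
}
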
src/middleware/timings.rs (new file)

@@ -0,0 +1,143 @@
use actix_web::{
body::MessageBody,
dev::{Service, ServiceRequest, ServiceResponse, Transform},
http::StatusCode,
};
use std::{
future::{ready, Future, Ready},
time::Instant,
};
pub(crate) struct Timings;
pub(crate) struct TimingsMiddleware<S>(S);
struct LogOnDrop {
begin: Instant,
path: String,
method: String,
arm: bool,
}
pin_project_lite::pin_project! {
pub(crate) struct TimingsFuture<F> {
#[pin]
future: F,
log_on_drop: Option<LogOnDrop>,
}
}
pin_project_lite::pin_project! {
pub(crate) struct TimingsBody<B> {
#[pin]
body: B,
log_on_drop: LogOnDrop,
}
}
impl Drop for LogOnDrop {
fn drop(&mut self) {
if self.arm {
let duration = self.begin.elapsed();
metrics::histogram!("relay.request.complete", "path" => self.path.clone(), "method" => self.method.clone()).record(duration);
}
}
}
impl<S, B> Transform<S, ServiceRequest> for Timings
where
S: Service<ServiceRequest, Response = ServiceResponse<B>, Error = actix_web::Error>,
S::Future: 'static,
{
type Response = ServiceResponse<TimingsBody<B>>;
type Error = S::Error;
type InitError = ();
type Transform = TimingsMiddleware<S>;
type Future = Ready<Result<Self::Transform, Self::InitError>>;
fn new_transform(&self, service: S) -> Self::Future {
ready(Ok(TimingsMiddleware(service)))
}
}
impl<S, B> Service<ServiceRequest> for TimingsMiddleware<S>
where
S: Service<ServiceRequest, Response = ServiceResponse<B>, Error = actix_web::Error>,
S::Future: 'static,
{
type Response = ServiceResponse<TimingsBody<B>>;
type Error = S::Error;
type Future = TimingsFuture<S::Future>;
fn poll_ready(
&self,
ctx: &mut core::task::Context<'_>,
) -> std::task::Poll<Result<(), Self::Error>> {
self.0.poll_ready(ctx)
}
fn call(&self, req: ServiceRequest) -> Self::Future {
let log_on_drop = LogOnDrop {
begin: Instant::now(),
path: format!("{:?}", req.match_pattern()),
method: req.method().to_string(),
arm: false,
};
let future = self.0.call(req);
TimingsFuture {
future,
log_on_drop: Some(log_on_drop),
}
}
}
impl<F, B> Future for TimingsFuture<F>
where
F: Future<Output = Result<ServiceResponse<B>, actix_web::Error>>,
{
type Output = Result<ServiceResponse<TimingsBody<B>>, actix_web::Error>;
fn poll(
self: std::pin::Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
) -> std::task::Poll<Self::Output> {
let this = self.project();
let res = std::task::ready!(this.future.poll(cx));
let mut log_on_drop = this
.log_on_drop
.take()
.expect("TimingsFuture polled after completion");
let status = match &res {
Ok(res) => res.status(),
Err(e) => e.as_response_error().status_code(),
};
log_on_drop.arm =
status != StatusCode::NOT_FOUND && status != StatusCode::METHOD_NOT_ALLOWED;
let res = res.map(|r| r.map_body(|_, body| TimingsBody { body, log_on_drop }));
std::task::Poll::Ready(res)
}
}
impl<B: MessageBody> MessageBody for TimingsBody<B> {
type Error = B::Error;
fn size(&self) -> actix_web::body::BodySize {
self.body.size()
}
fn poll_next(
self: std::pin::Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
) -> std::task::Poll<Option<Result<actix_web::web::Bytes, Self::Error>>> {
self.project().body.poll_next(cx)
}
}

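The `LogOnDrop` guard is what makes these timings trustworthy: the histogram records when the response body is dropped, not when the handler returns, so streaming responses and client disconnects are measured too, and 404/405 noise is disarmed before it can skew the metric. A minimal standalone sketch of the drop-guard idea:

use std::time::Instant;

// Guard that reports elapsed time however the surrounding scope exits.
struct LogOnDrop {
    begin: Instant,
    path: &'static str,
    arm: bool,
}

impl Drop for LogOnDrop {
    fn drop(&mut self) {
        if self.arm {
            println!("{} took {:?}", self.path, self.begin.elapsed());
        }
    }
}

fn main() {
    let mut guard = LogOnDrop { begin: Instant::now(), path: "/inbox", arm: false };
    // ... handle the request; only arm once we know the route matched.
    guard.arm = true;
} // guard drops here and records, even on early return or panic
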
@@ -1,87 +1,162 @@
use crate::{data::ActorCache, error::MyError, requests::Requests};
use activitystreams_new::uri;
use actix_web::web;
use http_signature_normalization_actix::{prelude::*, verify::DeprecatedAlgorithm};
use log::error;
use rsa::{hash::Hashes, padding::PaddingScheme, PublicKey, RSAPublicKey};
use rsa_pem::KeyExt;
use sha2::{Digest, Sha256};
use crate::{
apub::AcceptedActors,
data::{ActorCache, State},
error::{Error, ErrorKind},
requests::{BreakerStrategy, Requests},
spawner::Spawner,
};
use activitystreams::{base::BaseExt, iri, iri_string::types::IriString};
use base64::{engine::general_purpose::STANDARD, Engine};
use http_signature_normalization_actix::{prelude::*, verify::DeprecatedAlgorithm, Spawn};
use rsa::{pkcs1::EncodeRsaPublicKey, pkcs8::DecodePublicKey, RsaPublicKey};
use std::{future::Future, pin::Pin};
#[derive(Clone)]
pub struct MyVerify(pub Requests, pub ActorCache);
#[derive(Clone, Debug)]
pub(crate) struct MyVerify(pub Requests, pub ActorCache, pub State, pub Spawner);
impl MyVerify {
#[tracing::instrument("Verify request", skip(self, signature, signing_string))]
async fn verify(
&self,
algorithm: Option<Algorithm>,
key_id: String,
signature: String,
signing_string: String,
) -> Result<bool, MyError> {
let mut uri = uri!(key_id);
uri.as_url_mut().set_fragment(None);
let actor = self.1.get(&uri, &self.0).await?;
let was_cached = actor.is_cached();
let actor = actor.into_inner();
) -> Result<bool, Error> {
let public_key_id = iri!(key_id);
match algorithm {
Some(Algorithm::Hs2019) => (),
Some(Algorithm::Deprecated(DeprecatedAlgorithm::RsaSha256)) => (),
Some(other) => {
return Err(MyError::Algorithm(other.to_string()));
// receiving an activity from a domain indicates it is probably online
self.0.reset_breaker(&public_key_id);
let actor_id = if let Some(mut actor_id) = self
.2
.db
.actor_id_from_public_key_id(public_key_id.clone())
.await?
{
if !self.2.db.is_allowed(actor_id.clone()).await? {
return Err(ErrorKind::NotAllowed(key_id).into());
}
None => (),
};
let res = do_verify(&actor.public_key, signature.clone(), signing_string.clone()).await;
actor_id.set_fragment(None);
let actor = self.1.get(&actor_id, &self.0).await?;
let was_cached = actor.is_cached();
let actor = actor.into_inner();
if let Err(e) = res {
if !was_cached {
return Err(e);
match algorithm {
Some(Algorithm::Hs2019) => (),
Some(Algorithm::Deprecated(DeprecatedAlgorithm::RsaSha256)) => (),
Some(other) => {
return Err(ErrorKind::Algorithm(other.to_string()).into());
}
None => (),
};
let res = do_verify(
&self.3,
&actor.public_key,
signature.clone(),
signing_string.clone(),
)
.await;
if let Err(e) = res {
if !was_cached {
return Err(e);
}
} else {
return Ok(true);
}
actor_id
} else {
return Ok(true);
}
match self
.0
.fetch::<PublicKeyResponse>(&public_key_id, BreakerStrategy::Require2XX)
.await
{
Ok(res) => res.actor_id().ok_or(ErrorKind::MissingId),
Err(e) => {
if e.is_gone() {
tracing::warn!("Actor gone: {public_key_id}");
return Ok(false);
} else {
return Err(e);
}
}
}?
};
// Previously we verified the sig from an actor's local cache
//
// Now we make sure we fetch an updated actor
let actor = self.1.get_no_cache(&uri, &self.0).await?;
let actor = self.1.get_no_cache(&actor_id, &self.0).await?;
do_verify(&actor.public_key, signature, signing_string).await?;
do_verify(&self.3, &actor.public_key, signature, signing_string).await?;
Ok(true)
}
}
#[derive(serde::Deserialize)]
#[serde(untagged)]
#[serde(rename_all = "camelCase")]
enum PublicKeyResponse {
PublicKey {
#[allow(dead_code)]
id: IriString,
owner: IriString,
#[allow(dead_code)]
public_key_pem: String,
},
Actor(Box<AcceptedActors>),
}
impl PublicKeyResponse {
fn actor_id(&self) -> Option<IriString> {
match self {
PublicKeyResponse::PublicKey { owner, .. } => Some(owner.clone()),
PublicKeyResponse::Actor(actor) => actor.id_unchecked().cloned(),
}
}
}
#[tracing::instrument("Verify signature")]
async fn do_verify(
spawner: &Spawner,
public_key: &str,
signature: String,
signing_string: String,
) -> Result<(), Error> {
    let public_key = RsaPublicKey::from_public_key_pem(public_key.trim())?;
    let public_key_der = public_key
        .to_pkcs1_der()
        .map_err(|_| ErrorKind::DerEncode)?;
    let public_key = ring::signature::UnparsedPublicKey::new(
        &ring::signature::RSA_PKCS1_2048_8192_SHA256,
        public_key_der,
    );

    let span = tracing::Span::current();
    spawner
        .spawn_blocking(move || {
            span.in_scope(|| {
                let decoded = STANDARD.decode(signature)?;

                public_key
                    .verify(signing_string.as_bytes(), decoded.as_slice())
                    .map_err(|_| ErrorKind::VerifySignature)?;

                Ok(()) as Result<(), Error>
            })
        })
        .await??;
Ok(())
}
impl SignatureVerify for MyVerify {
    type Error = Error;
type Future = Pin<Box<dyn Future<Output = Result<bool, Self::Error>>>>;
fn signature_verify(
@ -96,10 +171,39 @@ impl SignatureVerify for MyVerify {
Box::pin(async move {
this.verify(algorithm, key_id, signature, signing_string)
.await
.map_err(|e| {
error!("Failed to verify, {}", e);
e
})
})
}
}
#[cfg(test)]
mod tests {
use crate::apub::AcceptedActors;
use rsa::{pkcs8::DecodePublicKey, RsaPublicKey};
const ASONIX_DOG_ACTOR: &str = r#"{"@context":["https://www.w3.org/ns/activitystreams","https://w3id.org/security/v1",{"manuallyApprovesFollowers":"as:manuallyApprovesFollowers","toot":"http://joinmastodon.org/ns#","featured":{"@id":"toot:featured","@type":"@id"},"featuredTags":{"@id":"toot:featuredTags","@type":"@id"},"alsoKnownAs":{"@id":"as:alsoKnownAs","@type":"@id"},"movedTo":{"@id":"as:movedTo","@type":"@id"},"schema":"http://schema.org#","PropertyValue":"schema:PropertyValue","value":"schema:value","discoverable":"toot:discoverable","Device":"toot:Device","Ed25519Signature":"toot:Ed25519Signature","Ed25519Key":"toot:Ed25519Key","Curve25519Key":"toot:Curve25519Key","EncryptedMessage":"toot:EncryptedMessage","publicKeyBase64":"toot:publicKeyBase64","deviceId":"toot:deviceId","claim":{"@type":"@id","@id":"toot:claim"},"fingerprintKey":{"@type":"@id","@id":"toot:fingerprintKey"},"identityKey":{"@type":"@id","@id":"toot:identityKey"},"devices":{"@type":"@id","@id":"toot:devices"},"messageFranking":"toot:messageFranking","messageType":"toot:messageType","cipherText":"toot:cipherText","suspended":"toot:suspended"}],"id":"https://masto.asonix.dog/actor","type":"Application","inbox":"https://masto.asonix.dog/actor/inbox","outbox":"https://masto.asonix.dog/actor/outbox","preferredUsername":"masto.asonix.dog","url":"https://masto.asonix.dog/about/more?instance_actor=true","manuallyApprovesFollowers":true,"publicKey":{"id":"https://masto.asonix.dog/actor#main-key","owner":"https://masto.asonix.dog/actor","publicKeyPem":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx8zXS0QNg9YGUBsxAOBH\nJaxIn7i6t+Z4UOpSFDVa2kP0NvQgIJsq3wzRqvaiuncRWpkyFk1fTakiRGD32xnY\nt+juuAaIBlU8eswKyANFqhcLAvFHmT3rA1848M4/YM19djvlL/PR9T53tPNHU+el\nS9MlsG3o6Zsj8YaUJtCI8RgEuJoROLHUb/V9a3oMQ7CfuIoSvF3VEz3/dRT09RW6\n0wQX7yhka9WlKuayWLWmTcB9lAIX6neBk+qKc8VSEsO7mHkzB8mRgVcS2uYZl1eA\nD8/jTT+SlpeFNDZk0Oh35GNFoOxh9qjRw3NGxu7jJCVBehDODzasOv4xDxKAhONa\njQIDAQAB\n-----END PUBLIC KEY-----\n"},"endpoints":{"sharedInbox":"https://masto.asonix.dog/inbox"}}"#;
const KARJALAZET_RELAY: &str = r#"{"@context":["https://www.w3.org/ns/activitystreams","https://pleroma.karjalazet.se/schemas/litepub-0.1.jsonld",{"@language":"und"}],"alsoKnownAs":[],"attachment":[],"capabilities":{},"discoverable":false,"endpoints":{"oauthAuthorizationEndpoint":"https://pleroma.karjalazet.se/oauth/authorize","oauthRegistrationEndpoint":"https://pleroma.karjalazet.se/api/v1/apps","oauthTokenEndpoint":"https://pleroma.karjalazet.se/oauth/token","sharedInbox":"https://pleroma.karjalazet.se/inbox","uploadMedia":"https://pleroma.karjalazet.se/api/ap/upload_media"},"featured":"https://pleroma.karjalazet.se/relay/collections/featured","followers":"https://pleroma.karjalazet.se/relay/followers","following":"https://pleroma.karjalazet.se/relay/following","id":"https://pleroma.karjalazet.se/relay","inbox":"https://pleroma.karjalazet.se/relay/inbox","manuallyApprovesFollowers":false,"name":null,"outbox":"https://pleroma.karjalazet.se/relay/outbox","preferredUsername":"relay","publicKey":{"id":"https://pleroma.karjalazet.se/relay#main-key","owner":"https://pleroma.karjalazet.se/relay","publicKeyPem":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAucoyCht6QpEzUPdQWP/J\nJYxObSH3MCcXBnG4d0OX78QshloeAHhl78EZ5c8I0ePmIjDg2NFK3/pG0EvSrHe2\nIZHnHaN5emgCb2ifNya5W572yfQXo1tUQy+ZXtbTUA7BWbr4LuCvd+HUavMwbx72\neraSZTiQj//ObwpbXFoZO5I/+e5avGmVnfmr/y2cG95hqFDtI3438RgZyBjY5kJM\nY1MLWoY9itGSfYmBtxRj3umlC2bPuBB+hHUJi6TvP7NO6zuUZ66m4ETyuBDi8iP6\ngnUp3Q4+1/I3nDUmhjt7OXckUcX3r5M4UHD3VVUFG0aZw6WWMEAxlyFf/07fCkhR\nBwIDAQAB\n-----END PUBLIC KEY-----\n\n"},"summary":"","tag":[],"type":"Person","url":"https://pleroma.karjalazet.se/relay"}"#;
const ASONIX_DOG_KEY: &str = "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx8zXS0QNg9YGUBsxAOBH\nJaxIn7i6t+Z4UOpSFDVa2kP0NvQgIJsq3wzRqvaiuncRWpkyFk1fTakiRGD32xnY\nt+juuAaIBlU8eswKyANFqhcLAvFHmT3rA1848M4/YM19djvlL/PR9T53tPNHU+el\nS9MlsG3o6Zsj8YaUJtCI8RgEuJoROLHUb/V9a3oMQ7CfuIoSvF3VEz3/dRT09RW6\n0wQX7yhka9WlKuayWLWmTcB9lAIX6neBk+qKc8VSEsO7mHkzB8mRgVcS2uYZl1eA\nD8/jTT+SlpeFNDZk0Oh35GNFoOxh9qjRw3NGxu7jJCVBehDODzasOv4xDxKAhONa\njQIDAQAB\n-----END PUBLIC KEY-----\n";
const KARJALAZET_KEY: &str = "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAucoyCht6QpEzUPdQWP/J\nJYxObSH3MCcXBnG4d0OX78QshloeAHhl78EZ5c8I0ePmIjDg2NFK3/pG0EvSrHe2\nIZHnHaN5emgCb2ifNya5W572yfQXo1tUQy+ZXtbTUA7BWbr4LuCvd+HUavMwbx72\neraSZTiQj//ObwpbXFoZO5I/+e5avGmVnfmr/y2cG95hqFDtI3438RgZyBjY5kJM\nY1MLWoY9itGSfYmBtxRj3umlC2bPuBB+hHUJi6TvP7NO6zuUZ66m4ETyuBDi8iP6\ngnUp3Q4+1/I3nDUmhjt7OXckUcX3r5M4UHD3VVUFG0aZw6WWMEAxlyFf/07fCkhR\nBwIDAQAB\n-----END PUBLIC KEY-----\n\n";
#[test]
fn handles_masto_keys() {
println!("{ASONIX_DOG_KEY}");
let _ = RsaPublicKey::from_public_key_pem(ASONIX_DOG_KEY.trim()).unwrap();
}
#[test]
fn handles_pleromo_keys() {
println!("{KARJALAZET_KEY}");
let _ = RsaPublicKey::from_public_key_pem(KARJALAZET_KEY.trim()).unwrap();
}
#[test]
fn handles_pleromo_relay_format() {
let _: AcceptedActors = serde_json::from_str(KARJALAZET_RELAY).unwrap();
}
#[test]
fn handles_masto_relay_format() {
let _: AcceptedActors = serde_json::from_str(ASONIX_DOG_ACTOR).unwrap();
}
}
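Taken together, do_verify above accepts a PKCS#8 PEM key, re-encodes it as PKCS#1 DER, and checks the PKCS#1 v1.5 signature with ring. A minimal standalone sketch of that conversion and check, assuming the same rsa, ring, and base64 crates this diff introduces (check_signature is an illustrative name, not part of the codebase):

use base64::{engine::general_purpose::STANDARD, Engine};
use rsa::{pkcs1::EncodeRsaPublicKey, pkcs8::DecodePublicKey, RsaPublicKey};

// Hypothetical helper mirroring do_verify: PEM (PKCS#8) -> PKCS#1 DER -> ring verify.
fn check_signature(pem: &str, signing_string: &str, signature_b64: &str) -> Result<(), String> {
    let public_key = RsaPublicKey::from_public_key_pem(pem.trim()).map_err(|e| e.to_string())?;
    let der = public_key.to_pkcs1_der().map_err(|e| e.to_string())?;

    let key = ring::signature::UnparsedPublicKey::new(
        &ring::signature::RSA_PKCS1_2048_8192_SHA256,
        der.as_bytes(),
    );

    let decoded = STANDARD.decode(signature_b64).map_err(|e| e.to_string())?;
    key.verify(signing_string.as_bytes(), &decoded)
        .map_err(|_| "bad signature".to_string())
}

The .trim() matters for fixtures like KARJALAZET_KEY above, whose PEM ends in a double newline.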


@ -1,33 +1,39 @@
use crate::{
config::{Config, UrlKind},
data::State,
future::LocalBoxFuture,
};
use actix_web::web::Data;
use actix_webfinger::{Resolver, Webfinger};
use rsa_magic_public_key::AsMagicPublicKey;
pub(crate) struct RelayResolver;

#[derive(Clone, Debug, thiserror::Error)]
#[error("Error resolving webfinger data")]
pub(crate) struct RelayError;
impl Resolver for RelayResolver {
type State = (Data<State>, Data<Config>);
type Error = RelayError;
fn find(
scheme: Option<&str>,
account: &str,
domain: &str,
(state, config): Self::State,
    ) -> LocalBoxFuture<'static, Result<Option<Webfinger>, Self::Error>> {
let domain = domain.to_owned();
let account = account.to_owned();
let scheme = scheme.map(|scheme| scheme.to_owned());
let fut = async move {
if let Some(scheme) = scheme {
if scheme != "acct:" {
return Ok(None);
}
}
if domain != config.hostname() {
return Ok(None);
}


@ -1,263 +0,0 @@
use crate::{
data::{ActorCache, NodeCache, State},
db::listen,
jobs::{JobServer, QueryInstance, QueryNodeinfo},
};
use activitystreams_new::primitives::XsdAnyUri;
use actix_rt::{spawn, time::delay_for};
use futures::stream::{poll_fn, StreamExt};
use log::{debug, error, warn};
use std::{collections::HashMap, sync::Arc, time::Duration};
use tokio_postgres::{tls::NoTls, AsyncMessage, Config};
use uuid::Uuid;
pub trait Listener {
fn key(&self) -> &str;
fn execute(&self, payload: &str);
}
pub struct Notifier {
config: Config,
listeners: HashMap<String, Vec<Box<dyn Listener + Send + Sync + 'static>>>,
}
impl Notifier {
pub fn new(config: Config) -> Self {
Notifier {
config,
listeners: HashMap::new(),
}
}
pub fn register<L>(mut self, l: L) -> Self
where
L: Listener + Send + Sync + 'static,
{
let v = self
.listeners
.entry(l.key().to_owned())
.or_insert_with(Vec::new);
v.push(Box::new(l));
self
}
pub fn start(self) {
spawn(async move {
let Notifier { config, listeners } = self;
loop {
let (new_client, mut conn) = match config.connect(NoTls).await {
Ok((client, conn)) => (client, conn),
Err(e) => {
error!("Error establishing DB Connection, {}", e);
delay_for(Duration::new(5, 0)).await;
continue;
}
};
let client = Arc::new(new_client);
let new_client = client.clone();
spawn(async move {
if let Err(e) = listen(&new_client).await {
error!("Error listening for updates, {}", e);
}
});
let mut stream = poll_fn(move |cx| conn.poll_message(cx));
loop {
match stream.next().await {
Some(Ok(AsyncMessage::Notification(n))) => {
debug!("Handling Notification, {:?}", n);
if let Some(v) = listeners.get(n.channel()) {
for l in v {
l.execute(n.payload());
}
}
}
Some(Ok(AsyncMessage::Notice(e))) => {
debug!("Handling Notice, {:?}", e);
}
Some(Ok(_)) => {
debug!("Handling rest");
}
Some(Err(e)) => {
debug!("Breaking loop due to error Error, {:?}", e);
break;
}
None => {
debug!("End of stream, breaking loop");
break;
}
}
}
drop(client);
warn!("Restarting listener task");
}
});
}
}
pub struct NewBlocks(pub State);
pub struct NewWhitelists(pub State);
pub struct NewListeners(pub State, pub JobServer);
pub struct NewActors(pub ActorCache);
pub struct NewNodes(pub NodeCache);
pub struct RmBlocks(pub State);
pub struct RmWhitelists(pub State);
pub struct RmListeners(pub State);
pub struct RmActors(pub ActorCache);
pub struct RmNodes(pub NodeCache);
impl Listener for NewBlocks {
fn key(&self) -> &str {
"new_blocks"
}
fn execute(&self, payload: &str) {
debug!("Caching block of {}", payload);
let state = self.0.clone();
let payload = payload.to_owned();
spawn(async move { state.cache_block(payload).await });
}
}
impl Listener for NewWhitelists {
fn key(&self) -> &str {
"new_whitelists"
}
fn execute(&self, payload: &str) {
debug!("Caching whitelist of {}", payload);
let state = self.0.clone();
let payload = payload.to_owned();
spawn(async move { state.cache_whitelist(payload.to_owned()).await });
}
}
impl Listener for NewListeners {
fn key(&self) -> &str {
"new_listeners"
}
fn execute(&self, payload: &str) {
if let Ok(uri) = payload.parse::<XsdAnyUri>() {
debug!("Caching listener {}", uri);
let state = self.0.clone();
let _ = self.1.queue(QueryInstance::new(uri.clone()));
let _ = self.1.queue(QueryNodeinfo::new(uri.clone()));
spawn(async move { state.cache_listener(uri).await });
} else {
warn!("Not caching listener {}, parse error", payload);
}
}
}
impl Listener for NewActors {
fn key(&self) -> &str {
"new_actors"
}
fn execute(&self, payload: &str) {
if let Ok(uri) = payload.parse::<XsdAnyUri>() {
debug!("Caching actor {}", uri);
let actors = self.0.clone();
spawn(async move { actors.cache_follower(uri).await });
} else {
warn!("Not caching actor {}, parse error", payload);
}
}
}
impl Listener for NewNodes {
fn key(&self) -> &str {
"new_nodes"
}
fn execute(&self, payload: &str) {
if let Ok(uuid) = payload.parse::<Uuid>() {
debug!("Caching node {}", uuid);
let nodes = self.0.clone();
spawn(async move { nodes.cache_by_id(uuid).await });
} else {
warn!("Not caching node {}, parse error", payload);
}
}
}
impl Listener for RmBlocks {
fn key(&self) -> &str {
"rm_blocks"
}
fn execute(&self, payload: &str) {
debug!("Busting block cache for {}", payload);
let state = self.0.clone();
let payload = payload.to_owned();
spawn(async move { state.bust_block(&payload).await });
}
}
impl Listener for RmWhitelists {
fn key(&self) -> &str {
"rm_whitelists"
}
fn execute(&self, payload: &str) {
debug!("Busting whitelist cache for {}", payload);
let state = self.0.clone();
let payload = payload.to_owned();
spawn(async move { state.bust_whitelist(&payload).await });
}
}
impl Listener for RmListeners {
fn key(&self) -> &str {
"rm_listeners"
}
fn execute(&self, payload: &str) {
if let Ok(uri) = payload.parse::<XsdAnyUri>() {
debug!("Busting listener cache for {}", uri);
let state = self.0.clone();
spawn(async move { state.bust_listener(&uri).await });
} else {
warn!("Not busting listener cache for {}", payload);
}
}
}
impl Listener for RmActors {
fn key(&self) -> &str {
"rm_actors"
}
fn execute(&self, payload: &str) {
if let Ok(uri) = payload.parse::<XsdAnyUri>() {
debug!("Busting actor cache for {}", uri);
let actors = self.0.clone();
spawn(async move { actors.bust_follower(&uri).await });
} else {
warn!("Not busting actor cache for {}", payload);
}
}
}
impl Listener for RmNodes {
fn key(&self) -> &str {
"rm_nodes"
}
fn execute(&self, payload: &str) {
if let Ok(uuid) = payload.parse::<Uuid>() {
debug!("Caching node {}", uuid);
let nodes = self.0.clone();
spawn(async move { nodes.bust_by_id(uuid).await });
} else {
warn!("Not caching node {}, parse error", payload);
}
}
}
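For context, this deleted module wired Postgres LISTEN/NOTIFY channels (new_blocks, new_actors, rm_nodes, and so on) to in-process cache updates. A hedged sketch of how a payload would have been fanned out to these listeners from the database side, assuming any tokio_postgres client and a hypothetical example.com actor URL:

// Illustrative only: any session on the same database can trigger the
// Listener::execute callbacks above; the channel name must match Listener::key().
async fn announce_actor(client: &tokio_postgres::Client) -> Result<(), tokio_postgres::Error> {
    client
        .batch_execute("NOTIFY new_actors, 'https://example.com/users/alice'")
        .await
}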


@ -1,227 +1,458 @@
use crate::{
    data::LastOnline,
    error::{Error, ErrorKind},
    spawner::Spawner,
    stream::{aggregate, limit_stream},
};
use activitystreams::iri_string::types::IriString;
use actix_web::http::header::Date;
use base64::{engine::general_purpose::STANDARD, Engine};
use dashmap::DashMap;
use http_signature_normalization_reqwest::{digest::ring::Sha256, prelude::*};
use reqwest_middleware::ClientWithMiddleware;
use ring::{
    rand::SystemRandom,
    signature::{RsaKeyPair, RSA_PKCS1_SHA256},
};
use rsa::{pkcs1::EncodeRsaPrivateKey, RsaPrivateKey};
use std::{
    sync::Arc,
    time::{Duration, SystemTime},
};
const ONE_SECOND: u64 = 1;
const ONE_MINUTE: u64 = 60 * ONE_SECOND;
const ONE_HOUR: u64 = 60 * ONE_MINUTE;
const ONE_DAY: u64 = 24 * ONE_HOUR;
// 20 KB
const JSON_SIZE_LIMIT: usize = 20 * 1024;
#[derive(Debug)]
pub(crate) enum BreakerStrategy {
// Requires a successful response
Require2XX,
// Allows HTTP 2xx-401
Allow401AndBelow,
// Allows HTTP 2xx-404
Allow404AndBelow,
}
#[derive(Clone)]
pub(crate) struct Breakers {
inner: Arc<DashMap<String, Breaker>>,
}
impl std::fmt::Debug for Breakers {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Breakers").finish()
}
}
impl Breakers {
pub(crate) fn should_try(&self, url: &IriString) -> bool {
if let Some(authority) = url.authority_str() {
if let Some(breaker) = self.inner.get(authority) {
breaker.should_try()
} else {
true
}
} else {
false
}
}
fn fail(&self, url: &IriString) {
if let Some(authority) = url.authority_str() {
let should_write = {
if let Some(mut breaker) = self.inner.get_mut(authority) {
breaker.fail();
if !breaker.should_try() {
tracing::warn!("Failed breaker for {authority}");
}
false
} else {
true
}
};
if should_write {
let mut breaker = self.inner.entry(authority.to_owned()).or_default();
breaker.fail();
}
}
}
fn succeed(&self, url: &IriString) {
if let Some(authority) = url.authority_str() {
let should_write = {
if let Some(mut breaker) = self.inner.get_mut(authority) {
breaker.succeed();
false
} else {
true
}
};
if should_write {
let mut breaker = self.inner.entry(authority.to_owned()).or_default();
breaker.succeed();
}
}
}
}
impl Default for Breakers {
fn default() -> Self {
Breakers {
inner: Arc::new(DashMap::new()),
}
}
}
#[derive(Debug)]
struct Breaker {
failures: usize,
last_attempt: SystemTime,
last_success: SystemTime,
}
impl Breaker {
const FAILURE_WAIT: Duration = Duration::from_secs(ONE_DAY);
const FAILURE_THRESHOLD: usize = 10;
fn should_try(&self) -> bool {
self.failures < Self::FAILURE_THRESHOLD
|| self.last_attempt + Self::FAILURE_WAIT < SystemTime::now()
}
fn fail(&mut self) {
self.failures += 1;
self.last_attempt = SystemTime::now();
}
fn succeed(&mut self) {
self.failures = 0;
self.last_attempt = SystemTime::now();
self.last_success = SystemTime::now();
}
}
impl Default for Breaker {
fn default() -> Self {
let now = SystemTime::now();
Breaker {
failures: 0,
last_attempt: now,
last_success: now,
}
}
}
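The policy encoded above: an authority is skipped once it accumulates FAILURE_THRESHOLD (10) consecutive failures, until FAILURE_WAIT (one day) has elapsed since the last attempt, and any success resets the count. A sketch of how that plays out, written as a test that could sit next to the Breaker type (illustrative, not part of the diff):

#[cfg(test)]
mod breaker_policy {
    use super::Breaker;

    #[test]
    fn opens_after_threshold_and_resets_on_success() {
        let mut breaker = Breaker::default();
        assert!(breaker.should_try());

        // ten consecutive failures inside the wait window open the breaker
        for _ in 0..10 {
            breaker.fail();
        }
        assert!(!breaker.should_try());

        // one success clears the failure count again
        breaker.succeed();
        assert!(breaker.should_try());
    }
}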
#[derive(Clone)]
pub(crate) struct Requests {
    client: ClientWithMiddleware,
    key_id: String,
    private_key: Arc<RsaKeyPair>,
    rng: SystemRandom,
    config: Config<Spawner>,
    breakers: Breakers,
    last_online: Arc<LastOnline>,
}
impl std::fmt::Debug for Requests {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Requests")
.field("key_id", &self.key_id)
.field("config", &self.config)
.field("breakers", &self.breakers)
.finish()
}
}
impl Requests {
    #[allow(clippy::too_many_arguments)]
    pub(crate) fn new(
        key_id: String,
        private_key: RsaPrivateKey,
        breakers: Breakers,
        last_online: Arc<LastOnline>,
        spawner: Spawner,
        client: ClientWithMiddleware,
    ) -> Self {
        let private_key_der = private_key.to_pkcs1_der().expect("Can encode der");
        let private_key = ring::signature::RsaKeyPair::from_der(private_key_der.as_bytes())
            .expect("Key is valid");

        Requests {
            client,
            key_id,
            private_key: Arc::new(private_key),
            rng: SystemRandom::new(),
            config: Config::new_with_spawner(spawner).mastodon_compat(),
            breakers,
            last_online,
        }
    }
    pub(crate) fn spawner(mut self, spawner: Spawner) -> Self {
        self.config = self.config.set_spawner(spawner);
        self
    }

    pub(crate) fn reset_breaker(&self, iri: &IriString) {
        self.breakers.succeed(iri);
    }
async fn check_response(
&self,
parsed_url: &IriString,
strategy: BreakerStrategy,
res: Result<reqwest::Response, reqwest_middleware::Error>,
) -> Result<reqwest::Response, Error> {
if res.is_err() {
self.breakers.fail(&parsed_url);
}
let res = res?;
let status = res.status();
let success = match strategy {
BreakerStrategy::Require2XX => status.is_success(),
BreakerStrategy::Allow401AndBelow => (200..=401).contains(&status.as_u16()),
BreakerStrategy::Allow404AndBelow => (200..=404).contains(&status.as_u16()),
};
if !success {
self.breakers.fail(&parsed_url);
if let Ok(s) = res.text().await {
if !s.is_empty() {
tracing::debug!("Response from {parsed_url}, {s}");
}
}
return Err(ErrorKind::Status(
parsed_url.to_string(),
crate::http1::status_to_http02(status),
)
.into());
}
// only actually succeed a breaker on 2xx response
if status.is_success() {
self.last_online.mark_seen(&parsed_url);
self.breakers.succeed(&parsed_url);
}
Ok(res)
}
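Read as a truth table, the three strategies are widening bands of tolerated status codes, while only a genuine 2xx marks the authority seen via last_online and resets its breaker. A hypothetical helper restating the match above (illustrative, not part of the diff):

// Mirrors the `success` match in check_response.
fn tolerated(strategy: &BreakerStrategy, status: u16) -> bool {
    match strategy {
        BreakerStrategy::Require2XX => (200..300).contains(&status),
        BreakerStrategy::Allow401AndBelow => (200..=401).contains(&status),
        BreakerStrategy::Allow404AndBelow => (200..=404).contains(&status),
    }
}

So tolerated(&BreakerStrategy::Allow404AndBelow, 404) holds and the breaker is not failed, but a 404 still never counts as a success for the breaker or last_online.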
    #[tracing::instrument(name = "Fetch Json", skip(self), fields(signing_string))]
    pub(crate) async fn fetch_json<T>(
        &self,
        url: &IriString,
        strategy: BreakerStrategy,
    ) -> Result<T, Error>
    where
        T: serde::de::DeserializeOwned,
    {
        self.do_fetch(url, "application/json", strategy).await
}
    #[tracing::instrument(name = "Fetch Json", skip(self), fields(signing_string))]
    pub(crate) async fn fetch_json_msky<T>(
        &self,
        url: &IriString,
        strategy: BreakerStrategy,
    ) -> Result<T, Error>
    where
        T: serde::de::DeserializeOwned,
    {
        let stream = self
            .do_deliver(
                url,
                &serde_json::json!({}),
                "application/json",
                "application/json",
                strategy,
            )
            .await?
            .bytes_stream();

        let body = aggregate(limit_stream(stream, JSON_SIZE_LIMIT)).await?;

        Ok(serde_json::from_slice(&body)?)
}
#[tracing::instrument(name = "Fetch Activity+Json", skip(self), fields(signing_string))]
pub(crate) async fn fetch<T>(
&self,
url: &IriString,
strategy: BreakerStrategy,
) -> Result<T, Error>
where
T: serde::de::DeserializeOwned,
{
self.do_fetch(url, "application/activity+json", strategy)
.await
}
async fn do_fetch<T>(
&self,
url: &IriString,
accept: &str,
strategy: BreakerStrategy,
) -> Result<T, Error>
where
T: serde::de::DeserializeOwned,
{
let stream = self
.do_fetch_response(url, accept, strategy)
.await?
.bytes_stream();
let body = aggregate(limit_stream(stream, JSON_SIZE_LIMIT)).await?;
Ok(serde_json::from_slice(&body)?)
}
#[tracing::instrument(name = "Fetch response", skip(self), fields(signing_string))]
pub(crate) async fn fetch_response(
&self,
url: &IriString,
strategy: BreakerStrategy,
) -> Result<reqwest::Response, Error> {
self.do_fetch_response(url, "*/*", strategy).await
}
pub(crate) async fn do_fetch_response(
&self,
url: &IriString,
accept: &str,
strategy: BreakerStrategy,
) -> Result<reqwest::Response, Error> {
if !self.breakers.should_try(url) {
return Err(ErrorKind::Breaker.into());
}
let signer = self.signer();
let span = tracing::Span::current();
let request = self
.client
.get(url.as_str())
.header("Accept", accept)
.header("Date", Date(SystemTime::now().into()).to_string())
.signature(&self.config, self.key_id.clone(), move |signing_string| {
span.record("signing_string", signing_string);
span.in_scope(|| signer.sign(signing_string))
})
.await?;
let res = self.client.execute(request).await;
let res = self.check_response(url, strategy, res).await?;
Ok(res)
}
#[tracing::instrument(
"Deliver to Inbox",
skip_all,
fields(inbox = inbox.to_string().as_str(), signing_string)
)]
pub(crate) async fn deliver<T>(
&self,
inbox: &IriString,
item: &T,
strategy: BreakerStrategy,
) -> Result<(), Error>
where
T: serde::ser::Serialize + std::fmt::Debug,
{
self.do_deliver(
inbox,
item,
"application/activity+json",
"application/activity+json",
strategy,
)
.await?;
Ok(())
}
    async fn do_deliver<T>(
        &self,
        inbox: &IriString,
        item: &T,
        content_type: &str,
        accept: &str,
        strategy: BreakerStrategy,
    ) -> Result<reqwest::Response, Error>
    where
        T: serde::ser::Serialize + std::fmt::Debug,
    {
        if !self.breakers.should_try(&inbox) {
            return Err(ErrorKind::Breaker.into());
        }

        let signer = self.signer();
        let span = tracing::Span::current();
        let item_string = serde_json::to_string(item)?;

        let request = self
            .client
            .post(inbox.as_str())
            .header("Accept", accept)
            .header("Content-Type", content_type)
            .header("Date", Date(SystemTime::now().into()).to_string())
            .signature_with_digest(
                self.config.clone(),
                self.key_id.clone(),
                Sha256::new(),
                item_string,
                move |signing_string| {
                    span.record("signing_string", signing_string);
                    span.in_scope(|| signer.sign(signing_string))
                },
            )
            .await?;

        let res = self.client.execute(request).await;

        let res = self.check_response(inbox, strategy, res).await?;

        Ok(res)
}
fn signer(&self) -> Signer {
Signer {
private_key: self.private_key.clone(),
rng: self.rng.clone(),
}
}
}
struct Signer {
    private_key: Arc<RsaKeyPair>,
    rng: SystemRandom,
}
impl Signer {
    fn sign(&self, signing_string: &str) -> Result<String, Error> {
        let mut signature = vec![0; self.private_key.public().modulus_len()];

        self.private_key
            .sign(
                &RSA_PKCS1_SHA256,
                &self.rng,
                signing_string.as_bytes(),
                &mut signature,
            )
            .map_err(|_| ErrorKind::SignRequest)?;

        Ok(STANDARD.encode(&signature))
    }
}
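A round-trip sketch of the signing path above: generate an RSA key with the rsa crate, convert it to PKCS#1 DER for ring exactly as Requests::new does, then produce the base64 signature a remote server would verify. This assumes the rand crate for key generation, and demo is an illustrative name:

use base64::{engine::general_purpose::STANDARD, Engine};
use ring::{
    rand::SystemRandom,
    signature::{RsaKeyPair, RSA_PKCS1_SHA256},
};
use rsa::{pkcs1::EncodeRsaPrivateKey, RsaPrivateKey};

fn demo() -> Result<String, Box<dyn std::error::Error>> {
    // key generation stays in the rsa crate; ring only consumes the DER
    let private_key = RsaPrivateKey::new(&mut rand::thread_rng(), 2048)?;
    let der = private_key.to_pkcs1_der()?;
    let key_pair = RsaKeyPair::from_der(der.as_bytes())?;

    let mut signature = vec![0; key_pair.public().modulus_len()];
    key_pair.sign(
        &RSA_PKCS1_SHA256,
        &SystemRandom::new(),
        b"(request-target): post /inbox",
        &mut signature,
    )?;

    Ok(STANDARD.encode(&signature))
}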


@ -1,12 +1,14 @@
mod actor;
mod healthz;
mod inbox;
mod index;
mod media;
mod nodeinfo;
mod statics;
pub(crate) use self::{
actor::route as actor,
healthz::route as healthz,
inbox::route as inbox,
index::route as index,
media::route as media,
@ -23,7 +25,7 @@ fn ok<T>(item: T) -> HttpResponse
where
T: Serialize,
{
    HttpResponse::Ok().content_type(CONTENT_TYPE).json(&item)
}
fn accepted<T>(item: T) -> HttpResponse
@ -32,5 +34,5 @@ where
{
HttpResponse::Accepted()
.content_type(CONTENT_TYPE)
        .json(&item)
}


@ -2,30 +2,33 @@ use crate::{
apub::{PublicKey, PublicKeyInner},
config::{Config, UrlKind},
data::State,
    error::Error,
    routes::ok,
};
use activitystreams::{
    actor::{ApActor, Application, Endpoints},
    context,
    prelude::*,
    security,
};
use activitystreams_ext::Ext1;
use actix_web::{web, Responder};
use rsa::pkcs8::EncodePublicKey;
#[tracing::instrument(name = "Actor", skip(config, state))]
pub(crate) async fn route(
    state: web::Data<State>,
    config: web::Data<Config>,
) -> Result<impl Responder, Error> {
let mut application = Ext1::new(
ApActor::new(config.generate_url(UrlKind::Inbox), Application::new()),
PublicKey {
public_key: PublicKeyInner {
id: config.generate_url(UrlKind::MainKey),
owner: config.generate_url(UrlKind::Actor),
            public_key_pem: state
                .public_key
                .to_public_key_pem(rsa::pkcs8::LineEnding::default())?,
},
},
);

src/routes/healthz.rs (new file)

@ -0,0 +1,7 @@
use crate::{data::State, error::Error};
use actix_web::{web, HttpResponse};
pub(crate) async fn route(state: web::Data<State>) -> Result<HttpResponse, Error> {
state.db.check_health().await?;
Ok(HttpResponse::Ok().finish())
}


@ -1,124 +1,134 @@
use crate::{
    apub::{AcceptedActivities, AcceptedUndoObjects, UndoTypes, ValidTypes},
    config::{Config, UrlKind},
    data::{ActorCache, State},
    db::Actor,
    error::{Error, ErrorKind},
    jobs::apub::{Announce, Follow, Forward, Reject, Undo},
    jobs::JobServer,
    requests::Requests,
    routes::accepted,
};
use activitystreams::{
    activity, base::AnyBase, iri_string::types::IriString, prelude::*, primitives::OneOrMany,
    public,
};
use actix_web::{web, HttpResponse};
use http_signature_normalization_actix::prelude::{DigestVerified, SignatureVerified};
#[tracing::instrument(name = "Inbox", skip_all, fields(id = tracing::field::debug(&input.id_unchecked()), kind = tracing::field::debug(&input.kind())))]
#[allow(clippy::too_many_arguments)]
pub(crate) async fn route(
    state: web::Data<State>,
    actors: web::Data<ActorCache>,
    config: web::Data<Config>,
    client: web::Data<Requests>,
    jobs: web::Data<JobServer>,
    input: web::Json<AcceptedActivities>,
    digest_verified: Option<DigestVerified>,
    signature_verified: Option<SignatureVerified>,
) -> Result<HttpResponse, Error> {
    let input = input.into_inner();

    let kind = input.kind().ok_or(ErrorKind::MissingKind)?;

    if digest_verified.is_some() && signature_verified.is_none() && *kind == ValidTypes::Delete {
        return Ok(accepted(serde_json::json!({})));
    } else if config.validate_signatures()
        && (digest_verified.is_none() || signature_verified.is_none())
    {
        return Err(ErrorKind::NoSignature(None).into());
    }

    let actor_id = if input.id_unchecked().is_some() {
        input.actor()?.as_single_id().ok_or(ErrorKind::MissingId)?
    } else {
        input
            .actor_unchecked()
            .as_single_id()
            .ok_or(ErrorKind::MissingId)?
    };

    let actor = actors.get(actor_id, &client).await?.into_inner();

    if let Some(verified) = signature_verified {
        if actor.public_key_id.as_str() != verified.key_id() {
            tracing::error!("Actor signed with wrong key");
            return Err(ErrorKind::BadActor(
                actor.public_key_id.to_string(),
                verified.key_id().to_owned(),
            )
            .into());
        }
    } else if config.validate_signatures() {
        tracing::error!("This case should never be reachable, since I handle signature checks earlier in the flow. If you see this in a log it means I did it wrong");
        return Err(ErrorKind::NoSignature(Some(actor.public_key_id.to_string())).into());
    }

    let is_allowed = state.db.is_allowed(actor.id.clone()).await?;
    let is_connected = state.db.is_connected(actor.id.clone()).await?;

    if !is_allowed {
        return Err(ErrorKind::NotAllowed(actor.id.to_string()).into());
    }

    if !is_connected && !valid_without_listener(&input)? {
        return Err(ErrorKind::NotSubscribed(actor.id.to_string()).into());
    }

    match kind {
        ValidTypes::Accept => handle_accept(&config, input).await?,
        ValidTypes::Reject => handle_reject(&config, &jobs, input, actor).await?,
        ValidTypes::Announce | ValidTypes::Create => {
            handle_announce(&state, &jobs, input, actor).await?
        }
        ValidTypes::Follow => handle_follow(&config, &jobs, input, actor).await?,
        ValidTypes::Add | ValidTypes::Delete | ValidTypes::Remove | ValidTypes::Update => {
            handle_forward(&jobs, input, actor).await?
        }
        ValidTypes::Undo => handle_undo(&config, &jobs, input, actor, is_connected).await?,
    };

    Ok(accepted(serde_json::json!({})))
}
fn valid_without_listener(input: &AcceptedActivities) -> Result<bool, Error> {
    match input.kind() {
        Some(ValidTypes::Follow) => Ok(true),
        Some(ValidTypes::Undo) => Ok(single_object(input.object_unchecked())?.is_kind("Follow")),
_ => Ok(false),
}
}
fn kind_str(base: &AnyBase) -> Result<&str, Error> {
    base.kind_str()
        .ok_or(ErrorKind::MissingKind)
        .map_err(Into::into)
}
fn id_string(id: Option<&IriString>) -> Result<String, Error> {
    id.map(|s| s.to_string())
        .ok_or(ErrorKind::MissingId)
        .map_err(Into::into)
}
fn single_object(o: &OneOrMany<AnyBase>) -> Result<&AnyBase, Error> {
    o.as_one().ok_or(ErrorKind::ObjectCount).map_err(Into::into)
}
async fn handle_accept(config: &Config, input: AcceptedActivities) -> Result<(), Error> {
    let base = single_object(input.object_unchecked())?.clone();

    let follow = if let Some(follow) = activity::Follow::from_any_base(base)? {
        follow
    } else {
        return Err(ErrorKind::Kind(
            kind_str(single_object(input.object_unchecked())?)?.to_owned(),
        )
        .into());
    };

    if !follow.actor_is(&config.generate_url(UrlKind::Actor)) {
        return Err(ErrorKind::WrongActor(id_string(follow.actor()?.as_single_id())?).into());
}
Ok(())
@ -129,23 +139,22 @@ async fn handle_reject(
jobs: &JobServer,
input: AcceptedActivities,
actor: Actor,
) -> Result<(), Error> {
    let base = single_object(input.object_unchecked())?.clone();

    let follow = if let Some(follow) = activity::Follow::from_any_base(base)? {
        follow
    } else {
        return Err(ErrorKind::Kind(
            kind_str(single_object(input.object_unchecked())?)?.to_owned(),
        )
        .into());
    };

    if !follow.actor_is(&config.generate_url(UrlKind::Actor)) {
        return Err(ErrorKind::WrongActor(id_string(follow.actor()?.as_single_id())?).into());
    }

    jobs.queue(Reject(actor)).await?;
Ok(())
}
@ -156,33 +165,34 @@ async fn handle_undo(
input: AcceptedActivities,
actor: Actor,
is_listener: bool,
) -> Result<(), Error> {
    let any_base = single_object(input.object_unchecked())?.clone();
    let undone_object =
        AcceptedUndoObjects::from_any_base(any_base)?.ok_or(ErrorKind::ObjectFormat)?;

    if !undone_object.is_kind(&UndoTypes::Follow) {
        if is_listener {
            jobs.queue(Forward::new(input, actor)).await?;
            return Ok(());
        } else {
            return Err(ErrorKind::NotSubscribed(actor.id.to_string()).into());
        }
    }

    let my_id: IriString = config.generate_url(UrlKind::Actor);

    if !undone_object.object_is(&my_id) && !undone_object.object_is(&public()) {
        return Err(ErrorKind::WrongActor(id_string(
            undone_object.object_unchecked().as_single_id(),
        )?)
        .into());
    }

    if !is_listener {
        return Ok(());
    }

    jobs.queue(Undo::new(input, actor)).await?;
Ok(())
}
@ -190,8 +200,8 @@ async fn handle_forward(
jobs: &JobServer,
input: AcceptedActivities,
actor: Actor,
) -> Result<(), Error> {
    jobs.queue(Forward::new(input, actor)).await?;
Ok(())
}
@ -201,14 +211,18 @@ async fn handle_announce(
jobs: &JobServer,
input: AcceptedActivities,
actor: Actor,
) -> Result<(), Error> {
    let object_id = input
        .object_unchecked()
        .as_single_id()
        .ok_or(ErrorKind::MissingId)?;

    if state.is_cached(object_id) {
        return Err(ErrorKind::Duplicate.into());
    }

    jobs.queue(Announce::new(object_id.to_owned(), actor))
        .await?;
Ok(())
}
@ -218,17 +232,16 @@ async fn handle_follow(
jobs: &JobServer,
input: AcceptedActivities,
actor: Actor,
) -> Result<(), Error> {
    let my_id: IriString = config.generate_url(UrlKind::Actor);

    if !input.object_is(&my_id) && !input.object_is(&public()) {
        return Err(
            ErrorKind::WrongActor(id_string(input.object_unchecked().as_single_id())?).into(),
        );
    }

    jobs.queue(Follow::new(input, actor)).await?;
Ok(())
}


@ -1,22 +1,91 @@
use crate::{
    config::Config,
    data::{Node, State},
    error::{Error, ErrorKind},
};
use actix_web::{web, HttpResponse};
use rand::{seq::SliceRandom, thread_rng};
use std::io::BufWriter;
const MINIFY_CONFIG: minify_html::Cfg = minify_html::Cfg {
do_not_minify_doctype: true,
ensure_spec_compliant_unquoted_attribute_values: true,
keep_closing_tags: true,
keep_html_and_head_opening_tags: false,
keep_spaces_between_attributes: true,
keep_comments: false,
keep_input_type_text_attr: true,
keep_ssi_comments: false,
preserve_brace_template_syntax: false,
preserve_chevron_percent_template_syntax: false,
minify_css: true,
minify_js: true,
remove_bangs: true,
remove_processing_instructions: true,
};
fn open_reg(node: &Node) -> bool {
node.instance
.as_ref()
.map(|i| i.reg)
.or_else(|| node.info.as_ref().map(|i| i.reg))
.unwrap_or(false)
}
#[tracing::instrument(name = "Index", skip(config, state))]
pub(crate) async fn route(
state: web::Data<State>,
config: web::Data<Config>,
) -> Result<HttpResponse, Error> {
let all_nodes = state.node_cache.nodes().await?;
let mut nodes = Vec::new();
let mut local = Vec::new();
for node in all_nodes {
if !state.is_connected(&node.base) {
continue;
}
if node
.base
.authority_str()
.map(|authority| {
config
.local_domains()
.iter()
.any(|domain| domain.as_str() == authority)
})
.unwrap_or(false)
{
local.push(node);
} else {
nodes.push(node);
}
}
nodes.sort_by(|lhs, rhs| match (open_reg(lhs), open_reg(rhs)) {
(true, true) | (false, false) => std::cmp::Ordering::Equal,
(true, false) => std::cmp::Ordering::Less,
(false, true) => std::cmp::Ordering::Greater,
});
if let Some((i, _)) = nodes.iter().enumerate().find(|(_, node)| !open_reg(node)) {
nodes[..i].shuffle(&mut thread_rng());
nodes[i..].shuffle(&mut thread_rng());
} else {
nodes.shuffle(&mut thread_rng());
}
let mut buf = BufWriter::new(Vec::new());
    crate::templates::index_html(&mut buf, &local, &nodes, &config)?;
    let html = buf.into_inner().map_err(|e| {
        tracing::error!("Error rendering template, {}", e.error());
        ErrorKind::FlushBuffer
    })?;

    let html = minify_html::minify(&html, &MINIFY_CONFIG);

    Ok(HttpResponse::Ok().content_type("text/html").body(html))
}
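The ordering logic above partitions connected nodes into open-registration and closed-registration groups, keeps the open group first, and shuffles within each group so the listing rotates between requests. A reduced sketch of the same idea with bool standing in for a full Node (order_nodes is an illustrative name):

use rand::{seq::SliceRandom, thread_rng};

fn order_nodes(mut open_reg: Vec<bool>) -> Vec<bool> {
    // open-registration entries sort ahead of closed ones
    open_reg.sort_by(|lhs, rhs| rhs.cmp(lhs));

    if let Some(i) = open_reg.iter().position(|&open| !open) {
        // shuffle each half independently so the boundary is preserved
        open_reg[..i].shuffle(&mut thread_rng());
        open_reg[i..].shuffle(&mut thread_rng());
    } else {
        open_reg.shuffle(&mut thread_rng());
    }

    open_reg
}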


@ -1,42 +1,42 @@
use crate::{
    data::MediaCache,
    error::Error,
    requests::{BreakerStrategy, Requests},
    stream::limit_stream,
};
use actix_web::{body::BodyStream, web, HttpResponse};
use uuid::Uuid;
// 16 MB
const IMAGE_SIZE_LIMIT: usize = 16 * 1024 * 1024;

#[tracing::instrument(name = "Media", skip(media, requests))]
pub(crate) async fn route(
    media: web::Data<MediaCache>,
    requests: web::Data<Requests>,
    uuid: web::Path<Uuid>,
) -> Result<HttpResponse, Error> {
let uuid = uuid.into_inner();
    if let Some(url) = media.get_url(uuid).await? {
        let res = requests
            .fetch_response(&url, BreakerStrategy::Allow404AndBelow)
            .await?;

        let mut response = HttpResponse::build(crate::http1::status_to_http02(res.status()));

        for (name, value) in res.headers().iter().filter(|(h, _)| *h != "connection") {
            response.insert_header((
                crate::http1::name_to_http02(name),
                crate::http1::value_to_http02(value),
            ));
        }

        return Ok(response.body(BodyStream::new(limit_stream(
            res.bytes_stream(),
            IMAGE_SIZE_LIMIT,
        ))));
    }
    Ok(HttpResponse::NotFound().finish())
}
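Both this route and the JSON fetchers lean on limit_stream to cap how much a foreign server may send (16 MB for media here, 20 KB for JSON in requests.rs). Relay's real implementation lives in crate::stream and is not shown in this diff; a hedged sketch of what such an adapter can look like, assuming the futures and bytes crates:

use futures_core::Stream;
use futures_util::StreamExt;

// Errors out as soon as the running total passes `limit`;
// otherwise passes chunks through untouched.
fn limit_bytes<S, E>(stream: S, limit: usize) -> impl Stream<Item = Result<bytes::Bytes, E>>
where
    S: Stream<Item = Result<bytes::Bytes, E>>,
    E: From<std::io::Error>,
{
    let mut seen = 0usize;
    stream.map(move |chunk| {
        let chunk = chunk?;
        seen += chunk.len();
        if seen > limit {
            return Err(std::io::Error::new(
                std::io::ErrorKind::InvalidData,
                "size limit exceeded",
            )
            .into());
        }
        Ok(chunk)
    })
}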


@ -5,7 +5,8 @@ use crate::{
use actix_web::{web, Responder};
use actix_webfinger::Link;
#[tracing::instrument(name = "Well Known NodeInfo", skip(config))]
pub(crate) async fn well_known(config: web::Data<Config>) -> impl Responder {
web::Json(Links {
links: vec![Link {
rel: "http://nodeinfo.diaspora.software/ns/schema/2.0".to_owned(),
@ -14,7 +15,8 @@ pub async fn well_known(config: web::Data<Config>) -> impl Responder {
kind: None,
}],
})
.with_header("Content-Type", "application/jrd+json")
.customize()
.insert_header(("Content-Type", "application/jrd+json"))
}
#[derive(serde::Serialize)]
@ -22,19 +24,40 @@ struct Links {
links: Vec<Link>,
}
#[tracing::instrument(name = "NodeInfo", skip_all)]
pub(crate) async fn route(
    config: web::Data<Config>,
    state: web::Data<State>,
) -> web::Json<NodeInfo> {
let inboxes = state.db.inboxes().await;
let blocks = if config.publish_blocks() {
Some(state.db.blocks().await.unwrap_or_default())
} else {
None
};
let peers = inboxes
.unwrap_or_default()
.iter()
.filter_map(|listener| listener.authority_str())
.map(|s| s.to_owned())
.collect();
let open_registrations = !config.restricted_mode();
web::Json(NodeInfo {
version: NodeInfoVersion,
software: Software {
            name: Config::software_name().to_lowercase(),
            version: Config::software_version(),
},
protocols: vec![Protocol::ActivityPub],
services: Services {
inbound: vec![],
outbound: vec![],
},
        open_registrations,
usage: Usage {
users: Users {
total: 1,
@ -44,20 +67,7 @@ pub async fn route(config: web::Data<Config>, state: web::Data<State>) -> web::J
local_posts: 0,
local_comments: 0,
},
        metadata: Metadata { peers, blocks },
})
}

Some files were not shown because too many files have changed in this diff.