Compare commits


99 commits

Author  SHA1        Message  Date
asonix  6ff7b59778  Prepare 0.3.116  2024-12-09 20:59:14 -06:00
asonix  d9da352558  Update teloxide  2024-12-09 19:40:33 -06:00
asonix  aea64c726a  Update opentelemetry stack  2024-12-09 19:37:35 -06:00
asonix  e243bd4600  Update bcrypt  2024-12-09 19:30:59 -06:00
asonix  a452fb91ba  Fix build due to reqwest-tracing semver break  2024-12-09 19:30:30 -06:00
asonix  35acc916f2  flake: Use nixos-24.11 stable  2024-12-09 19:23:49 -06:00
asonix  752067ffb7  Update dependencies (minor & point)  2024-08-05 16:45:32 -05:00
asonix  b308e080af  Update console-subscriber  2024-08-05 16:44:39 -05:00
asonix  6ab37dc06f  Update opentelemetry stack  2024-08-05 16:43:51 -05:00
asonix  a23b30cc91  Bump version  2024-07-09 16:45:38 -05:00
asonix  1b58a50d44  Merge pull request 'Start upgrading to hyper and http 1' (#3) from asonix/hyper-1 into main (Reviewed-on: https://git.asonix.dog/asonix/relay/pulls/3)  2024-07-09 21:39:10 +00:00
asonix  308a945283  Start upgrading to http1  2024-07-09 16:32:05 -05:00
asonix  86cab5d2d9  Update opentelemetry stack  2024-07-09 16:28:00 -05:00
asonix  a70e75665b  Update console-subscriber  2024-07-09 16:26:33 -05:00
asonix  f1792c8eb3  Update dashmap  2024-07-09 16:26:00 -05:00
asonix  d918ef1495  Update rustls  2024-07-09 16:24:44 -05:00
asonix  2870789e1f  Update background jobs, async-cpupool, metrics  2024-07-09 16:21:53 -05:00
asonix  cda92e7523  Update flake  2024-06-23 13:57:40 -05:00
asonix  43b03a176c  Don't fail publish on clippy warnings (unfixable without ructe release)  2024-06-23 13:57:28 -05:00
asonix  a465d1ae5b  Allow versions to be unused  2024-06-23 13:56:37 -05:00
asonix  4fa7674a35  Move cargo config to config.toml  2024-06-23 13:55:10 -05:00
asonix  8c14d613f7  Prepare v0.3.114  2024-06-23 13:45:10 -05:00
asonix  aff2431681  Update dependencies (minor & point)  2024-06-23 13:42:26 -05:00
asonix  5aa97212b3  Impose limits on the size of downloaded content from foreign servers  2024-06-23 13:35:24 -05:00
asonix  97567cf598  Prepare v0.3.113  2024-05-01 15:45:53 -05:00
asonix  4c663f399e  Update dependencies (minor & point)  2024-05-01 15:43:53 -05:00
asonix  8a3256f52a  Avoid deadlock of iterating over tree while transacting on that tree  2024-05-01 15:43:08 -05:00
asonix  13a2653fe8  Remove prerelease flag  2024-04-23 14:00:04 -05:00
asonix  8dd9a86d22  Use match_pattern rather than path for metrics differentiation  2024-04-21 11:44:16 -05:00
asonix  5c0c0591dd  Prepare 0.3.112  2024-04-14 22:47:38 -05:00
asonix  04ca4e5401  Stable async-cpupool  2024-04-14 19:53:31 -05:00
asonix  1de1d76506  prerelease  2024-04-13 13:57:12 -05:00
asonix  dd9225bb89  Prepare v0.3.111  2024-04-07 11:53:24 -05:00
asonix  b577730836  Fix build  2024-04-07 11:40:57 -05:00
asonix  21883c168b  BROKEN! Start collecting more metrics about various sizes  2024-04-07 11:04:03 -05:00
asonix  76a0c79369  Update base64, ammonia  2024-04-06 13:42:29 -05:00
asonix  6444782db9  Bump version, Update dependencies (minor & point)  2024-04-06 13:34:54 -05:00
asonix  14aea3256d  Update dependencies (minor & point)  2024-03-23 19:10:13 -05:00
asonix  f4f2aa2025  Update flake  2024-03-23 19:09:53 -05:00
asonix  615271fe80  Update opentelemetry dependencies, other dependencies minor & point  2024-03-10 20:09:16 -05:00
asonix  4aed601664  No console by default  2024-02-25 21:08:17 -06:00
asonix  bf21f05aca  Strip release binaries  2024-02-12 15:16:20 -06:00
asonix  e69f6c6edb  Remove prerelease marker  2024-02-12 14:32:46 -06:00
asonix  1e05eb4fe4  Bump version  2024-02-12 13:46:44 -06:00
asonix  7f09ac3edd  Update dependencies (minor & point)  2024-02-12 13:42:45 -06:00
asonix  4788ad332a  Update image version in docker-compose  2024-02-11 14:56:26 -06:00
asonix  1fd82915d3  Remove bad argument  2024-02-11 14:52:57 -06:00
asonix  0472082a97  Add actions, remove drone  2024-02-11 14:49:22 -06:00
asonix  c8250acce7  Bump version  2024-02-05 00:25:15 -06:00
asonix  b074759eb4  Update background-jobs, rework errors  2024-02-05 00:24:49 -06:00
asonix  ed399f1531  Be more accurate for reqwest errors  2024-02-04 20:51:25 -06:00
asonix  7e39acdcb0  Update config  2024-02-04 20:28:18 -06:00
asonix  894d096622  Bump version  2024-02-04 20:25:59 -06:00
asonix  05e31254ba  Update rustls for actix-web, log less  2024-02-04 20:25:50 -06:00
asonix  086ca9fbf2  Support live-reloading TLS certificate  2024-01-31 16:49:23 -06:00
asonix  603fcc6e57  Bump version  2024-01-18 13:35:00 -05:00
asonix  6b8f15ee08  Use stable background-jobs  2024-01-18 13:34:10 -05:00
asonix  53939f8ae8  Go back to job-server per core  2024-01-18 12:31:26 -05:00
asonix  b53b34c515  Update dependencies (minor & point)  2024-01-14 16:16:56 -05:00
asonix  6dcdf2fc87  clippy  2024-01-14 16:10:32 -05:00
asonix  83e5619eb4  Update flake.lock  2024-01-14 16:10:19 -05:00
asonix  9090bb5c62  Bump version  2024-01-14 15:59:16 -05:00
asonix  d862bf8106  Use tokio rather than actix-rt  2024-01-14 15:56:07 -05:00
asonix  417553e643  Bump version  2024-01-09 18:09:51 -06:00
asonix  a2456c3d5f  Update dependencies (minor & point)  2024-01-09 18:08:10 -06:00
asonix  2b3cb8db92  clippy  2024-01-08 17:10:31 -06:00
asonix  18f1096221  Update version  2024-01-08 17:06:02 -06:00
asonix  c640567206  Update to newest background-jobs, implement Job rather than ActixJob  2024-01-08 17:00:15 -06:00
asonix  36aa9120ea  Update metrics  2024-01-07 12:43:58 -06:00
asonix  e377f3988b  Update minify-html, dependencies (minor & point)  2024-01-07 12:10:43 -06:00
asonix  8c811710ac  Bump version  2023-11-25 21:27:05 -06:00
asonix  e4f665d75f  use stable async-cpupool  2023-11-25 21:17:59 -06:00
asonix  4383357abe  update flake  2023-11-25 20:27:20 -06:00
asonix  f70af22c6a  clippy  2023-11-25 20:27:11 -06:00
asonix  8bce3d172f  Update streem  2023-11-25 20:20:38 -06:00
asonix  8540e93469  Use async-cpupool  2023-11-25 20:18:11 -06:00
asonix  708e7da301  Update opentelemetry, ring, http-signature-normalization, tracing-log  2023-11-25 20:16:13 -06:00
asonix  a0f9827e18  Bump version  2023-09-09 18:10:31 -04:00
asonix  9ebed87cde  Update http-signature-normalization-actix  2023-09-09 18:09:24 -04:00
asonix  ae3d19a774  Bump version  2023-09-09 17:31:42 -04:00
asonix  2a5e769afb  Update http-signature-normalization-actix  2023-09-09 17:30:07 -04:00
asonix  f4839d688e  Update dependencies (minor & point)  2023-09-09 16:52:53 -04:00
asonix  206db2079f  Remove futures-util dependency  2023-09-09 16:46:22 -04:00
asonix  6714fe48ed  Bump version, enable tokio_unstable for console  2023-09-08 19:15:19 -06:00
asonix  804d22ee81  Enable different breaker failure cases for different endpoints (additionally, don't count 4xx towards succeeding a breaker)  2023-09-08 19:11:24 -06:00
asonix  5a6fbbcb77  Update tracing-opentelemetry  2023-09-08 18:41:55 -06:00
asonix  ea926f73c4  Update dependencies (minor & point)  2023-09-08 18:39:37 -06:00
asonix  53b14c3329  Bump version  2023-08-29 23:05:41 -05:00
asonix  9b1fad0e2e  Update rustls  2023-08-29 22:15:41 -05:00
asonix  a8ba53fe11  Update flake  2023-08-26 12:20:36 -05:00
asonix  927fb91a5e  Update flume  2023-08-17 17:11:08 -05:00
asonix  4d4093c15a  Bump version  2023-08-17 17:10:24 -05:00
asonix  75df271b58  Switch from awc to reqwest, enable HTTP Proxies  2023-08-17 17:09:35 -05:00
asonix  73b429ab51  Update opentelemetry  2023-08-05 12:47:52 -05:00
asonix  2f57c855a4  Bump version  2023-08-04 19:01:05 -05:00
asonix  cdbde9519e  Update dependencies (minor & point)  2023-08-04 18:57:53 -05:00
asonix  2cbe4864c3  Switch to ring for crypto  2023-08-04 18:57:53 -05:00
asonix  731a831070  Bump version  2023-07-28 17:47:51 -05:00
asonix  795d3238ad  Hide nodes that failed breakers from index page  2023-07-28 17:46:23 -05:00
51 changed files with 3726 additions and 2328 deletions

.cargo/config (deleted, 2 lines)
@@ -1,2 +0,0 @@
-[build]
-# rustflags = ["--cfg", "tokio_unstable"]

.cargo/config.toml (new file, 2 lines)
@@ -0,0 +1,2 @@
+[build]
+rustflags = ["--cfg", "tokio_unstable"]

.drone.yml (deleted, 421 lines)

kind: pipeline
type: docker
name: clippy

platform:
  arch: amd64

clone:
  disable: true

steps:
  - name: clone
    image: alpine/git:latest
    user: root
    commands:
      - git clone $DRONE_GIT_HTTP_URL .
      - git checkout $DRONE_COMMIT
      - chown -R 991:991 .

  - name: clippy
    image: asonix/rust-builder:latest-linux-amd64
    pull: always
    commands:
      - rustup component add clippy
      - cargo clippy --no-deps -- -D warnings

trigger:
  event:
    - push
    - pull_request
    - tag

---

kind: pipeline
type: docker
name: tests

platform:
  arch: amd64

clone:
  disable: true

steps:
  - name: clone
    image: alpine/git:latest
    user: root
    commands:
      - git clone $DRONE_GIT_HTTP_URL .
      - git checkout $DRONE_COMMIT
      - chown -R 991:991 .

  - name: tests
    image: asonix/rust-builder:latest-linux-amd64
    pull: always
    commands:
      - cargo test

trigger:
  event:
    - push
    - pull_request
    - tag

---

kind: pipeline
type: docker
name: check-amd64

platform:
  arch: amd64

clone:
  disable: true

steps:
  - name: clone
    image: alpine/git:latest
    user: root
    commands:
      - git clone $DRONE_GIT_HTTP_URL .
      - git checkout $DRONE_COMMIT
      - chown -R 991:991 .

  - name: check
    image: asonix/rust-builder:latest-linux-amd64
    pull: always
    commands:
      - cargo check --target=$TARGET

trigger:
  event:
    - push
    - pull_request

---

kind: pipeline
type: docker
name: build-amd64

platform:
  arch: amd64

clone:
  disable: true

steps:
  - name: clone
    image: alpine/git:latest
    user: root
    commands:
      - git clone $DRONE_GIT_HTTP_URL .
      - git checkout $DRONE_COMMIT
      - chown -R 991:991 .

  - name: build
    image: asonix/rust-builder:latest-linux-amd64
    pull: always
    commands:
      - cargo build --target=$TARGET --release
      - $TOOL-strip target/$TARGET/release/relay
      - cp target/$TARGET/release/relay .
      - cp relay relay-linux-amd64

  - name: push
    image: plugins/docker:20
    settings:
      username: asonix
      password:
        from_secret: dockerhub_token
      repo: asonix/relay
      dockerfile: docker/drone/Dockerfile
      auto_tag: true
      auto_tag_suffix: linux-amd64
      build_args:
        - REPO_ARCH=amd64

  - name: publish
    image: plugins/gitea-release:1
    settings:
      api_key:
        from_secret: gitea_token
      base_url: https://git.asonix.dog
      files:
        - relay-linux-amd64

depends_on:
  - clippy
  - tests

trigger:
  event:
    - tag

---

kind: pipeline
type: docker
name: check-arm64v8

platform:
  arch: amd64

clone:
  disable: true

steps:
  - name: clone
    image: alpine/git:latest
    user: root
    commands:
      - git clone $DRONE_GIT_HTTP_URL .
      - git checkout $DRONE_COMMIT
      - chown -R 991:991 .

  - name: check
    image: asonix/rust-builder:latest-linux-arm64v8
    pull: always
    commands:
      - cargo check --target=$TARGET

trigger:
  event:
    - push
    - pull_request

---

kind: pipeline
type: docker
name: build-arm64v8

platform:
  arch: amd64

clone:
  disable: true

steps:
  - name: clone
    image: alpine/git:latest
    user: root
    commands:
      - git clone $DRONE_GIT_HTTP_URL .
      - git checkout $DRONE_COMMIT
      - chown -R 991:991 .

  - name: build
    image: asonix/rust-builder:latest-linux-arm64v8
    pull: always
    commands:
      - cargo build --target=$TARGET --release
      - $TOOL-strip target/$TARGET/release/relay
      - cp target/$TARGET/release/relay .
      - cp relay relay-linux-arm64v8

  - name: push
    image: plugins/docker:20
    settings:
      username: asonix
      password:
        from_secret: dockerhub_token
      repo: asonix/relay
      dockerfile: docker/drone/Dockerfile
      auto_tag: true
      auto_tag_suffix: linux-arm64v8
      build_args:
        - REPO_ARCH=arm64v8

  - name: publish
    image: plugins/gitea-release:1
    settings:
      api_key:
        from_secret: gitea_token
      base_url: https://git.asonix.dog
      files:
        - relay-linux-arm64v8

depends_on:
  - clippy
  - tests

trigger:
  event:
    - tag

---

kind: pipeline
type: docker
name: check-arm32v7

platform:
  arch: amd64

clone:
  disable: true

steps:
  - name: clone
    image: alpine/git:latest
    user: root
    commands:
      - git clone $DRONE_GIT_HTTP_URL .
      - git checkout $DRONE_COMMIT
      - chown -R 991:991 .

  - name: check
    image: asonix/rust-builder:latest-linux-arm32v7
    pull: always
    commands:
      - cargo check --target=$TARGET

trigger:
  event:
    - push
    - pull_request

---

kind: pipeline
type: docker
name: build-arm32v7

platform:
  arch: amd64

clone:
  disable: true

steps:
  - name: clone
    image: alpine/git:latest
    user: root
    commands:
      - git clone $DRONE_GIT_HTTP_URL .
      - git checkout $DRONE_COMMIT
      - chown -R 991:991 .

  - name: build
    image: asonix/rust-builder:latest-linux-arm32v7
    pull: always
    commands:
      - cargo build --target=$TARGET --release
      - $TOOL-strip target/$TARGET/release/relay
      - cp target/$TARGET/release/relay .
      - cp relay relay-linux-arm32v7

  - name: push
    image: plugins/docker:20
    settings:
      username: asonix
      password:
        from_secret: dockerhub_token
      repo: asonix/relay
      dockerfile: docker/drone/Dockerfile
      auto_tag: true
      auto_tag_suffix: linux-arm32v7
      build_args:
        - REPO_ARCH=arm32v7

  - name: publish
    image: plugins/gitea-release:1
    settings:
      api_key:
        from_secret: gitea_token
      base_url: https://git.asonix.dog
      files:
        - relay-linux-arm32v7

depends_on:
  - clippy
  - tests

trigger:
  event:
    - tag

---

kind: pipeline
type: docker
name: manifest

platform:
  arch: amd64

clone:
  disable: true

steps:
  - name: clone
    image: alpine/git:latest
    user: root
    commands:
      - git clone $DRONE_GIT_HTTP_URL .
      - git checkout $DRONE_COMMIT
      - chown -R 991:991 .

  - name: manifest
    image: plugins/manifest:1
    settings:
      username: asonix
      password:
        from_secret: dockerhub_token
      dump: true
      auto_tag: true
      ignore_missing: true
      spec: docker/drone/manifest.tmpl

depends_on:
  - build-amd64
  - build-arm64v8
  - build-arm32v7

trigger:
  event:
    - tag

---

kind: pipeline
type: docker
name: publish-crate

platform:
  arch: amd64

clone:
  disable: true

steps:
  - name: clone
    image: alpine/git:latest
    user: root
    commands:
      - git clone $DRONE_GIT_HTTP_URL .
      - git checkout $DRONE_COMMIT
      - chown -R 991:991 .

  - name: publish
    image: asonix/rust-builder:latest-linux-amd64
    pull: always
    environment:
      CRATES_IO_TOKEN:
        from_secret: crates_io_token
    commands:
      - cargo publish --token $CRATES_IO_TOKEN

depends_on:
  - build-amd64
  - build-arm64v8
  - build-arm32v7

trigger:
  event:
    - tag

(new file, 61 lines: Forgejo Actions CI workflow)

on:
  push:
    branches:
      - '*'
  pull_request:
    branches:
      - main

jobs:
  clippy:
    runs-on: docker
    container:
      image: docker.io/asonix/actions-base-image:0.1
    steps:
      -
        name: Checkout relay
        uses: https://github.com/actions/checkout@v4
      -
        name: Cargo Cache
        uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
      -
        name: Clippy
        run: |
          cargo clippy --no-default-features -- -D warnings

  tests:
    runs-on: docker
    container:
      image: docker.io/asonix/actions-base-image:0.1
    steps:
      -
        name: Checkout relay
        uses: https://github.com/actions/checkout@v4
      -
        name: Cargo Cache
        uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
      -
        name: Test
        run: cargo test

  check:
    strategy:
      fail-fast: false
      matrix:
        target:
          - x86_64-unknown-linux-musl
          - armv7-unknown-linux-musleabihf
          - aarch64-unknown-linux-musl
    runs-on: docker
    container:
      image: docker.io/asonix/actions-base-image:0.1
    steps:
      -
        name: Checkout relay
        uses: https://github.com/actions/checkout@v4
      -
        name: Cargo Cache
        uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
      -
        name: Debug builds
        run: cargo zigbuild --target ${{ matrix.target }}

(new file, 226 lines: Forgejo Actions release workflow)

on:
  push:
    tags:
      - 'v*.*.*'

env:
  REGISTRY_IMAGE: asonix/relay

jobs:
  clippy:
    runs-on: base-image
    container:
      image: docker.io/asonix/actions-base-image:0.1
    steps:
      -
        name: Checkout relay
        uses: https://github.com/actions/checkout@v4
      -
        name: Cargo Cache
        uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
      -
        name: Clippy
        run: |
          # cargo clippy --no-default-features -- -D warnings
          cargo clippy --no-default-features

  tests:
    runs-on: docker
    container:
      image: docker.io/asonix/actions-base-image:0.1
    steps:
      -
        name: Checkout relay
        uses: https://github.com/actions/checkout@v4
      -
        name: Cargo Cache
        uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
      -
        name: Test
        run: cargo test

  build:
    needs:
      - clippy
      - tests
    runs-on: docker
    container:
      image: docker.io/asonix/actions-base-image:0.1
    strategy:
      fail-fast: false
      matrix:
        info:
          - target: x86_64-unknown-linux-musl
            artifact: linux-amd64
            platform: linux/amd64
          - target: armv7-unknown-linux-musleabihf
            artifact: linux-arm32v7
            platform: linux/arm/v7
          - target: aarch64-unknown-linux-musl
            artifact: linux-arm64v8
            platform: linux/arm64
    steps:
      -
        name: Checkout relay
        uses: https://github.com/actions/checkout@v4
      -
        name: Cargo Cache
        uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
      -
        name: Prepare Platform
        run: |
          platform=${{ matrix.info.platform }}
          echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
        shell: bash
      -
        name: Docker meta
        id: meta
        uses: https://github.com/docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY_IMAGE }}
          flavor: |
            latest=auto
            suffix=-${{ matrix.info.artifact }}
          tags: |
            type=raw,value=latest,enable={{ is_default_branch }}
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=semver,pattern={{major}}
      -
        name: Set up QEMU
        uses: https://github.com/docker/setup-qemu-action@v3
      -
        name: Set up Docker Buildx
        uses: https://github.com/docker/setup-buildx-action@v3
      -
        name: Docker login
        uses: https://github.com/docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      -
        name: Compile relay
        run: cargo zigbuild --target ${{ matrix.info.target }} --release
      -
        name: Prepare artifacts
        run: |
          mkdir artifacts
          cp target/${{ matrix.info.target }}/release/relay artifacts/relay-${{ matrix.info.artifact }}
      -
        uses: https://github.com/actions/upload-artifact@v3
        with:
          name: binaries
          path: artifacts/
      -
        name: Prepare binary
        run: |
          cp target/${{ matrix.info.target }}/release/relay docker/forgejo/relay
      -
        name: Build and push ${{ matrix.info.platform }} docker image
        id: build
        uses: docker/build-push-action@v5
        with:
          context: ./docker/forgejo
          platforms: ${{ matrix.info.platform }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          outputs: type=image,name=${{ env.REGISTRY_IMAGE }},name-canonical=true,push=true
      -
        name: Export digest
        run: |
          mkdir -p /tmp/digests
          digest="${{ steps.build.outputs.digest }}"
          touch "/tmp/digests/${digest#sha256:}"
          echo "Created /tmp/digests/${digest#sha256:}"
        shell: bash
      -
        name: Upload ${{ matrix.info.platform }} digest
        uses: https://github.com/actions/upload-artifact@v3
        with:
          name: digests
          path: /tmp/digests/*
          if-no-files-found: error
          retention-days: 1

  publish-docker:
    runs-on: docker
    container:
      image: docker.io/asonix/actions-base-image:0.1
    needs: [build]
    steps:
      -
        name: Download digests
        uses: https://github.com/actions/download-artifact@v3
        with:
          name: digests
          path: /tmp/digests
          pattern: digests-*
          merge-multiple: true
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      -
        name: Docker login
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      -
        name: Docker meta
        id: meta
        uses: https://github.com/docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY_IMAGE }}
          flavor: |
            latest=auto
          tags: |
            type=raw,value=latest,enable={{ is_default_branch }}
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=semver,pattern={{major}}
      -
        name: Create manifest list and push
        working-directory: /tmp/digests
        run: |
          tags=$(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "${DOCKER_METADATA_OUTPUT_JSON}")
          images=$(printf "${{ env.REGISTRY_IMAGE }}@sha256:%s " *)
          echo "Running 'docker buildx imagetools create ${tags[@]} ${images[@]}'"
          docker buildx imagetools create ${tags[@]} ${images[@]}
        shell: bash
      -
        name: Inspect Image
        run: |
          docker buildx imagetools inspect ${{ env.REGISTRY_IMAGE }}:${{ steps.meta.outputs.version }}

  publish-forgejo:
    needs: [build]
    runs-on: docker
    container:
      image: docker.io/asonix/actions-base-image:0.1
    steps:
      - uses: https://github.com/actions/download-artifact@v3
        with:
          name: binaries
          path: artifacts/
          merge-multiple: true
      - uses: actions/forgejo-release@v1
        with:
          direction: upload
          token: ${{ secrets.GITHUB_TOKEN }}
          release-dir: artifacts/

  publish-crate:
    needs: [build]
    runs-on: docker
    container:
      image: docker.io/asonix/actions-base-image:0.1
    steps:
      -
        name: Checkout relay
        uses: https://github.com/actions/checkout@v4
      -
        name: Cargo Cache
        uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
      -
        name: Publish Crate
        run: cargo publish --token ${{ secrets.CRATES_IO_TOKEN }}

Cargo.lock (generated, 3499 lines changed): file diff suppressed because it is too large.

Cargo.toml
@@ -1,11 +1,11 @@
 [package]
 name = "ap-relay"
 description = "A simple activitypub relay"
-version = "0.3.97"
+version = "0.3.116"
 authors = ["asonix <asonix@asonix.dog>"]
 license = "AGPL-3.0"
 readme = "README.md"
-repository = "https://git.asonix.dog/asonix/ap-relay"
+repository = "https://git.asonix.dog/asonix/relay"
 keywords = ["activitypub", "relay"]
 edition = "2021"
 build = "src/build.rs"
@@ -14,93 +14,101 @@ build = "src/build.rs"
 name = "relay"
 path = "src/main.rs"
 
+[profile.release]
+strip = true
+
 [features]
-console = ["console-subscriber"]
+console = ["dep:console-subscriber"]
 default = []
 
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 [dependencies]
-anyhow = "1.0"
-actix-rt = "2.7.0"
-actix-web = { version = "4.0.1", default-features = false, features = [
-  "rustls",
-  "compress-brotli",
-  "compress-gzip",
-] }
-actix-webfinger = "0.4.0"
+actix-web = { version = "4.4.0", default-features = false, features = ["compress-brotli", "compress-gzip", "rustls-0_23"] }
+actix-webfinger = { version = "0.5.0", default-features = false }
 activitystreams = "0.7.0-alpha.25"
 activitystreams-ext = "0.1.0-alpha.3"
-ammonia = "3.1.0"
-awc = { version = "3.0.0", default-features = false, features = ["rustls"] }
-bcrypt = "0.15"
-base64 = "0.21"
+ammonia = "4.0.0"
+async-cpupool = "0.3.0"
+bcrypt = "0.16"
+base64 = "0.22"
 clap = { version = "4.0.0", features = ["derive"] }
-config = "0.13.0"
-console-subscriber = { version = "0.1", optional = true }
-dashmap = "5.1.0"
+color-eyre = "0.6.2"
+config = { version = "0.14.0", default-features = false, features = ["toml", "json", "yaml"] }
+console-subscriber = { version = "0.4", optional = true }
+dashmap = "6.0.1"
 dotenv = "0.15.0"
-flume = "0.10.14"
-futures-util = "0.3.17"
-lru = "0.11.0"
-metrics = "0.21.0"
-metrics-exporter-prometheus = { version = "0.12.0", default-features = false, features = [
+futures-core = "0.3.30"
+lru = "0.12.0"
+metrics = "0.23.0"
+metrics-exporter-prometheus = { version = "0.15.0", default-features = false, features = [
   "http-listener",
 ] }
-metrics-util = "0.15.0"
+metrics-util = "0.17.0"
 mime = "0.3.16"
-minify-html = "0.11.0"
-opentelemetry = { version = "0.19", features = ["rt-tokio"] }
-opentelemetry-otlp = "0.12"
+minify-html = "0.15.0"
+opentelemetry = "0.27.1"
+opentelemetry_sdk = { version = "0.27", features = ["rt-tokio"] }
+opentelemetry-otlp = { version = "0.27", features = ["grpc-tonic"] }
 pin-project-lite = "0.2.9"
-quanta = "0.11.0"
+# pinned to metrics-util
+quanta = "0.12.0"
 rand = "0.8"
-rsa = { version = "0.9", features = ["sha2"] }
+reqwest = { version = "0.12", default-features = false, features = ["rustls-tls", "stream"]}
+reqwest-middleware = { version = "0.4", default-features = false, features = ["json"] }
+reqwest-tracing = "0.5.0"
+ring = "0.17.5"
+rsa = "0.9"
 rsa-magic-public-key = "0.8.0"
-rustls = "0.20.7"
-rustls-pemfile = "1.0.1"
+rustls = { version = "0.23.0", default-features = false, features = ["ring", "logging", "std", "tls12"] }
+rustls-channel-resolver = "0.3.0"
+rustls-pemfile = "2"
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 sled = "0.34.7"
-teloxide = { version = "0.12.0", default-features = false, features = [
+streem = "0.2.0"
+teloxide = { version = "0.13.0", default-features = false, features = [
   "ctrlc_handler",
   "macros",
   "rustls",
 ] }
-thiserror = "1.0"
+thiserror = "2.0"
 time = { version = "0.3.17", features = ["serde"] }
 tracing = "0.1"
-tracing-awc = "0.1.7"
 tracing-error = "0.2"
-tracing-futures = "0.2"
-tracing-log = "0.1"
-tracing-opentelemetry = "0.19"
+tracing-log = "0.2"
+tracing-opentelemetry = "0.28"
 tracing-subscriber = { version = "0.3", features = [
   "ansi",
   "env-filter",
   "fmt",
 ] }
-tokio = { version = "1", features = ["macros", "sync"] }
+tokio = { version = "1", features = ["full", "tracing"] }
 uuid = { version = "1", features = ["v4", "serde"] }
 
 [dependencies.background-jobs]
-version = "0.15.0"
+version = "0.19.0"
 default-features = false
-features = ["background-jobs-actix", "error-logging"]
+features = ["error-logging", "metrics", "tokio"]
 
 [dependencies.http-signature-normalization-actix]
-version = "0.10.0"
+version = "0.11.1"
 default-features = false
-features = ["client", "server", "sha-2"]
+features = ["server", "ring"]
+
+[dependencies.http-signature-normalization-reqwest]
+version = "0.13.0"
+default-features = false
+features = ["middleware", "ring"]
 
 [dependencies.tracing-actix-web]
-version = "0.7.5"
+version = "0.7.9"
 
 [build-dependencies]
-anyhow = "1.0"
+color-eyre = "0.6.2"
 dotenv = "0.15.0"
 ructe = { version = "0.17.0", features = ["sass", "mime03"] }
-toml = "0.7.0"
+toml = "0.8.0"
 
 [profile.dev.package.rsa]
 opt-level = 3
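
The `console` feature now uses an explicit `dep:` prefix, and `tokio_unstable` is enabled globally through the `.cargo/config.toml` added above. As a sketch (the exact invocation is an assumption, not shown in this compare), a build with tokio-console support would be:

    cargo build --release --features console

Without `--features console`, the optional `console-subscriber` dependency stays out of the build, matching the earlier "No console by default" commit.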

README.md
@@ -106,7 +106,6 @@ LOCAL_BLURB="<p>Welcome to my cool relay where I have cool relay things happenin
 PROMETHEUS_ADDR=0.0.0.0
 PROMETHEUS_PORT=9000
 CLIENT_TIMEOUT=10
-CLIENT_POOL_SIZE=20
 DELIVER_CONCURRENCY=8
 SIGNATURE_THREADS=2
 ```
@@ -161,11 +160,6 @@ Optional - Port to bind to for serving the prometheus scrape endpoint
 ##### `CLIENT_TIMEOUT`
 Optional - How long the relay will hold open a connection (in seconds) to a remote server during
 fetches and deliveries. This defaults to 10
-##### `CLIENT_POOL_SIZE`
-Optional - How many connections the relay should maintain per thread. This value will be multiplied
-by the number of cores available to the relay. This defaults to 20, so a 4-core machine will have a
-maximum of 160 simultaneous outbound connections. If you run into problems related to "Too many open
-files", you can either decrease this number or increase the ulimit for your system.
 ##### `DELIVER_CONCURRENCY`
 Optional - How many deliver requests the relay should allow to be in-flight per thread. the default
 is 8
@@ -173,6 +167,12 @@ is 8
 Optional - Override number of threads used for signing and verifying requests. Default is
 `std::thread::available_parallelism()` (It tries to detect how many cores you have). If it cannot
 detect the correct number of cores, it falls back to 1.
+##### 'PROXY_URL'
+Optional - URL of an HTTP proxy to forward outbound requests through
+##### 'PROXY_USERNAME'
+Optional - username to provide to the HTTP proxy set with `PROXY_URL` through HTTP Basic Auth
+##### 'PROXY_PASSWORD'
+Optional - password to provide to the HTTP proxy set with `PROXY_URL` through HTTP Basic Auth
 
 ### Subscribing
 Mastodon admins can subscribe to this relay by adding the `/inbox` route to their relay settings.
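
For illustration only (hypothetical values, not from the repository), the new proxy settings sit alongside the other environment variables:

    PROXY_URL=http://proxy.example.com:3128
    PROXY_USERNAME=relay
    PROXY_PASSWORD=changeme

Per the config changes further down, if only one of `PROXY_USERNAME`/`PROXY_PASSWORD` is set, the relay logs a warning and uses the proxy without Basic Auth.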

docker/drone/Dockerfile (deleted, 11 lines)
@@ -1,11 +0,0 @@
-ARG REPO_ARCH
-FROM asonix/rust-runner:latest-linux-$REPO_ARCH
-
-COPY relay /usr/local/bin/relay
-
-USER app
-
-EXPOSE 8080
-VOLUME /mnt
-ENTRYPOINT ["/sbin/tini", "--"]
-CMD ["/usr/local/bin/relay"]

docker/drone/manifest.tmpl (deleted, 25 lines)
@@ -1,25 +0,0 @@
-image: asonix/relay:{{#if build.tag}}{{trimPrefix "v" build.tag}}{{else}}latest{{/if}}
-{{#if build.tags}}
-tags:
-{{#each build.tags}}
-  - {{this}}
-{{/each}}
-{{/if}}
-manifests:
-  -
-    image: asonix/relay:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}linux-amd64
-    platform:
-      architecture: amd64
-      os: linux
-  -
-    image: asonix/relay:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}linux-arm64v8
-    platform:
-      architecture: arm64
-      os: linux
-      variant: v8
-  -
-    image: asonix/relay:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}linux-arm32v7
-    platform:
-      architecture: arm
-      os: linux
-      variant: v7

docker/forgejo/Dockerfile (new file, 24 lines)
@@ -0,0 +1,24 @@
+FROM alpine:3.19
+
+ARG UID=991
+ARG GID=991
+
+ENV \
+  UID=${UID} \
+  GID=${GID}
+
+USER root
+RUN \
+  addgroup -g "${GID}" app && \
+  adduser -D -G app -u "${UID}" -g "" -h /opt/app app && \
+  apk add tini && \
+  chown -R app:app /mnt
+
+COPY relay /usr/local/bin/relay
+
+USER app
+EXPOSE 6669
+EXPOSE 8080
+VOLUME /mnt
+ENTRYPOINT ["/sbin/tini", "--"]
+CMD ["/usr/local/bin/relay"]

docker-compose.yml
@@ -2,7 +2,7 @@ version: '3.3'
 
 services:
   relay:
-    image: asonix/relay:0.3.85
+    image: asonix/relay:0.3.115
     ports:
       - "8079:8079"
     restart: always

flake.lock (generated, 14 lines changed)
@@ -5,11 +5,11 @@
       "systems": "systems"
     },
     "locked": {
-      "lastModified": 1689068808,
-      "narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=",
+      "lastModified": 1710146030,
+      "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
       "owner": "numtide",
       "repo": "flake-utils",
-      "rev": "919d646de7be200f3bf08cb76ae1f09402b6f9b4",
+      "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
       "type": "github"
     },
     "original": {
@@ -20,16 +20,16 @@
     },
     "nixpkgs": {
       "locked": {
-        "lastModified": 1689679375,
-        "narHash": "sha256-LHUC52WvyVDi9PwyL1QCpaxYWBqp4ir4iL6zgOkmcb8=",
+        "lastModified": 1733550349,
+        "narHash": "sha256-NcGumB4Lr6KSDq+nIqXtNA8QwAQKDSZT7N9OTGWbTrs=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "684c17c429c42515bafb3ad775d2a710947f3d67",
+        "rev": "e2605d0744c2417b09f8bf850dfca42fcf537d34",
         "type": "github"
       },
       "original": {
         "owner": "NixOS",
-        "ref": "nixos-unstable",
+        "ref": "nixos-24.11",
         "repo": "nixpkgs",
         "type": "github"
       }
     }

flake.nix
@@ -2,7 +2,7 @@
   description = "relay";
 
   inputs = {
-    nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
+    nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.11";
     flake-utils.url = "github:numtide/flake-utils";
   };

@@ -1,17 +1,15 @@
 { lib
 , nixosTests
-, protobuf
 , rustPlatform
 }:
 
 rustPlatform.buildRustPackage {
   pname = "relay";
-  version = "0.3.97";
+  version = "0.3.116";
   src = ./.;
   cargoLock.lockFile = ./Cargo.lock;
 
-  PROTOC = "${protobuf}/bin/protoc";
-  PROTOC_INCLUDE = "${protobuf}/include";
+  RUSTFLAGS = "--cfg tokio_unstable";
 
   nativeBuildInputs = [ ];

@@ -3,12 +3,13 @@ use crate::{
     collector::Snapshot,
     config::{AdminUrlKind, Config},
     error::{Error, ErrorKind},
+    extractors::XApiToken,
 };
-use awc::Client;
+use reqwest_middleware::ClientWithMiddleware;
 use serde::de::DeserializeOwned;
 
 pub(crate) async fn allow(
-    client: &Client,
+    client: &ClientWithMiddleware,
     config: &Config,
     domains: Vec<String>,
 ) -> Result<(), Error> {
@@ -16,7 +17,7 @@ pub(crate) async fn allow(
 }
 
 pub(crate) async fn disallow(
-    client: &Client,
+    client: &ClientWithMiddleware,
     config: &Config,
     domains: Vec<String>,
 ) -> Result<(), Error> {
@@ -24,7 +25,7 @@ pub(crate) async fn disallow(
 }
 
 pub(crate) async fn block(
-    client: &Client,
+    client: &ClientWithMiddleware,
     config: &Config,
     domains: Vec<String>,
 ) -> Result<(), Error> {
@@ -32,35 +33,50 @@ pub(crate) async fn block(
 }
 
 pub(crate) async fn unblock(
-    client: &Client,
+    client: &ClientWithMiddleware,
     config: &Config,
     domains: Vec<String>,
 ) -> Result<(), Error> {
     post_domains(client, config, domains, AdminUrlKind::Unblock).await
 }
 
-pub(crate) async fn allowed(client: &Client, config: &Config) -> Result<AllowedDomains, Error> {
+pub(crate) async fn allowed(
+    client: &ClientWithMiddleware,
+    config: &Config,
+) -> Result<AllowedDomains, Error> {
     get_results(client, config, AdminUrlKind::Allowed).await
 }
 
-pub(crate) async fn blocked(client: &Client, config: &Config) -> Result<BlockedDomains, Error> {
+pub(crate) async fn blocked(
+    client: &ClientWithMiddleware,
+    config: &Config,
+) -> Result<BlockedDomains, Error> {
     get_results(client, config, AdminUrlKind::Blocked).await
 }
 
-pub(crate) async fn connected(client: &Client, config: &Config) -> Result<ConnectedActors, Error> {
+pub(crate) async fn connected(
+    client: &ClientWithMiddleware,
+    config: &Config,
+) -> Result<ConnectedActors, Error> {
     get_results(client, config, AdminUrlKind::Connected).await
 }
 
-pub(crate) async fn stats(client: &Client, config: &Config) -> Result<Snapshot, Error> {
+pub(crate) async fn stats(
+    client: &ClientWithMiddleware,
+    config: &Config,
+) -> Result<Snapshot, Error> {
     get_results(client, config, AdminUrlKind::Stats).await
 }
 
-pub(crate) async fn last_seen(client: &Client, config: &Config) -> Result<LastSeen, Error> {
+pub(crate) async fn last_seen(
+    client: &ClientWithMiddleware,
+    config: &Config,
+) -> Result<LastSeen, Error> {
     get_results(client, config, AdminUrlKind::LastSeen).await
 }
 
 async fn get_results<T: DeserializeOwned>(
-    client: &Client,
+    client: &ClientWithMiddleware,
     config: &Config,
     url_kind: AdminUrlKind,
 ) -> Result<T, Error> {
@@ -68,15 +84,19 @@ async fn get_results<T: DeserializeOwned>(
     let iri = config.generate_admin_url(url_kind);
 
-    let mut res = client
+    let res = client
         .get(iri.as_str())
-        .insert_header(x_api_token)
+        .header(XApiToken::http1_name(), x_api_token.to_string())
         .send()
         .await
        .map_err(|e| ErrorKind::SendRequest(iri.to_string(), e.to_string()))?;
 
     if !res.status().is_success() {
-        return Err(ErrorKind::Status(iri.to_string(), res.status()).into());
+        return Err(ErrorKind::Status(
+            iri.to_string(),
+            crate::http1::status_to_http02(res.status()),
+        )
+        .into());
     }
 
     let t = res
@@ -88,7 +108,7 @@ async fn get_results<T: DeserializeOwned>(
 }
 
 async fn post_domains(
-    client: &Client,
+    client: &ClientWithMiddleware,
     config: &Config,
     domains: Vec<String>,
     url_kind: AdminUrlKind,
@@ -99,8 +119,9 @@ async fn post_domains(
     let res = client
         .post(iri.as_str())
-        .insert_header(x_api_token)
-        .send_json(&Domains { domains })
+        .header(XApiToken::http1_name(), x_api_token.to_string())
+        .json(&Domains { domains })
+        .send()
         .await
         .map_err(|e| ErrorKind::SendRequest(iri.to_string(), e.to_string()))?;
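
A minimal sketch of how the `ClientWithMiddleware` these functions now accept might be assembled, assuming the `reqwest`, `reqwest-middleware`, and `reqwest-tracing` crates from the updated Cargo.toml (the helper name and structure are illustrative, not the relay's actual code):

    use reqwest_middleware::{ClientBuilder, ClientWithMiddleware};
    use reqwest_tracing::TracingMiddleware;

    // Hypothetical helper: `proxy` is (url, optional (username, password)),
    // mirroring the shape returned by the proxy_config accessor in the config diff below.
    fn build_client(proxy: Option<(&str, Option<(&str, &str)>)>) -> ClientWithMiddleware {
        let mut builder = reqwest::Client::builder();

        if let Some((url, auth)) = proxy {
            // Proxy::all routes every outbound request through the proxy URL
            let mut p = reqwest::Proxy::all(url).expect("valid proxy url");
            if let Some((user, pass)) = auth {
                p = p.basic_auth(user, pass);
            }
            builder = builder.proxy(p);
        }

        ClientBuilder::new(builder.build().expect("valid client"))
            .with(TracingMiddleware::default())
            .build()
    }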

src/build.rs
@@ -21,7 +21,7 @@ fn git_info() {
     }
 }
 
-fn version_info() -> Result<(), anyhow::Error> {
+fn version_info() -> color_eyre::Result<()> {
     let cargo_toml = Path::new(&std::env::var("CARGO_MANIFEST_DIR")?).join("Cargo.toml");
 
     let mut file = File::open(cargo_toml)?;
@@ -42,7 +42,7 @@ fn version_info() -> Result<(), anyhow::Error> {
     Ok(())
 }
 
-fn main() -> Result<(), anyhow::Error> {
+fn main() -> color_eyre::Result<()> {
     dotenv::dotenv().ok();
 
     git_info();

@@ -1,4 +1,4 @@
-use metrics::{Key, Recorder, SetRecorderError};
+use metrics::{Key, Metadata, Recorder, SetRecorderError};
 use metrics_util::{
     registry::{AtomicStorage, GenerationalStorage, Recency, Registry},
     MetricKindMask, Summary,
@@ -15,6 +15,10 @@ const MINUTES: u64 = 60 * SECONDS;
 const HOURS: u64 = 60 * MINUTES;
 const DAYS: u64 = 24 * HOURS;
 
+pub(crate) fn recordable(len: usize) -> u32 {
+    ((len as u64) % u64::from(u32::MAX)) as u32
+}
+
 type DistributionMap = BTreeMap<Vec<(String, String)>, Summary>;
 
 #[derive(Clone)]
@@ -289,7 +293,7 @@ impl Inner {
         }
 
         let mut d = self.distributions.write().unwrap();
-        let outer_entry = d.entry(name.clone()).or_insert_with(BTreeMap::new);
+        let outer_entry = d.entry(name.clone()).or_default();
 
         let entry = outer_entry
             .entry(labels)
@@ -299,7 +303,14 @@ impl Inner {
                 for sample in samples {
                     entry.add(*sample);
                 }
-            })
+            });
+
+            let mut total_len = 0;
+            for dist_map in d.values() {
+                total_len += dist_map.len();
+            }
+
+            metrics::gauge!("relay.collector.distributions.size").set(recordable(total_len));
         }
 
         let d = self.distributions.read().unwrap().clone();
@@ -358,10 +369,11 @@ impl MemoryCollector {
     ) {
         let mut d = self.inner.descriptions.write().unwrap();
         d.entry(key.as_str().to_owned()).or_insert(description);
+        metrics::gauge!("relay.collector.descriptions.size").set(recordable(d.len()));
     }
 
-    pub(crate) fn install(&self) -> Result<(), SetRecorderError> {
-        metrics::set_boxed_recorder(Box::new(self.clone()))
+    pub(crate) fn install(&self) -> Result<(), SetRecorderError<Self>> {
+        metrics::set_global_recorder(self.clone())
     }
 }
 
@@ -393,19 +405,19 @@ impl Recorder for MemoryCollector {
         self.add_description_if_missing(&key, description)
     }
 
-    fn register_counter(&self, key: &Key) -> metrics::Counter {
+    fn register_counter(&self, key: &Key, _: &Metadata<'_>) -> metrics::Counter {
         self.inner
             .registry
             .get_or_create_counter(key, |c| c.clone().into())
     }
 
-    fn register_gauge(&self, key: &Key) -> metrics::Gauge {
+    fn register_gauge(&self, key: &Key, _: &Metadata<'_>) -> metrics::Gauge {
         self.inner
             .registry
             .get_or_create_gauge(key, |c| c.clone().into())
     }
 
-    fn register_histogram(&self, key: &Key) -> metrics::Histogram {
+    fn register_histogram(&self, key: &Key, _: &Metadata<'_>) -> metrics::Histogram {
         self.inner
             .registry
             .get_or_create_histogram(key, |c| c.clone().into())
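
The new `recordable` helper above exists because collection sizes are `usize` while the size gauges are fed fixed-width values; note that it wraps, rather than saturates, lengths that exceed `u32::MAX`. A tiny self-contained illustration (a standalone example, not repository code):

    fn recordable(len: usize) -> u32 {
        ((len as u64) % u64::from(u32::MAX)) as u32
    }

    fn main() {
        assert_eq!(recordable(42), 42);
        // Lengths wrap modulo u32::MAX instead of clamping:
        assert_eq!(recordable(u32::MAX as usize), 0);
    }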

@@ -11,11 +11,9 @@ use activitystreams::{
     },
 };
 use config::Environment;
-use http_signature_normalization_actix::prelude::VerifyDigest;
-use rsa::sha2::{Digest, Sha256};
-use rustls::{Certificate, PrivateKey};
+use http_signature_normalization_actix::{digest::ring::Sha256, prelude::VerifyDigest};
+use rustls::sign::CertifiedKey;
 use std::{
-    io::BufReader,
     net::{IpAddr, SocketAddr},
     path::PathBuf,
 };
@@ -47,7 +45,9 @@ pub(crate) struct ParsedConfig {
     prometheus_port: Option<u16>,
     deliver_concurrency: u64,
     client_timeout: u64,
-    client_pool_size: usize,
+    proxy_url: Option<IriString>,
+    proxy_username: Option<String>,
+    proxy_password: Option<String>,
     signature_threads: Option<usize>,
 }
 
@@ -74,7 +74,7 @@ pub struct Config {
     prometheus_config: Option<PrometheusConfig>,
     deliver_concurrency: u64,
     client_timeout: u64,
-    client_pool_size: usize,
+    proxy_config: Option<ProxyConfig>,
     signature_threads: Option<usize>,
 }
 
@@ -90,6 +90,12 @@ struct PrometheusConfig {
     port: u16,
 }
 
+#[derive(Clone, Debug)]
+struct ProxyConfig {
+    url: IriString,
+    auth: Option<(String, String)>,
+}
+
 #[derive(Debug)]
 pub enum UrlKind {
     Activity,
@@ -145,7 +151,7 @@ impl std::fmt::Debug for Config {
             .field("prometheus_config", &self.prometheus_config)
             .field("deliver_concurrency", &self.deliver_concurrency)
             .field("client_timeout", &self.client_timeout)
-            .field("client_pool_size", &self.client_pool_size)
+            .field("proxy_config", &self.proxy_config)
             .field("signature_threads", &self.signature_threads)
             .finish()
     }
@@ -178,7 +184,9 @@ impl Config {
             .set_default("prometheus_port", None as Option<u16>)?
             .set_default("deliver_concurrency", 8u64)?
             .set_default("client_timeout", 10u64)?
-            .set_default("client_pool_size", 20u64)?
+            .set_default("proxy_url", None as Option<&str>)?
+            .set_default("proxy_username", None as Option<&str>)?
+            .set_default("proxy_password", None as Option<&str>)?
             .set_default("signature_threads", None as Option<u64>)?
             .add_source(Environment::default())
             .build()?;
@@ -221,6 +229,26 @@ impl Config {
             (None, None) => None,
         };
 
+        let proxy_config = match (config.proxy_username, config.proxy_password) {
+            (Some(username), Some(password)) => config.proxy_url.map(|url| ProxyConfig {
+                url,
+                auth: Some((username, password)),
+            }),
+            (Some(_), None) => {
+                tracing::warn!(
+                    "PROXY_USERNAME is set but PROXY_PASSWORD is not set, not setting Proxy Auth"
+                );
+                config.proxy_url.map(|url| ProxyConfig { url, auth: None })
+            }
+            (None, Some(_)) => {
+                tracing::warn!(
+                    "PROXY_PASSWORD is set but PROXY_USERNAME is not set, not setting Proxy Auth"
+                );
+                config.proxy_url.map(|url| ProxyConfig { url, auth: None })
+            }
+            (None, None) => config.proxy_url.map(|url| ProxyConfig { url, auth: None }),
+        };
+
         let source_url = match Self::git_hash() {
             Some(hash) => format!(
                 "{}{}{hash}",
@@ -253,7 +281,7 @@ impl Config {
             prometheus_config,
             deliver_concurrency: config.deliver_concurrency,
             client_timeout: config.client_timeout,
-            client_pool_size: config.client_pool_size,
+            proxy_config,
             signature_threads: config.signature_threads,
         })
     }
@@ -283,43 +311,34 @@ impl Config {
         Some((config.addr, config.port).into())
     }
 
-    pub(crate) fn open_keys(&self) -> Result<Option<(Vec<Certificate>, PrivateKey)>, Error> {
+    pub(crate) async fn open_keys(&self) -> Result<Option<CertifiedKey>, Error> {
         let tls = if let Some(tls) = &self.tls {
             tls
         } else {
-            tracing::warn!("No TLS config present");
+            tracing::info!("No TLS config present");
            return Ok(None);
         };
 
-        let mut certs_reader = BufReader::new(std::fs::File::open(&tls.cert)?);
-        let certs = rustls_pemfile::certs(&mut certs_reader)?;
+        let certs_bytes = tokio::fs::read(&tls.cert).await?;
+        let certs =
+            rustls_pemfile::certs(&mut certs_bytes.as_slice()).collect::<Result<Vec<_>, _>>()?;
 
         if certs.is_empty() {
             tracing::warn!("No certs read from certificate file");
             return Ok(None);
         }
 
-        let mut key_reader = BufReader::new(std::fs::File::open(&tls.key)?);
-        let key = rustls_pemfile::read_one(&mut key_reader)?;
-
-        let certs = certs.into_iter().map(Certificate).collect();
-
-        let key = if let Some(key) = key {
-            match key {
-                rustls_pemfile::Item::RSAKey(der) => PrivateKey(der),
-                rustls_pemfile::Item::PKCS8Key(der) => PrivateKey(der),
-                rustls_pemfile::Item::ECKey(der) => PrivateKey(der),
-                _ => {
-                    tracing::warn!("Unknown key format: {:?}", key);
-                    return Ok(None);
-                }
-            }
+        let key_bytes = tokio::fs::read(&tls.key).await?;
+        let key = if let Some(key) = rustls_pemfile::private_key(&mut key_bytes.as_slice())? {
+            key
         } else {
             tracing::warn!("Failed to read private key");
             return Ok(None);
         };
 
-        Ok(Some((certs, key)))
+        let key = rustls::crypto::ring::sign::any_supported_type(&key)?;
+
+        Ok(Some(CertifiedKey::new(certs, key)))
     }
 
     pub(crate) fn footer_blurb(&self) -> Option<crate::templates::Html<String>> {
@@ -469,8 +488,10 @@ impl Config {
         )
     }
 
-    pub(crate) fn client_pool_size(&self) -> usize {
-        self.client_pool_size
+    pub(crate) fn proxy_config(&self) -> Option<(&IriString, Option<(&str, &str)>)> {
+        self.proxy_config.as_ref().map(|ProxyConfig { url, auth }| {
+            (url, auth.as_ref().map(|(u, p)| (u.as_str(), p.as_str())))
+        })
     }
 
     pub(crate) fn source_code(&self) -> &IriString {

@@ -2,7 +2,7 @@ use crate::{
     apub::AcceptedActors,
     db::{Actor, Db},
     error::{Error, ErrorKind},
-    requests::Requests,
+    requests::{BreakerStrategy, Requests},
 };
 use activitystreams::{iri_string::types::IriString, prelude::*};
 use std::time::{Duration, SystemTime};
@@ -71,7 +71,9 @@ impl ActorCache {
         id: &IriString,
         requests: &Requests,
     ) -> Result<Actor, Error> {
-        let accepted_actor = requests.fetch::<AcceptedActors>(id).await?;
+        let accepted_actor = requests
+            .fetch::<AcceptedActors>(id, BreakerStrategy::Require2XX)
+            .await?;
 
         let input_authority = id.authority_components().ok_or(ErrorKind::MissingDomain)?;
         let accepted_actor_id = accepted_actor

@@ -9,10 +9,10 @@ pub(crate) struct LastOnline {
 impl LastOnline {
     pub(crate) fn mark_seen(&self, iri: &IriStr) {
         if let Some(authority) = iri.authority_str() {
-            self.domains
-                .lock()
-                .unwrap()
-                .insert(authority.to_string(), OffsetDateTime::now_utc());
+            let mut guard = self.domains.lock().unwrap();
+            guard.insert(authority.to_string(), OffsetDateTime::now_utc());
+            metrics::gauge!("relay.last-online.size",)
+                .set(crate::collector::recordable(guard.len()));
         }
     }

@@ -1,5 +1,4 @@
 use crate::{
-    config::{Config, UrlKind},
     data::NodeCache,
     db::Db,
     error::Error,
@@ -10,6 +9,7 @@ use activitystreams::iri_string::types::IriString;
 use actix_web::web;
 use lru::LruCache;
 use rand::thread_rng;
+use reqwest_middleware::ClientWithMiddleware;
 use rsa::{RsaPrivateKey, RsaPublicKey};
 use std::sync::{Arc, RwLock};
@@ -17,10 +17,10 @@ use super::LastOnline;
 
 #[derive(Clone)]
 pub struct State {
+    pub(crate) requests: Requests,
     pub(crate) public_key: RsaPublicKey,
-    private_key: RsaPrivateKey,
     object_cache: Arc<RwLock<LruCache<IriString, IriString>>>,
-    node_cache: NodeCache,
+    pub(crate) node_cache: NodeCache,
     breakers: Breakers,
     pub(crate) last_online: Arc<LastOnline>,
     pub(crate) db: Db,
@@ -37,23 +37,6 @@
     }
 }
 
 impl State {
-    pub(crate) fn node_cache(&self) -> NodeCache {
-        self.node_cache.clone()
-    }
-
-    pub(crate) fn requests(&self, config: &Config, spawner: Spawner) -> Requests {
-        Requests::new(
-            config.generate_url(UrlKind::MainKey).to_string(),
-            self.private_key.clone(),
-            config.user_agent(),
-            self.breakers.clone(),
-            self.last_online.clone(),
-            config.client_pool_size(),
-            config.client_timeout(),
-            spawner,
-        )
-    }
-
     #[tracing::instrument(
         level = "debug",
         name = "Get inboxes for other domains",
@@ -90,11 +73,22 @@
     }
 
     pub(crate) fn cache(&self, object_id: IriString, actor_id: IriString) {
-        self.object_cache.write().unwrap().put(object_id, actor_id);
+        let mut guard = self.object_cache.write().unwrap();
+        guard.put(object_id, actor_id);
+        metrics::gauge!("relay.object-cache.size").set(crate::collector::recordable(guard.len()));
+    }
+
+    pub(crate) fn is_connected(&self, iri: &IriString) -> bool {
+        self.breakers.should_try(iri)
     }
 
     #[tracing::instrument(level = "debug", name = "Building state", skip_all)]
-    pub(crate) async fn build(db: Db) -> Result<Self, Error> {
+    pub(crate) async fn build(
+        db: Db,
+        key_id: String,
+        spawner: Spawner,
+        client: ClientWithMiddleware,
+    ) -> Result<Self, Error> {
         let private_key = if let Ok(Some(key)) = db.private_key().await {
             tracing::debug!("Using existing key");
             key
@@ -113,16 +107,28 @@
         let public_key = private_key.to_public_key();
 
-        let state = State {
-            public_key,
+        let breakers = Breakers::default();
+        let last_online = Arc::new(LastOnline::empty());
+
+        let requests = Requests::new(
+            key_id,
             private_key,
+            breakers.clone(),
+            last_online.clone(),
+            spawner,
+            client,
+        );
+
+        let state = State {
+            requests,
+            public_key,
             object_cache: Arc::new(RwLock::new(LruCache::new(
                 (1024 * 8).try_into().expect("nonzero"),
             ))),
             node_cache: NodeCache::new(db.clone()),
-            breakers: Breakers::default(),
+            breakers,
             db,
-            last_online: Arc::new(LastOnline::empty()),
+            last_online,
         };
 
         Ok(state)

149
src/db.rs
View file

@ -7,7 +7,7 @@ use rsa::{
pkcs8::{DecodePrivateKey, EncodePrivateKey}, pkcs8::{DecodePrivateKey, EncodePrivateKey},
RsaPrivateKey, RsaPrivateKey,
}; };
use sled::{Batch, Tree}; use sled::{transaction::TransactionError, Batch, Transactional, Tree};
use std::{ use std::{
collections::{BTreeMap, HashMap}, collections::{BTreeMap, HashMap},
sync::{ sync::{
@ -283,10 +283,15 @@ impl Db {
pub(crate) async fn check_health(&self) -> Result<(), Error> { pub(crate) async fn check_health(&self) -> Result<(), Error> {
let next = self.inner.healthz_counter.fetch_add(1, Ordering::Relaxed); let next = self.inner.healthz_counter.fetch_add(1, Ordering::Relaxed);
self.unblock(move |inner| { self.unblock(move |inner| {
inner let res = inner
.healthz .healthz
.insert("healthz", &next.to_be_bytes()[..]) .insert("healthz", &next.to_be_bytes()[..])
.map_err(Error::from) .map_err(Error::from);
metrics::gauge!("relay.db.healthz.size")
.set(crate::collector::recordable(inner.healthz.len()));
res
}) })
.await?; .await?;
self.inner.healthz.flush_async().await?; self.inner.healthz.flush_async().await?;
@ -349,6 +354,9 @@ impl Db {
.actor_id_info .actor_id_info
.insert(actor_id.as_str().as_bytes(), vec)?; .insert(actor_id.as_str().as_bytes(), vec)?;
metrics::gauge!("relay.db.actor-id-info.size")
.set(crate::collector::recordable(inner.actor_id_info.len()));
Ok(()) Ok(())
}) })
.await .await
@ -383,6 +391,9 @@ impl Db {
.actor_id_instance .actor_id_instance
.insert(actor_id.as_str().as_bytes(), vec)?; .insert(actor_id.as_str().as_bytes(), vec)?;
metrics::gauge!("relay.db.actor-id-instance.size")
.set(crate::collector::recordable(inner.actor_id_instance.len()));
Ok(()) Ok(())
}) })
.await .await
@ -417,6 +428,9 @@ impl Db {
.actor_id_contact .actor_id_contact
.insert(actor_id.as_str().as_bytes(), vec)?; .insert(actor_id.as_str().as_bytes(), vec)?;
metrics::gauge!("relay.db.actor-id-contact.size")
.set(crate::collector::recordable(inner.actor_id_contact.len()));
Ok(()) Ok(())
}) })
.await .await
@ -447,6 +461,12 @@ impl Db {
inner inner
.media_url_media_id .media_url_media_id
.insert(url.as_str().as_bytes(), id.as_bytes())?; .insert(url.as_str().as_bytes(), id.as_bytes())?;
metrics::gauge!("relay.db.media-id-media-url.size")
.set(crate::collector::recordable(inner.media_id_media_url.len()));
metrics::gauge!("relay.db.media-url-media-id.size")
.set(crate::collector::recordable(inner.media_url_media_id.len()));
Ok(()) Ok(())
}) })
.await .await
@ -538,6 +558,14 @@ impl Db {
inner inner
.actor_id_actor .actor_id_actor
.insert(actor.id.as_str().as_bytes(), vec)?; .insert(actor.id.as_str().as_bytes(), vec)?;
metrics::gauge!("relay.db.public-key-actor-id.size").set(crate::collector::recordable(
inner.public_key_id_actor_id.len(),
));
metrics::gauge!("relay.db.actor-id-actor.size").set(crate::collector::recordable(
inner.public_key_id_actor_id.len(),
));
Ok(()) Ok(())
}) })
.await .await
@ -550,6 +578,10 @@ impl Db {
.connected_actor_ids .connected_actor_ids
.remove(actor_id.as_str().as_bytes())?; .remove(actor_id.as_str().as_bytes())?;
metrics::gauge!("relay.db.connected-actor-ids.size").set(crate::collector::recordable(
inner.connected_actor_ids.len(),
));
Ok(()) Ok(())
}) })
.await .await
@ -562,6 +594,10 @@ impl Db {
.connected_actor_ids .connected_actor_ids
.insert(actor_id.as_str().as_bytes(), actor_id.as_str().as_bytes())?; .insert(actor_id.as_str().as_bytes(), actor_id.as_str().as_bytes())?;
metrics::gauge!("relay.db.connected-actor-ids.size").set(crate::collector::recordable(
inner.connected_actor_ids.len(),
));
Ok(()) Ok(())
}) })
.await .await
@@ -569,30 +605,64 @@ impl Db {
     pub(crate) async fn add_blocks(&self, domains: Vec<String>) -> Result<(), Error> {
         self.unblock(move |inner| {
-            for connected in inner.connected_by_domain(&domains) {
-                inner
-                    .connected_actor_ids
-                    .remove(connected.as_str().as_bytes())?;
-            }
+            let connected_by_domain = inner.connected_by_domain(&domains).collect::<Vec<_>>();
+
+            let res = (
+                &inner.connected_actor_ids,
+                &inner.blocked_domains,
+                &inner.allowed_domains,
+            )
+                .transaction(|(connected, blocked, allowed)| {
+                    let mut connected_batch = Batch::default();
+                    let mut blocked_batch = Batch::default();
+                    let mut allowed_batch = Batch::default();
 
-            for authority in &domains {
-                inner
-                    .blocked_domains
-                    .insert(domain_key(authority), authority.as_bytes())?;
-                inner.allowed_domains.remove(domain_key(authority))?;
-            }
+                    for connected in &connected_by_domain {
+                        connected_batch.remove(connected.as_str().as_bytes());
+                    }
 
-            Ok(())
+                    for authority in &domains {
+                        blocked_batch
+                            .insert(domain_key(authority).as_bytes(), authority.as_bytes());
+                        allowed_batch.remove(domain_key(authority).as_bytes());
+                    }
+
+                    connected.apply_batch(&connected_batch)?;
+                    blocked.apply_batch(&blocked_batch)?;
+                    allowed.apply_batch(&allowed_batch)?;
+
+                    Ok(())
+                });
+
+            metrics::gauge!("relay.db.connected-actor-ids.size").set(crate::collector::recordable(
+                inner.connected_actor_ids.len(),
+            ));
+            metrics::gauge!("relay.db.blocked-domains.size")
+                .set(crate::collector::recordable(inner.blocked_domains.len()));
+            metrics::gauge!("relay.db.allowed-domains.size")
+                .set(crate::collector::recordable(inner.allowed_domains.len()));
+
+            match res {
+                Ok(()) => Ok(()),
+                Err(TransactionError::Abort(e) | TransactionError::Storage(e)) => Err(e.into()),
+            }
         })
         .await
     }
 
     pub(crate) async fn remove_blocks(&self, domains: Vec<String>) -> Result<(), Error> {
         self.unblock(move |inner| {
+            let mut blocked_batch = Batch::default();
+
             for authority in &domains {
-                inner.blocked_domains.remove(domain_key(authority))?;
+                blocked_batch.remove(domain_key(authority).as_bytes());
             }
 
+            inner.blocked_domains.apply_batch(blocked_batch)?;
+
+            metrics::gauge!("relay.db.blocked-domains.size")
+                .set(crate::collector::recordable(inner.blocked_domains.len()));
+
             Ok(())
         })
         .await
@@ -600,12 +670,17 @@ impl Db {
     pub(crate) async fn add_allows(&self, domains: Vec<String>) -> Result<(), Error> {
         self.unblock(move |inner| {
+            let mut allowed_batch = Batch::default();
+
             for authority in &domains {
-                inner
-                    .allowed_domains
-                    .insert(domain_key(authority), authority.as_bytes())?;
+                allowed_batch.insert(domain_key(authority).as_bytes(), authority.as_bytes());
             }
 
+            inner.allowed_domains.apply_batch(allowed_batch)?;
+
+            metrics::gauge!("relay.db.allowed-domains.size")
+                .set(crate::collector::recordable(inner.allowed_domains.len()));
+
             Ok(())
         })
         .await
@@ -614,17 +689,32 @@ impl Db {
     pub(crate) async fn remove_allows(&self, domains: Vec<String>) -> Result<(), Error> {
         self.unblock(move |inner| {
             if inner.restricted_mode {
-                for connected in inner.connected_by_domain(&domains) {
-                    inner
-                        .connected_actor_ids
-                        .remove(connected.as_str().as_bytes())?;
+                let connected_by_domain = inner.connected_by_domain(&domains).collect::<Vec<_>>();
+
+                let mut connected_batch = Batch::default();
+
+                for connected in &connected_by_domain {
+                    connected_batch.remove(connected.as_str().as_bytes());
                 }
+
+                inner.connected_actor_ids.apply_batch(connected_batch)?;
+
+                metrics::gauge!("relay.db.connected-actor-ids.size").set(
+                    crate::collector::recordable(inner.connected_actor_ids.len()),
+                );
             }
 
+            let mut allowed_batch = Batch::default();
+
             for authority in &domains {
-                inner.allowed_domains.remove(domain_key(authority))?;
+                allowed_batch.remove(domain_key(authority).as_bytes());
             }
 
+            inner.allowed_domains.apply_batch(allowed_batch)?;
+
+            metrics::gauge!("relay.db.allowed-domains.size")
+                .set(crate::collector::recordable(inner.allowed_domains.len()));
+
             Ok(())
         })
         .await
@@ -665,6 +755,10 @@ impl Db {
             inner
                 .settings
                 .insert("private-key".as_bytes(), pem_pkcs8.as_bytes())?;
+            metrics::gauge!("relay.db.settings.size")
+                .set(crate::collector::recordable(inner.settings.len()));
             Ok(())
         })
         .await
@@ -750,6 +844,11 @@ mod tests {
         {
             let db =
                 Db::build_inner(true, sled::Config::new().temporary(true).open().unwrap()).unwrap();
-            actix_rt::System::new().block_on((f)(db));
+
+            tokio::runtime::Builder::new_current_thread()
+                .enable_all()
+                .build()
+                .unwrap()
+                .block_on((f)(db));
         }
     }

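The add_blocks rewrite above moves from independent per-key writes to sled's multi-tree transaction combined with batches, so block-list updates land atomically. A minimal sketch of that pattern, assuming sled 0.34's Transactional tuple API; the tree names, key, and move_key function are illustrative, not from the diff:

use sled::{
    transaction::{ConflictableTransactionError, TransactionError},
    Batch, Transactional,
};

fn move_key(db: &sled::Db, key: &[u8]) -> Result<(), sled::Error> {
    let src = db.open_tree("src")?;
    let dst = db.open_tree("dst")?;

    let res = (&src, &dst).transaction(|(src, dst)| {
        // Batches are staged first, then applied inside the transaction:
        // either every remove/insert lands, or none of them do.
        let mut remove_batch = Batch::default();
        let mut insert_batch = Batch::default();

        if let Some(value) = src.get(key)? {
            remove_batch.remove(key);
            insert_batch.insert(key, value);
        }

        src.apply_batch(&remove_batch)?;
        dst.apply_batch(&insert_batch)?;

        Ok::<_, ConflictableTransactionError<sled::Error>>(())
    });

    // Surface both abort and storage failures, mirroring the match above.
    match res {
        Ok(()) => Ok(()),
        Err(TransactionError::Abort(e) | TransactionError::Storage(e)) => Err(e),
    }
}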

@@ -1,57 +1,85 @@
 use activitystreams::checked::CheckError;
-use actix_rt::task::JoinError;
 use actix_web::{
     error::{BlockingError, ResponseError},
     http::StatusCode,
     HttpResponse,
 };
-use http_signature_normalization_actix::PrepareSignError;
-use std::{convert::Infallible, fmt::Debug, io};
-use tracing_error::SpanTrace;
+use background_jobs::BoxError;
+use color_eyre::eyre::Error as Report;
+use http_signature_normalization_reqwest::SignError;
+use std::{convert::Infallible, io, sync::Arc};
+use tokio::task::JoinError;
+
+#[derive(Clone)]
+struct ArcKind {
+    kind: Arc<ErrorKind>,
+}
+
+impl std::fmt::Debug for ArcKind {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        self.kind.fmt(f)
+    }
+}
+
+impl std::fmt::Display for ArcKind {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        self.kind.fmt(f)
+    }
+}
+
+impl std::error::Error for ArcKind {
+    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
+        self.kind.source()
+    }
+}
 
 pub(crate) struct Error {
-    context: String,
-    kind: ErrorKind,
+    kind: ArcKind,
+    display: Box<str>,
+    debug: Box<str>,
 }
 impl Error {
+    fn kind(&self) -> &ErrorKind {
+        &self.kind.kind
+    }
+
     pub(crate) fn is_breaker(&self) -> bool {
-        matches!(self.kind, ErrorKind::Breaker)
+        matches!(self.kind(), ErrorKind::Breaker)
     }
 
     pub(crate) fn is_not_found(&self) -> bool {
-        matches!(self.kind, ErrorKind::Status(_, StatusCode::NOT_FOUND))
+        matches!(self.kind(), ErrorKind::Status(_, StatusCode::NOT_FOUND))
     }
 
     pub(crate) fn is_bad_request(&self) -> bool {
-        matches!(self.kind, ErrorKind::Status(_, StatusCode::BAD_REQUEST))
+        matches!(self.kind(), ErrorKind::Status(_, StatusCode::BAD_REQUEST))
     }
 
     pub(crate) fn is_gone(&self) -> bool {
-        matches!(self.kind, ErrorKind::Status(_, StatusCode::GONE))
+        matches!(self.kind(), ErrorKind::Status(_, StatusCode::GONE))
     }
 
     pub(crate) fn is_malformed_json(&self) -> bool {
-        matches!(self.kind, ErrorKind::Json(_))
+        matches!(self.kind(), ErrorKind::Json(_))
     }
 }
 
 impl std::fmt::Debug for Error {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        writeln!(f, "{:?}", self.kind)
+        f.write_str(&self.debug)
     }
 }
 
 impl std::fmt::Display for Error {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        writeln!(f, "{}", self.kind)?;
-        std::fmt::Display::fmt(&self.context, f)
+        f.write_str(&self.display)
     }
 }
 
 impl std::error::Error for Error {
     fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
-        self.kind.source()
+        self.kind().source()
     }
 }
@@ -60,56 +88,82 @@ where
     ErrorKind: From<T>,
 {
     fn from(error: T) -> Self {
+        let kind = ArcKind {
+            kind: Arc::new(ErrorKind::from(error)),
+        };
+
+        let report = Report::new(kind.clone());
+        let display = format!("{report}");
+        let debug = format!("{report:?}");
+
         Error {
-            context: SpanTrace::capture().to_string(),
-            kind: error.into(),
+            kind,
+            display: Box::from(display),
+            debug: Box::from(debug),
         }
     }
 }
 #[derive(Debug, thiserror::Error)]
 pub(crate) enum ErrorKind {
-    #[error("Error queueing job, {0}")]
-    Queue(anyhow::Error),
+    #[error("Error in extractor")]
+    Extractor(#[from] crate::extractors::ErrorKind),
 
-    #[error("Error in configuration, {0}")]
+    #[error("Error queueing job")]
+    Queue(#[from] BoxError),
+
+    #[error("Error in configuration")]
     Config(#[from] config::ConfigError),
 
-    #[error("Couldn't parse key, {0}")]
+    #[error("Couldn't parse key")]
     Pkcs8(#[from] rsa::pkcs8::Error),
 
-    #[error("Couldn't encode public key, {0}")]
+    #[error("Couldn't encode public key")]
     Spki(#[from] rsa::pkcs8::spki::Error),
 
-    #[error("Couldn't parse IRI, {0}")]
+    #[error("Couldn't sign request")]
+    SignRequest,
+
+    #[error("Response body from server exceeded limits")]
+    BodyTooLarge,
+
+    #[error("Couldn't make request")]
+    Reqwest(#[from] reqwest::Error),
+
+    #[error("Couldn't make request")]
+    ReqwestMiddleware(#[from] reqwest_middleware::Error),
+
+    #[error("Couldn't parse IRI")]
     ParseIri(#[from] activitystreams::iri_string::validate::Error),
 
-    #[error("Couldn't normalize IRI, {0}")]
+    #[error("Couldn't normalize IRI")]
     NormalizeIri(#[from] std::collections::TryReserveError),
 
-    #[error("Couldn't perform IO, {0}")]
+    #[error("Couldn't perform IO")]
     Io(#[from] io::Error),
 
     #[error("Couldn't sign string, {0}")]
     Rsa(rsa::errors::Error),
 
-    #[error("Couldn't use db, {0}")]
+    #[error("Couldn't use db")]
     Sled(#[from] sled::Error),
 
-    #[error("Couldn't do the json thing, {0}")]
+    #[error("Couldn't do the json thing")]
     Json(#[from] serde_json::Error),
 
-    #[error("Couldn't build signing string, {0}")]
-    PrepareSign(#[from] PrepareSignError),
+    #[error("Couldn't sign request")]
+    Sign(#[from] SignError),
 
     #[error("Couldn't sign digest")]
     Signature(#[from] rsa::signature::Error),
 
-    #[error("Couldn't read signature")]
-    ReadSignature(rsa::signature::Error),
+    #[error("Couldn't prepare TLS private key")]
+    PrepareKey(#[from] rustls::Error),
 
     #[error("Couldn't verify signature")]
-    VerifySignature(rsa::signature::Error),
+    VerifySignature,
+
+    #[error("Failed to encode key der")]
+    DerEncode,
 
     #[error("Couldn't parse the signature header")]
     HeaderValidation(#[from] actix_web::http::header::InvalidHeaderValue),
@@ -135,10 +189,10 @@ pub(crate) enum ErrorKind {
     #[error("Wrong ActivityPub kind, {0}")]
     Kind(String),
 
-    #[error("Too many CPUs, {0}")]
+    #[error("Too many CPUs")]
     CpuCount(#[from] std::num::TryFromIntError),
 
-    #[error("{0}")]
+    #[error("Host mismatch")]
     HostMismatch(#[from] CheckError),
 
     #[error("Couldn't flush buffer")]
@ -192,7 +246,7 @@ pub(crate) enum ErrorKind {
impl ResponseError for Error { impl ResponseError for Error {
fn status_code(&self) -> StatusCode { fn status_code(&self) -> StatusCode {
match self.kind { match self.kind() {
ErrorKind::NotAllowed(_) | ErrorKind::WrongActor(_) | ErrorKind::BadActor(_, _) => { ErrorKind::NotAllowed(_) | ErrorKind::WrongActor(_) | ErrorKind::BadActor(_, _) => {
StatusCode::FORBIDDEN StatusCode::FORBIDDEN
} }
@@ -212,7 +266,7 @@ impl ResponseError for Error {
             .insert_header(("Content-Type", "application/activity+json"))
             .body(
                 serde_json::to_string(&serde_json::json!({
-                    "error": self.kind.to_string(),
+                    "error": self.kind().to_string(),
                 }))
                 .unwrap_or_else(|_| "{}".to_string()),
             )
@@ -248,3 +302,9 @@ impl From<http_signature_normalization_actix::Canceled> for ErrorKind {
         Self::Canceled
     }
 }
+
+impl From<http_signature_normalization_reqwest::Canceled> for ErrorKind {
+    fn from(_: http_signature_normalization_reqwest::Canceled) -> Self {
+        Self::Canceled
+    }
+}

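The rework above drops the captured SpanTrace in favor of an Arc-wrapped kind plus display and debug strings rendered once at construction, which makes Error cheap to clone and Send. A small sketch of the same shape, with hypothetical MyKind/MyError standing in for the relay's types:

use std::sync::Arc;

// Hypothetical stand-in for the relay's ErrorKind.
#[derive(Debug, thiserror::Error)]
enum MyKind {
    #[error("Couldn't reach peer")]
    Unreachable,
}

#[derive(Clone)]
struct MyError {
    kind: Arc<MyKind>,
    display: Box<str>, // rendered once, cloned cheaply afterwards
}

impl From<MyKind> for MyError {
    fn from(kind: MyKind) -> Self {
        let display = kind.to_string().into_boxed_str();
        MyError {
            kind: Arc::new(kind),
            display,
        }
    }
}

impl std::fmt::Display for MyError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(&self.display)
    }
}

impl std::fmt::Debug for MyError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        self.kind.fmt(f)
    }
}

impl std::error::Error for MyError {
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        self.kind.source()
    }
}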

@@ -1,20 +1,15 @@
 use actix_web::{
     dev::Payload,
     error::ParseError,
-    http::{
-        header::{from_one_raw_str, Header, HeaderName, HeaderValue, TryIntoHeaderValue},
-        StatusCode,
-    },
+    http::header::{from_one_raw_str, Header, HeaderName, HeaderValue, TryIntoHeaderValue},
     web::Data,
-    FromRequest, HttpMessage, HttpRequest, HttpResponse, ResponseError,
+    FromRequest, HttpMessage, HttpRequest,
 };
 use bcrypt::{BcryptError, DEFAULT_COST};
-use futures_util::future::LocalBoxFuture;
 use http_signature_normalization_actix::{prelude::InvalidHeaderValue, Canceled, Spawn};
 use std::{convert::Infallible, str::FromStr, time::Instant};
-use tracing_error::SpanTrace;
 
-use crate::{db::Db, spawner::Spawner};
+use crate::{db::Db, error::Error, future::LocalBoxFuture, spawner::Spawner};
 #[derive(Clone)]
 pub(crate) struct AdminConfig {
@@ -29,7 +24,7 @@ impl AdminConfig {
     }
 
     fn verify(&self, token: XApiToken) -> Result<bool, Error> {
-        bcrypt::verify(&token.0, &self.hashed_api_token).map_err(Error::bcrypt_verify)
+        bcrypt::verify(token.0, &self.hashed_api_token).map_err(Error::bcrypt_verify)
     }
 }
@@ -84,74 +79,42 @@ impl Admin {
     }
 }
 
-#[derive(Debug, thiserror::Error)]
-#[error("Failed authentication")]
-pub(crate) struct Error {
-    context: String,
-    #[source]
-    kind: ErrorKind,
-}
 impl Error {
     fn invalid() -> Self {
-        Error {
-            context: SpanTrace::capture().to_string(),
-            kind: ErrorKind::Invalid,
-        }
+        Error::from(ErrorKind::Invalid)
     }
 
     fn missing_config() -> Self {
-        Error {
-            context: SpanTrace::capture().to_string(),
-            kind: ErrorKind::MissingConfig,
-        }
+        Error::from(ErrorKind::MissingConfig)
     }
 
     fn missing_db() -> Self {
-        Error {
-            context: SpanTrace::capture().to_string(),
-            kind: ErrorKind::MissingDb,
-        }
+        Error::from(ErrorKind::MissingDb)
     }
 
     fn missing_spawner() -> Self {
-        Error {
-            context: SpanTrace::capture().to_string(),
-            kind: ErrorKind::MissingSpawner,
-        }
+        Error::from(ErrorKind::MissingSpawner)
     }
 
     fn bcrypt_verify(e: BcryptError) -> Self {
-        Error {
-            context: SpanTrace::capture().to_string(),
-            kind: ErrorKind::BCryptVerify(e),
-        }
+        Error::from(ErrorKind::BCryptVerify(e))
     }
 
     fn bcrypt_hash(e: BcryptError) -> Self {
-        Error {
-            context: SpanTrace::capture().to_string(),
-            kind: ErrorKind::BCryptHash(e),
-        }
+        Error::from(ErrorKind::BCryptHash(e))
    }
 
     fn parse_header(e: ParseError) -> Self {
-        Error {
-            context: SpanTrace::capture().to_string(),
-            kind: ErrorKind::ParseHeader(e),
-        }
+        Error::from(ErrorKind::ParseHeader(e))
     }
 
     fn canceled(_: Canceled) -> Self {
-        Error {
-            context: SpanTrace::capture().to_string(),
-            kind: ErrorKind::Canceled,
-        }
+        Error::from(ErrorKind::Canceled)
     }
 }
 #[derive(Debug, thiserror::Error)]
-enum ErrorKind {
+pub(crate) enum ErrorKind {
     #[error("Invalid API Token")]
     Invalid,
@@ -177,20 +140,6 @@ enum ErrorKind {
     ParseHeader(#[source] ParseError),
 }
-impl ResponseError for Error {
-    fn status_code(&self) -> StatusCode {
-        match self.kind {
-            ErrorKind::Invalid | ErrorKind::ParseHeader(_) => StatusCode::BAD_REQUEST,
-            _ => StatusCode::INTERNAL_SERVER_ERROR,
-        }
-    }
-
-    fn error_response(&self) -> HttpResponse {
-        HttpResponse::build(self.status_code())
-            .json(serde_json::json!({ "msg": self.kind.to_string() }))
-    }
-}
 impl FromRequest for Admin {
     type Error = Error;
     type Future = LocalBoxFuture<'static, Result<Self, Self::Error>>;
@@ -201,10 +150,8 @@ impl FromRequest for Admin {
         Box::pin(async move {
             let (db, c, s, t) = res?;
             Self::verify(c, s, t).await?;
-            metrics::histogram!(
-                "relay.admin.verify",
-                now.elapsed().as_micros() as f64 / 1_000_000_f64
-            );
+            metrics::histogram!("relay.admin.verify")
+                .record(now.elapsed().as_micros() as f64 / 1_000_000_f64);
             Ok(Admin { db })
         })
     }
@@ -216,6 +163,10 @@ impl XApiToken {
     pub(crate) fn new(token: String) -> Self {
         Self(token)
     }
+
+    pub(crate) const fn http1_name() -> reqwest::header::HeaderName {
+        reqwest::header::HeaderName::from_static("x-api-token")
+    }
 }
 impl Header for XApiToken {
@@ -243,3 +194,9 @@ impl FromStr for XApiToken {
         Ok(XApiToken(s.to_string()))
     }
 }
+
+impl std::fmt::Display for XApiToken {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        self.0.fmt(f)
+    }
+}

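The extractor now uses the handle-based metrics API: the histogram! macro returns a handle and the value is recorded explicitly, instead of being passed as a macro argument. A sketch of timing a check this way; verify_token is illustrative and only the recording pattern mirrors the diff:

use std::time::Instant;

// Hypothetical check; the timing pattern is the point here.
fn verify_token(token: &str) -> bool {
    !token.is_empty()
}

fn timed_verify(token: &str) -> bool {
    let now = Instant::now();
    let ok = verify_token(token);

    // metrics 0.22-style: build the handle, then record seconds as f64.
    metrics::histogram!("relay.admin.verify")
        .record(now.elapsed().as_micros() as f64 / 1_000_000_f64);

    ok
}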
src/future.rs (new file)

@@ -0,0 +1,4 @@
+use std::{future::Future, pin::Pin};
+
+pub(crate) type LocalBoxFuture<'a, T> = Pin<Box<dyn Future<Output = T> + 'a>>;
+pub(crate) type BoxFuture<'a, T> = Pin<Box<dyn Future<Output = T> + Send + 'a>>;

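A tiny illustration of the difference between the two aliases: BoxFuture requires Send, so it can migrate across tokio worker threads, while LocalBoxFuture is pinned to the current thread. The answer functions are illustrative:

use crate::future::{BoxFuture, LocalBoxFuture};

fn answer() -> BoxFuture<'static, u32> {
    // Must be Send; fine for this future.
    Box::pin(async { 42 })
}

fn local_answer() -> LocalBoxFuture<'static, u32> {
    // Also accepts !Send futures, but is only usable on the current thread.
    Box::pin(async { 42 })
}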
src/http1.rs (new file)

@@ -0,0 +1,18 @@
+pub(crate) fn name_to_http02(
+    name: &reqwest::header::HeaderName,
+) -> actix_web::http::header::HeaderName {
+    actix_web::http::header::HeaderName::from_bytes(name.as_ref())
+        .expect("headername conversions always work")
+}
+
+pub(crate) fn value_to_http02(
+    value: &reqwest::header::HeaderValue,
+) -> actix_web::http::header::HeaderValue {
+    actix_web::http::header::HeaderValue::from_bytes(value.as_bytes())
+        .expect("headervalue conversions always work")
+}
+
+pub(crate) fn status_to_http02(status: reqwest::StatusCode) -> actix_web::http::StatusCode {
+    actix_web::http::StatusCode::from_u16(status.as_u16())
+        .expect("statuscode conversions always work")
+}

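A hedged sketch of how these helpers might be used: mirroring the status and headers of a reqwest (http 1.x) response onto an actix-web (http 0.2.x) response. mirror_response is illustrative and not part of the diff; it assumes the three helpers above are in scope:

fn mirror_response(upstream: &reqwest::Response) -> actix_web::HttpResponse {
    let mut builder = actix_web::HttpResponse::build(status_to_http02(upstream.status()));

    // Convert each hyper-1.x header pair into its http-0.2 equivalent.
    for (name, value) in upstream.headers() {
        builder.insert_header((name_to_http02(name), value_to_http02(value)));
    }

    builder.finish()
}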

@@ -14,15 +14,15 @@ pub(crate) use self::{
 use crate::{
     config::Config,
-    data::{ActorCache, MediaCache, NodeCache, State},
+    data::{ActorCache, MediaCache, State},
     error::{Error, ErrorKind},
     jobs::{process_listeners::Listeners, record_last_online::RecordLastOnline},
-    requests::Requests,
-    spawner::Spawner,
 };
 use background_jobs::{
-    memory_storage::{ActixTimer, Storage},
-    Job, QueueHandle, WorkerConfig,
+    memory_storage::{Storage, TokioTimer},
+    metrics::MetricsStorage,
+    tokio::{QueueHandle, WorkerConfig},
+    Job,
 };
 use std::time::Duration;
@@ -40,23 +40,26 @@ fn debug_object(activity: &serde_json::Value) -> &serde_json::Value {
     object
 }
 
+pub(crate) fn build_storage() -> MetricsStorage<Storage<TokioTimer>> {
+    MetricsStorage::wrap(Storage::new(TokioTimer))
+}
+
 pub(crate) fn create_workers(
+    storage: MetricsStorage<Storage<TokioTimer>>,
     state: State,
     actors: ActorCache,
     media: MediaCache,
     config: Config,
-    spawner: Spawner,
-) -> JobServer {
+) -> std::io::Result<JobServer> {
     let deliver_concurrency = config.deliver_concurrency();
 
-    let queue_handle = WorkerConfig::new(Storage::new(ActixTimer), move |queue_handle| {
+    let queue_handle = WorkerConfig::new(storage, move |queue_handle| {
         JobState::new(
             state.clone(),
             actors.clone(),
             JobServer::new(queue_handle),
             media.clone(),
             config.clone(),
-            spawner.clone(),
         )
     })
     .register::<Deliver>()
@@ -74,22 +77,20 @@ pub(crate) fn create_workers(
     .set_worker_count("maintenance", 2)
     .set_worker_count("apub", 2)
     .set_worker_count("deliver", deliver_concurrency)
-    .start();
+    .start()?;
 
-    queue_handle.every(Duration::from_secs(60 * 5), Listeners);
-    queue_handle.every(Duration::from_secs(60 * 10), RecordLastOnline);
+    queue_handle.every(Duration::from_secs(60 * 5), Listeners)?;
+    queue_handle.every(Duration::from_secs(60 * 10), RecordLastOnline)?;
 
-    JobServer::new(queue_handle)
+    Ok(JobServer::new(queue_handle))
 }
 
 #[derive(Clone, Debug)]
 pub(crate) struct JobState {
-    requests: Requests,
     state: State,
     actors: ActorCache,
     config: Config,
     media: MediaCache,
-    node_cache: NodeCache,
     job_server: JobServer,
 }
@@ -113,15 +114,12 @@ impl JobState {
         job_server: JobServer,
         media: MediaCache,
         config: Config,
-        spawner: Spawner,
     ) -> Self {
         JobState {
-            requests: state.requests(&config, spawner),
-            node_cache: state.node_cache(),
+            state,
             actors,
             config,
             media,
-            state,
             job_server,
         }
     }

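The job files below all follow one migration shape: ActixJob with an anyhow-erasing async block becomes Job with an explicit Error type and a directly boxed Send future. A minimal sketch of that shape under the same assumptions as the diff; the Ping job is illustrative:

use background_jobs::{Backoff, Job};

use crate::{error::Error, future::BoxFuture, jobs::JobState};

#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
struct Ping;

impl Ping {
    async fn perform(self, _state: JobState) -> Result<(), Error> {
        tracing::info!("pong");
        Ok(())
    }
}

impl Job for Ping {
    type State = JobState;
    type Error = Error;
    type Future = BoxFuture<'static, Result<(), Self::Error>>;

    const NAME: &'static str = "relay::jobs::Ping";
    const QUEUE: &'static str = "maintenance";
    const BACKOFF: Backoff = Backoff::Linear(1);

    fn run(self, state: Self::State) -> Self::Future {
        // The future is Send, so it can be boxed directly instead of being
        // wrapped in an error-converting async block as with ActixJob.
        Box::pin(self.perform(state))
    }
}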

@@ -2,14 +2,14 @@ use crate::{
     config::{Config, UrlKind},
     db::Actor,
     error::Error,
+    future::BoxFuture,
     jobs::{
         apub::{get_inboxes, prepare_activity},
         DeliverMany, JobState,
     },
 };
 use activitystreams::{activity::Announce as AsAnnounce, iri_string::types::IriString};
-use background_jobs::ActixJob;
-use std::{future::Future, pin::Pin};
+use background_jobs::Job;
 
 #[derive(Clone, serde::Deserialize, serde::Serialize)]
 pub(crate) struct Announce {
@@ -62,14 +62,15 @@ fn generate_announce(
     )
 }
 
-impl ActixJob for Announce {
+impl Job for Announce {
     type State = JobState;
-    type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
+    type Error = Error;
+    type Future = BoxFuture<'static, Result<(), Self::Error>>;
 
     const NAME: &'static str = "relay::jobs::apub::Announce";
     const QUEUE: &'static str = "apub";
 
     fn run(self, state: Self::State) -> Self::Future {
-        Box::pin(async move { self.perform(state).await.map_err(Into::into) })
+        Box::pin(self.perform(state))
     }
 }


@@ -3,6 +3,7 @@ use crate::{
     config::{Config, UrlKind},
     db::Actor,
     error::{Error, ErrorKind},
+    future::BoxFuture,
     jobs::{apub::prepare_activity, Deliver, JobState, QueryInstance, QueryNodeinfo},
 };
 use activitystreams::{
@@ -10,8 +11,7 @@ use activitystreams::{
     iri_string::types::IriString,
     prelude::*,
 };
-use background_jobs::ActixJob;
-use std::{future::Future, pin::Pin};
+use background_jobs::Job;
 
 #[derive(Clone, serde::Deserialize, serde::Serialize)]
 pub(crate) struct Follow {
@@ -111,14 +111,15 @@ fn generate_accept_follow(
     )
 }
 
-impl ActixJob for Follow {
+impl Job for Follow {
     type State = JobState;
-    type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
+    type Error = Error;
+    type Future = BoxFuture<'static, Result<(), Self::Error>>;
 
     const NAME: &'static str = "relay::jobs::apub::Follow";
     const QUEUE: &'static str = "apub";
 
     fn run(self, state: Self::State) -> Self::Future {
-        Box::pin(async move { self.perform(state).await.map_err(Into::into) })
+        Box::pin(self.perform(state))
     }
 }


@@ -2,11 +2,11 @@ use crate::{
     apub::AcceptedActivities,
     db::Actor,
     error::{Error, ErrorKind},
+    future::BoxFuture,
     jobs::{apub::get_inboxes, DeliverMany, JobState},
 };
 use activitystreams::prelude::*;
-use background_jobs::ActixJob;
-use std::{future::Future, pin::Pin};
+use background_jobs::Job;
 
 #[derive(Clone, serde::Deserialize, serde::Serialize)]
 pub(crate) struct Forward {
@@ -47,14 +47,15 @@ impl Forward {
     }
 }
 
-impl ActixJob for Forward {
+impl Job for Forward {
     type State = JobState;
-    type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
+    type Error = Error;
+    type Future = BoxFuture<'static, Result<(), Self::Error>>;
 
     const NAME: &'static str = "relay::jobs::apub::Forward";
     const QUEUE: &'static str = "apub";
 
     fn run(self, state: Self::State) -> Self::Future {
-        Box::pin(async move { self.perform(state).await.map_err(Into::into) })
+        Box::pin(self.perform(state))
     }
 }


@@ -2,10 +2,10 @@ use crate::{
     config::UrlKind,
     db::Actor,
     error::Error,
+    future::BoxFuture,
     jobs::{apub::generate_undo_follow, Deliver, JobState},
 };
-use background_jobs::ActixJob;
-use std::{future::Future, pin::Pin};
+use background_jobs::Job;
 
 #[derive(Clone, serde::Deserialize, serde::Serialize)]
 pub(crate) struct Reject(pub(crate) Actor);
@@ -33,14 +33,15 @@ impl Reject {
     }
 }
 
-impl ActixJob for Reject {
+impl Job for Reject {
     type State = JobState;
-    type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
+    type Error = Error;
+    type Future = BoxFuture<'static, Result<(), Self::Error>>;
 
     const NAME: &'static str = "relay::jobs::apub::Reject";
     const QUEUE: &'static str = "apub";
 
     fn run(self, state: Self::State) -> Self::Future {
-        Box::pin(async move { self.perform(state).await.map_err(Into::into) })
+        Box::pin(self.perform(state))
     }
 }


@@ -3,11 +3,11 @@ use crate::{
     config::UrlKind,
     db::Actor,
     error::Error,
+    future::BoxFuture,
     jobs::{apub::generate_undo_follow, Deliver, JobState},
 };
 use activitystreams::prelude::BaseExt;
-use background_jobs::ActixJob;
-use std::{future::Future, pin::Pin};
+use background_jobs::Job;
 
 #[derive(Clone, serde::Deserialize, serde::Serialize)]
 pub(crate) struct Undo {
@@ -48,14 +48,15 @@ impl Undo {
     }
 }
 
-impl ActixJob for Undo {
+impl Job for Undo {
     type State = JobState;
-    type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
+    type Error = Error;
+    type Future = BoxFuture<'static, Result<(), Self::Error>>;
 
     const NAME: &'static str = "relay::jobs::apub::Undo";
     const QUEUE: &'static str = "apub";
 
     fn run(self, state: Self::State) -> Self::Future {
-        Box::pin(async move { self.perform(state).await.map_err(Into::into) })
+        Box::pin(self.perform(state))
     }
 }


@@ -1,11 +1,12 @@
 use crate::{
     apub::AcceptedActors,
     error::{Error, ErrorKind},
+    future::BoxFuture,
     jobs::JobState,
+    requests::BreakerStrategy,
 };
 use activitystreams::{iri_string::types::IriString, object::Image, prelude::*};
-use background_jobs::ActixJob;
-use std::{future::Future, pin::Pin};
+use background_jobs::Job;
 
 #[derive(Clone, serde::Deserialize, serde::Serialize)]
 pub(crate) struct QueryContact {
@@ -32,6 +33,7 @@ impl QueryContact {
     async fn perform(self, state: JobState) -> Result<(), Error> {
         let contact_outdated = state
+            .state
             .node_cache
             .is_contact_outdated(self.actor_id.clone())
             .await;
@@ -41,8 +43,9 @@ impl QueryContact {
         }
 
         let contact = match state
+            .state
             .requests
-            .fetch::<AcceptedActors>(&self.contact_id)
+            .fetch::<AcceptedActors>(&self.contact_id, BreakerStrategy::Allow404AndBelow)
             .await
         {
             Ok(contact) => contact,
@@ -57,6 +60,7 @@ impl QueryContact {
             to_contact(contact).ok_or(ErrorKind::Extract("contact"))?;
 
         state
+            .state
             .node_cache
             .set_contact(self.actor_id, username, display_name, url, avatar)
             .await?;
@@ -81,15 +85,16 @@ fn to_contact(contact: AcceptedActors) -> Option<(String, String, IriString, Iri
     Some((username, display_name, url, avatar))
 }
 
-impl ActixJob for QueryContact {
+impl Job for QueryContact {
     type State = JobState;
-    type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
+    type Error = Error;
+    type Future = BoxFuture<'static, Result<(), Self::Error>>;
 
     const NAME: &'static str = "relay::jobs::QueryContact";
     const QUEUE: &'static str = "maintenance";
 
     fn run(self, state: Self::State) -> Self::Future {
-        Box::pin(async move { self.perform(state).await.map_err(Into::into) })
+        Box::pin(self.perform(state))
     }
 }


@@ -1,10 +1,11 @@
 use crate::{
     error::Error,
+    future::BoxFuture,
     jobs::{debug_object, JobState},
+    requests::BreakerStrategy,
 };
 use activitystreams::iri_string::types::IriString;
-use background_jobs::{ActixJob, Backoff};
-use std::{future::Future, pin::Pin};
+use background_jobs::{Backoff, Job};
 
 #[derive(Clone, serde::Deserialize, serde::Serialize)]
 pub(crate) struct Deliver {
@@ -34,8 +35,13 @@ impl Deliver {
     }
 
     #[tracing::instrument(name = "Deliver", skip(state))]
-    async fn permform(self, state: JobState) -> Result<(), Error> {
-        if let Err(e) = state.requests.deliver(&self.to, &self.data).await {
+    async fn perform(self, state: JobState) -> Result<(), Error> {
+        if let Err(e) = state
+            .state
+            .requests
+            .deliver(&self.to, &self.data, BreakerStrategy::Allow401AndBelow)
+            .await
+        {
             if e.is_breaker() {
                 tracing::debug!("Not trying due to failed breaker");
                 return Ok(());
@@ -50,15 +56,16 @@ impl Deliver {
     }
 }
 
-impl ActixJob for Deliver {
+impl Job for Deliver {
     type State = JobState;
-    type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
+    type Error = Error;
+    type Future = BoxFuture<'static, Result<(), Self::Error>>;
 
     const NAME: &'static str = "relay::jobs::Deliver";
     const QUEUE: &'static str = "deliver";
     const BACKOFF: Backoff = Backoff::Exponential(8);
 
     fn run(self, state: Self::State) -> Self::Future {
-        Box::pin(async move { self.permform(state).await.map_err(Into::into) })
+        Box::pin(self.perform(state))
     }
 }

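A sketch of the breaker handling in the Deliver job above: an error that tripped the circuit breaker is swallowed so the job is not retried against a dead host, while any other error propagates and triggers the exponential backoff. The send parameter stands in for the actual delivery future:

use crate::error::Error;

async fn deliver_once<F>(send: F) -> Result<(), Error>
where
    F: std::future::Future<Output = Result<(), Error>>,
{
    if let Err(e) = send.await {
        if e.is_breaker() {
            // The breaker is open; skip the retry cycle entirely.
            tracing::debug!("Not trying due to failed breaker");
            return Ok(());
        }

        // Any other failure is retried by the job queue's backoff policy.
        return Err(e);
    }

    Ok(())
}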

@@ -1,10 +1,10 @@
 use crate::{
     error::Error,
+    future::BoxFuture,
     jobs::{debug_object, Deliver, JobState},
 };
 use activitystreams::iri_string::types::IriString;
-use background_jobs::ActixJob;
-use futures_util::future::LocalBoxFuture;
+use background_jobs::Job;
 
 #[derive(Clone, serde::Deserialize, serde::Serialize)]
 pub(crate) struct DeliverMany {
@@ -45,14 +45,15 @@ impl DeliverMany {
     }
 }
 
-impl ActixJob for DeliverMany {
+impl Job for DeliverMany {
     type State = JobState;
-    type Future = LocalBoxFuture<'static, Result<(), anyhow::Error>>;
+    type Error = Error;
+    type Future = BoxFuture<'static, Result<(), Self::Error>>;
 
     const NAME: &'static str = "relay::jobs::DeliverMany";
     const QUEUE: &'static str = "deliver";
 
     fn run(self, state: Self::State) -> Self::Future {
-        Box::pin(async move { self.perform(state).await.map_err(Into::into) })
+        Box::pin(self.perform(state))
     }
 }


@@ -1,11 +1,12 @@
 use crate::{
     config::UrlKind,
     error::{Error, ErrorKind},
+    future::BoxFuture,
     jobs::{Boolish, JobState},
+    requests::BreakerStrategy,
 };
 use activitystreams::{iri, iri_string::types::IriString};
-use background_jobs::ActixJob;
-use std::{future::Future, pin::Pin};
+use background_jobs::Job;
 
 #[derive(Clone, serde::Deserialize, serde::Serialize)]
 pub(crate) struct QueryInstance {
@@ -40,15 +41,23 @@ impl QueryInstance {
             InstanceApiType::Mastodon => {
                 let mastodon_instance_uri = iri!(format!("{scheme}://{authority}/api/v1/instance"));
                 state
+                    .state
                     .requests
-                    .fetch_json::<Instance>(&mastodon_instance_uri)
+                    .fetch_json::<Instance>(
+                        &mastodon_instance_uri,
+                        BreakerStrategy::Allow404AndBelow,
+                    )
                     .await
             }
             InstanceApiType::Misskey => {
                 let msky_meta_uri = iri!(format!("{scheme}://{authority}/api/meta"));
                 state
+                    .state
                     .requests
-                    .fetch_json_msky::<MisskeyMeta>(&msky_meta_uri)
+                    .fetch_json_msky::<MisskeyMeta>(
+                        &msky_meta_uri,
+                        BreakerStrategy::Allow404AndBelow,
+                    )
                     .await
                     .map(|res| res.into())
             }
@@ -58,10 +67,12 @@ impl QueryInstance {
     #[tracing::instrument(name = "Query instance", skip(state))]
     async fn perform(self, state: JobState) -> Result<(), Error> {
         let contact_outdated = state
+            .state
             .node_cache
             .is_contact_outdated(self.actor_id.clone())
             .await;
         let instance_outdated = state
+            .state
             .node_cache
             .is_instance_outdated(self.actor_id.clone())
             .await;
@@ -123,6 +134,7 @@ impl QueryInstance {
             let avatar = state.config.generate_url(UrlKind::Media(uuid));
 
             state
+                .state
                 .node_cache
                 .set_contact(
                     self.actor_id.clone(),
@@ -137,6 +149,7 @@ impl QueryInstance {
         let description = ammonia::clean(&description);
 
         state
+            .state
             .node_cache
             .set_instance(
                 self.actor_id,
@@ -152,15 +165,16 @@ impl QueryInstance {
     }
 }
 
-impl ActixJob for QueryInstance {
+impl Job for QueryInstance {
     type State = JobState;
-    type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
+    type Error = Error;
+    type Future = BoxFuture<'static, Result<(), Self::Error>>;
 
     const NAME: &'static str = "relay::jobs::QueryInstance";
     const QUEUE: &'static str = "maintenance";
 
     fn run(self, state: Self::State) -> Self::Future {
-        Box::pin(async move { self.perform(state).await.map_err(Into::into) })
+        Box::pin(self.perform(state))
     }
 }


@@ -1,17 +1,18 @@
 use crate::{
     error::{Error, ErrorKind},
+    future::BoxFuture,
     jobs::{Boolish, JobState, QueryContact},
+    requests::BreakerStrategy,
 };
 use activitystreams::{iri, iri_string::types::IriString, primitives::OneOrMany};
-use background_jobs::ActixJob;
-use std::{fmt::Debug, future::Future, pin::Pin};
+use background_jobs::Job;
 
 #[derive(Clone, serde::Deserialize, serde::Serialize)]
 pub(crate) struct QueryNodeinfo {
     actor_id: IriString,
 }
 
-impl Debug for QueryNodeinfo {
+impl std::fmt::Debug for QueryNodeinfo {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         f.debug_struct("QueryNodeinfo")
             .field("actor_id", &self.actor_id.to_string())
@@ -27,6 +28,7 @@ impl QueryNodeinfo {
     #[tracing::instrument(name = "Query node info", skip(state))]
     async fn perform(self, state: JobState) -> Result<(), Error> {
         if !state
+            .state
             .node_cache
             .is_nodeinfo_outdated(self.actor_id.clone())
             .await
@@ -42,8 +44,9 @@ impl QueryNodeinfo {
         let well_known_uri = iri!(format!("{scheme}://{authority}/.well-known/nodeinfo"));
 
         let well_known = match state
+            .state
             .requests
-            .fetch_json::<WellKnown>(&well_known_uri)
+            .fetch_json::<WellKnown>(&well_known_uri, BreakerStrategy::Allow404AndBelow)
             .await
         {
             Ok(well_known) => well_known,
@@ -60,7 +63,12 @@ impl QueryNodeinfo {
             return Ok(());
         };
 
-        let nodeinfo = match state.requests.fetch_json::<Nodeinfo>(&href).await {
+        let nodeinfo = match state
+            .state
+            .requests
+            .fetch_json::<Nodeinfo>(&href, BreakerStrategy::Require2XX)
+            .await
+        {
             Ok(nodeinfo) => nodeinfo,
             Err(e) if e.is_breaker() => {
                 tracing::debug!("Not retrying due to failed breaker");
@@ -70,6 +78,7 @@ impl QueryNodeinfo {
         };
 
         state
+            .state
             .node_cache
             .set_info(
                 self.actor_id.clone(),
@@ -83,7 +92,7 @@ impl QueryNodeinfo {
             .metadata
             .and_then(|meta| meta.into_iter().next().and_then(|meta| meta.staff_accounts))
         {
-            if let Some(contact_id) = accounts.get(0) {
+            if let Some(contact_id) = accounts.first() {
                 state
                     .job_server
                     .queue(QueryContact::new(self.actor_id, contact_id.clone()))
@@ -95,15 +104,16 @@ impl QueryNodeinfo {
     }
 }
 
-impl ActixJob for QueryNodeinfo {
+impl Job for QueryNodeinfo {
     type State = JobState;
-    type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
+    type Error = Error;
+    type Future = BoxFuture<'static, Result<(), Self::Error>>;
 
     const NAME: &'static str = "relay::jobs::QueryNodeinfo";
     const QUEUE: &'static str = "maintenance";
 
     fn run(self, state: Self::State) -> Self::Future {
-        Box::pin(async move { self.perform(state).await.map_err(Into::into) })
+        Box::pin(self.perform(state))
     }
 }
 
@@ -146,7 +156,7 @@ struct Link {
 #[serde(untagged)]
 enum MaybeSupported<T> {
     Supported(T),
-    Unsupported(String),
+    Unsupported(#[allow(unused)] String),
 }
 
 impl<T> MaybeSupported<T> {
@@ -155,8 +165,8 @@ impl<T> MaybeSupported<T> {
     }
 }
 
-struct SupportedVersion(String);
-struct SupportedNodeinfo(String);
+struct SupportedVersion(#[allow(unused)] String);
+struct SupportedNodeinfo(#[allow(unused)] String);
 
 static SUPPORTED_VERSIONS: &str = "2.";
 static SUPPORTED_NODEINFO: &str = "http://nodeinfo.diaspora.software/ns/schema/2.";


@@ -1,9 +1,9 @@
 use crate::{
     error::Error,
+    future::BoxFuture,
     jobs::{instance::QueryInstance, nodeinfo::QueryNodeinfo, JobState},
 };
-use background_jobs::ActixJob;
-use std::{future::Future, pin::Pin};
+use background_jobs::Job;
 
 #[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
 pub(crate) struct Listeners;
@@ -23,14 +23,15 @@ impl Listeners {
     }
 }
 
-impl ActixJob for Listeners {
+impl Job for Listeners {
     type State = JobState;
-    type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
+    type Error = Error;
+    type Future = BoxFuture<'static, Result<(), Self::Error>>;
 
     const NAME: &'static str = "relay::jobs::Listeners";
     const QUEUE: &'static str = "maintenance";
 
     fn run(self, state: Self::State) -> Self::Future {
-        Box::pin(async move { self.perform(state).await.map_err(Into::into) })
+        Box::pin(self.perform(state))
     }
 }


@@ -1,6 +1,5 @@
-use crate::{error::Error, jobs::JobState};
-use background_jobs::{ActixJob, Backoff};
-use std::{future::Future, pin::Pin};
+use crate::{error::Error, future::BoxFuture, jobs::JobState};
+use background_jobs::{Backoff, Job};
 
 #[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
 pub(crate) struct RecordLastOnline;
@@ -14,15 +13,16 @@ impl RecordLastOnline {
     }
 }
 
-impl ActixJob for RecordLastOnline {
+impl Job for RecordLastOnline {
     type State = JobState;
-    type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
+    type Error = Error;
+    type Future = BoxFuture<'static, Result<(), Self::Error>>;
 
     const NAME: &'static str = "relay::jobs::RecordLastOnline";
     const QUEUE: &'static str = "maintenance";
     const BACKOFF: Backoff = Backoff::Linear(1);
 
     fn run(self, state: Self::State) -> Self::Future {
-        Box::pin(async move { self.perform(state).await.map_err(Into::into) })
+        Box::pin(self.perform(state))
     }
 }


@@ -1,22 +1,27 @@
 // need this for ructe
 #![allow(clippy::needless_borrow)]
 
+use std::time::Duration;
+
 use activitystreams::iri_string::types::IriString;
-use actix_rt::task::JoinHandle;
 use actix_web::{middleware::Compress, web, App, HttpServer};
 use collector::MemoryCollector;
 #[cfg(feature = "console")]
 use console_subscriber::ConsoleLayer;
+use error::Error;
 use http_signature_normalization_actix::middleware::VerifySignature;
 use metrics_exporter_prometheus::PrometheusBuilder;
 use metrics_util::layers::FanoutBuilder;
-use opentelemetry::{sdk::Resource, KeyValue};
+use opentelemetry::{trace::TracerProvider, KeyValue};
 use opentelemetry_otlp::WithExportConfig;
+use opentelemetry_sdk::Resource;
+use reqwest_middleware::ClientWithMiddleware;
 use rustls::ServerConfig;
+use tokio::task::JoinHandle;
 use tracing_actix_web::TracingLogger;
 use tracing_error::ErrorLayer;
 use tracing_log::LogTracer;
-use tracing_subscriber::{filter::Targets, fmt::format::FmtSpan, layer::SubscriberExt, Layer};
+use tracing_subscriber::{filter::Targets, layer::SubscriberExt, Layer};
 
 mod admin;
 mod apub;
@@ -27,13 +32,18 @@ mod data;
 mod db;
 mod error;
 mod extractors;
+mod future;
+mod http1;
 mod jobs;
 mod middleware;
 mod requests;
 mod routes;
 mod spawner;
+mod stream;
 mod telegram;
 
+use crate::config::UrlKind;
+
 use self::{
     args::Args,
     config::Config,
@@ -48,16 +58,15 @@ fn init_subscriber(
 fn init_subscriber(
     software_name: &'static str,
     opentelemetry_url: Option<&IriString>,
-) -> Result<(), anyhow::Error> {
+) -> color_eyre::Result<()> {
     LogTracer::init()?;
+    color_eyre::install()?;
 
     let targets: Targets = std::env::var("RUST_LOG")
-        .unwrap_or_else(|_| "warn,actix_web=debug,actix_server=debug,tracing_actix_web=info".into())
+        .unwrap_or_else(|_| "info".into())
         .parse()?;
 
-    let format_layer = tracing_subscriber::fmt::layer()
-        .with_span_events(FmtSpan::NEW | FmtSpan::CLOSE)
-        .with_filter(targets.clone());
+    let format_layer = tracing_subscriber::fmt::layer().with_filter(targets.clone());
 
     #[cfg(feature = "console")]
     let console_layer = ConsoleLayer::builder()
@@ -74,21 +83,21 @@ fn init_subscriber(
     let subscriber = subscriber.with(console_layer);
 
     if let Some(url) = opentelemetry_url {
-        let tracer =
-            opentelemetry_otlp::new_pipeline()
-                .tracing()
-                .with_trace_config(opentelemetry::sdk::trace::config().with_resource(
-                    Resource::new(vec![KeyValue::new("service.name", software_name)]),
-                ))
-                .with_exporter(
-                    opentelemetry_otlp::new_exporter()
-                        .tonic()
-                        .with_endpoint(url.as_str()),
-                )
-                .install_batch(opentelemetry::runtime::Tokio)?;
+        let exporter = opentelemetry_otlp::SpanExporter::builder()
+            .with_tonic()
+            .with_endpoint(url.as_str())
+            .build()?;
+
+        let tracer_provider = opentelemetry_sdk::trace::TracerProvider::builder()
+            .with_resource(Resource::new(vec![KeyValue::new(
+                "service.name",
+                software_name,
+            )]))
+            .with_batch_exporter(exporter, opentelemetry_sdk::runtime::Tokio)
+            .build();
 
         let otel_layer = tracing_opentelemetry::layer()
-            .with_tracer(tracer)
+            .with_tracer(tracer_provider.tracer(software_name))
             .with_filter(targets);
 
         let subscriber = subscriber.with(otel_layer);
@@ -100,8 +109,40 @@ fn init_subscriber(
     Ok(())
 }
 
-#[actix_rt::main]
-async fn main() -> Result<(), anyhow::Error> {
+fn build_client(
+    user_agent: &str,
+    timeout_seconds: u64,
+    proxy: Option<(&IriString, Option<(&str, &str)>)>,
+) -> Result<ClientWithMiddleware, Error> {
+    let builder = reqwest::Client::builder().user_agent(user_agent.to_string());
+
+    let builder = if let Some((url, auth)) = proxy {
+        let proxy = reqwest::Proxy::all(url.as_str())?;
+
+        let proxy = if let Some((username, password)) = auth {
+            proxy.basic_auth(username, password)
+        } else {
+            proxy
+        };
+
+        builder.proxy(proxy)
+    } else {
+        builder
+    };
+
+    let client = builder
+        .timeout(Duration::from_secs(timeout_seconds))
+        .build()?;
+
+    let client_with_middleware = reqwest_middleware::ClientBuilder::new(client)
+        .with(reqwest_tracing::TracingMiddleware::default())
+        .build();
+
+    Ok(client_with_middleware)
+}
+
+#[tokio::main]
+async fn main() -> color_eyre::Result<()> {
     dotenv::dotenv().ok();
 
     let config = Config::build()?;
@@ -111,7 +152,8 @@ async fn main() -> Result<(), anyhow::Error> {
     let args = Args::new();
 
     if args.any() {
-        return client_main(config, args).await?;
+        client_main(config, args).await??;
+        return Ok(());
     }
 
     let collector = MemoryCollector::new();
@@ -121,40 +163,40 @@ async fn main() -> Result<(), anyhow::Error> {
         .with_http_listener(bind_addr)
         .build()?;
 
-        actix_rt::spawn(exporter);
+        tokio::spawn(exporter);
 
         let recorder = FanoutBuilder::default()
             .add_recorder(recorder)
             .add_recorder(collector.clone())
             .build();
 
-        metrics::set_boxed_recorder(Box::new(recorder))?;
+        metrics::set_global_recorder(recorder).map_err(|e| color_eyre::eyre::eyre!("{e}"))?;
     } else {
         collector.install()?;
     }
 
-    tracing::warn!("Opening DB");
+    tracing::info!("Opening DB");
     let db = Db::build(&config)?;
 
-    tracing::warn!("Building caches");
+    tracing::info!("Building caches");
     let actors = ActorCache::new(db.clone());
     let media = MediaCache::new(db.clone());
 
-    server_main(db, actors, media, collector, config).await??;
+    server_main(db, actors, media, collector, config).await?;
 
-    tracing::warn!("Application exit");
+    tracing::info!("Application exit");
 
     Ok(())
 }
 
-fn client_main(config: Config, args: Args) -> JoinHandle<Result<(), anyhow::Error>> {
-    actix_rt::spawn(do_client_main(config, args))
+fn client_main(config: Config, args: Args) -> JoinHandle<color_eyre::Result<()>> {
+    tokio::spawn(do_client_main(config, args))
 }
 
-async fn do_client_main(config: Config, args: Args) -> Result<(), anyhow::Error> {
-    let client = requests::build_client(
+async fn do_client_main(config: Config, args: Args) -> color_eyre::Result<()> {
+    let client = build_client(
         &config.user_agent(),
-        config.client_pool_size(),
         config.client_timeout(),
-    );
+        config.proxy_config(),
+    )?;
 
     if !args.blocks().is_empty() || !args.allowed().is_empty() {
         if args.undo() {
@@ -232,34 +274,22 @@ async fn do_client_main(config: Config, args: Args) -> Result<(), anyhow::Error>
     Ok(())
 }
 
-fn server_main(
-    db: Db,
-    actors: ActorCache,
-    media: MediaCache,
-    collector: MemoryCollector,
-    config: Config,
-) -> JoinHandle<Result<(), anyhow::Error>> {
-    actix_rt::spawn(do_server_main(db, actors, media, collector, config))
-}
-
 const VERIFY_RATIO: usize = 7;
 
-async fn do_server_main(
+async fn server_main(
     db: Db,
     actors: ActorCache,
     media: MediaCache,
     collector: MemoryCollector,
     config: Config,
-) -> Result<(), anyhow::Error> {
-    tracing::warn!("Creating state");
-    let state = State::build(db.clone()).await?;
+) -> color_eyre::Result<()> {
+    let client = build_client(
+        &config.user_agent(),
+        config.client_timeout(),
+        config.proxy_config(),
+    )?;
 
-    if let Some((token, admin_handle)) = config.telegram_info() {
-        tracing::warn!("Creating telegram handler");
-        telegram::start(admin_handle.to_owned(), db.clone(), token);
-    }
-
-    let keys = config.open_keys()?;
+    tracing::info!("Creating state");
 
     let (signature_threads, verify_threads) = match config.signature_threads() {
         0 | 1 => (1, 1),
@@ -272,26 +302,42 @@ async fn do_server_main(
         }
     };
 
-    let spawner = Spawner::build("sign-cpu", signature_threads)?;
-    let verify_spawner = Spawner::build("verify-cpu", verify_threads)?;
+    let verify_spawner = Spawner::build("verify-cpu", verify_threads.try_into()?)?;
+    let sign_spawner = Spawner::build("sign-cpu", signature_threads.try_into()?)?;
+
+    let key_id = config.generate_url(UrlKind::MainKey).to_string();
+    let state = State::build(db.clone(), key_id, sign_spawner.clone(), client).await?;
+
+    if let Some((token, admin_handle)) = config.telegram_info() {
+        tracing::info!("Creating telegram handler");
+        telegram::start(admin_handle.to_owned(), db.clone(), token);
+    }
+
+    let cert_resolver = config
+        .open_keys()
+        .await?
+        .map(rustls_channel_resolver::channel::<32>);
 
     let bind_address = config.bind_address();
+    let sign_spawner2 = sign_spawner.clone();
+    let verify_spawner2 = verify_spawner.clone();
+    let config2 = config.clone();
+    let job_store = jobs::build_storage();
     let server = HttpServer::new(move || {
-        let requests = state.requests(&config, spawner.clone());
-
         let job_server = create_workers(
+            job_store.clone(),
             state.clone(),
             actors.clone(),
             media.clone(),
             config.clone(),
-            spawner.clone(),
-        );
+        )
+        .expect("Failed to create job server");
 
         let app = App::new()
             .app_data(web::Data::new(db.clone()))
             .app_data(web::Data::new(state.clone()))
             .app_data(web::Data::new(
-                requests.clone().spawner(verify_spawner.clone()),
+                state.requests.clone().spawner(verify_spawner.clone()),
             ))
             .app_data(web::Data::new(actors.clone()))
            .app_data(web::Data::new(config.clone()))
@@ -317,7 +363,7 @@ async fn do_server_main(
             .wrap(config.digest_middleware().spawner(verify_spawner.clone()))
             .wrap(VerifySignature::new(
                 MyVerify(
-                    requests.spawner(verify_spawner.clone()),
+                    state.requests.clone().spawner(verify_spawner.clone()),
                     actors.clone(),
                     state.clone(),
                     verify_spawner.clone(),
@ -351,24 +397,42 @@ async fn do_server_main(
) )
}); });
if let Some((certs, key)) = keys { if let Some((cert_tx, cert_rx)) = cert_resolver {
tracing::warn!("Binding to {}:{} with TLS", bind_address.0, bind_address.1); let handle = tokio::spawn(async move {
let mut interval = tokio::time::interval(Duration::from_secs(30));
interval.tick().await;
loop {
interval.tick().await;
match config2.open_keys().await {
Ok(Some(key)) => cert_tx.update(key),
Ok(None) => tracing::warn!("Missing TLS keys"),
Err(e) => tracing::error!("Failed to read TLS keys {e}"),
}
}
});
tracing::info!("Binding to {}:{} with TLS", bind_address.0, bind_address.1);
let server_config = ServerConfig::builder() let server_config = ServerConfig::builder()
.with_safe_default_cipher_suites()
.with_safe_default_kx_groups()
.with_safe_default_protocol_versions()?
.with_no_client_auth() .with_no_client_auth()
.with_single_cert(certs, key)?; .with_cert_resolver(cert_rx);
server server
.bind_rustls(bind_address, server_config)? .bind_rustls_0_23(bind_address, server_config)?
.run() .run()
.await?; .await?;
handle.abort();
let _ = handle.await;
} else { } else {
tracing::warn!("Binding to {}:{}", bind_address.0, bind_address.1); tracing::info!("Binding to {}:{}", bind_address.0, bind_address.1);
server.bind(bind_address)?.run().await?; server.bind(bind_address)?.run().await?;
} }
tracing::warn!("Server closed"); sign_spawner2.close().await;
verify_spawner2.close().await;
tracing::info!("Server closed");
Ok(()) Ok(())
} }
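Aside: the reload task above relies on tokio's interval semantics, where the first tick completes immediately; ticking once before entering the loop keeps the key re-read on a strict 30-second cadence. A minimal, self-contained sketch of that pattern (the loop body is a stand-in for the `open_keys` call):

use std::time::Duration;

#[tokio::main]
async fn main() {
    let mut interval = tokio::time::interval(Duration::from_secs(30));

    // The first tick resolves immediately; consume it so the loop only
    // wakes on the 30-second cadence afterwards.
    interval.tick().await;

    loop {
        interval.tick().await;
        // re-read key material here, as the server task above does
    }
}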


@@ -4,14 +4,11 @@ use actix_web::{
     web::BytesMut,
     HttpMessage,
 };
-use futures_util::{
-    future::TryFutureExt,
-    stream::{once, TryStreamExt},
-};
 use std::{
     future::{ready, Ready},
     task::{Context, Poll},
 };
+use streem::IntoStreamer;

 #[derive(Clone, Debug)]
 pub(crate) struct DebugPayload(pub bool);

@@ -53,19 +50,23 @@ where
     fn call(&self, mut req: ServiceRequest) -> Self::Future {
         if self.0 && req.method() == Method::POST {
-            let pl = req.take_payload();
+            let mut pl = req.take_payload().into_streamer();

             req.set_payload(Payload::Stream {
-                payload: Box::pin(once(
-                    pl.try_fold(BytesMut::new(), |mut acc, bytes| async {
-                        acc.extend(bytes);
-                        Ok(acc)
-                    })
-                    .map_ok(|bytes| {
-                        let bytes = bytes.freeze();
-                        tracing::info!("{}", String::from_utf8_lossy(&bytes));
-                        bytes
-                    }),
-                )),
+                payload: Box::pin(streem::try_from_fn(|yielder| async move {
+                    let mut buf = BytesMut::new();
+
+                    while let Some(bytes) = pl.try_next().await? {
+                        buf.extend(bytes);
+                    }
+
+                    let bytes = buf.freeze();
+                    tracing::info!("{}", String::from_utf8_lossy(&bytes));
+
+                    yielder.yield_ok(bytes).await;
+
+                    Ok(())
+                })),
             });

             self.1.call(req)
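Aside: a standalone sketch of the same drain-then-re-emit pattern, assuming the `streem` APIs used above (`try_from_fn`, `IntoStreamer::into_streamer`, `try_next`); the `buffer_and_log` name is hypothetical:

use actix_web::web::{Bytes, BytesMut};
use futures_core::Stream;
use streem::IntoStreamer;

// Drain `input` into one buffer, log it, then re-emit it as a
// single-item stream, mirroring the DebugPayload body above.
fn buffer_and_log<S, E>(input: S) -> impl Stream<Item = Result<Bytes, E>>
where
    S: Stream<Item = Result<Bytes, E>>,
{
    streem::try_from_fn(move |yielder| async move {
        let input = std::pin::pin!(input);
        let mut input = input.into_streamer();

        let mut buf = BytesMut::new();

        while let Some(bytes) = input.try_next().await? {
            buf.extend(bytes);
        }

        let bytes = buf.freeze();
        tracing::info!("{}", String::from_utf8_lossy(&bytes));

        yielder.yield_ok(bytes).await;

        Ok(())
    })
}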


@@ -40,7 +40,7 @@ impl Drop for LogOnDrop {
     fn drop(&mut self) {
         if self.arm {
             let duration = self.begin.elapsed();
-            metrics::histogram!("relay.request.complete", duration, "path" => self.path.clone(), "method" => self.method.clone());
+            metrics::histogram!("relay.request.complete", "path" => self.path.clone(), "method" => self.method.clone()).record(duration);
         }
     }
 }

@@ -80,7 +80,7 @@ where
     fn call(&self, req: ServiceRequest) -> Self::Future {
         let log_on_drop = LogOnDrop {
             begin: Instant::now(),
-            path: req.path().to_string(),
+            path: format!("{:?}", req.match_pattern()),
             method: req.method().to_string(),
             arm: false,
         };
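Aside: this tracks the newer `metrics` macro API, where labels are passed to the macro and the value is applied through the handle it returns. A minimal sketch of the calling convention (the `record_request` helper is hypothetical; `Duration` values are accepted by `record`, as the diff above does):

use std::time::Instant;

// Hypothetical helper emitting the same histogram as LogOnDrop::drop.
fn record_request(path: String, method: String, begin: Instant) {
    metrics::histogram!("relay.request.complete", "path" => path, "method" => method)
        .record(begin.elapsed());
}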


@@ -2,16 +2,13 @@ use crate::{
     apub::AcceptedActors,
     data::{ActorCache, State},
     error::{Error, ErrorKind},
-    requests::Requests,
+    requests::{BreakerStrategy, Requests},
     spawner::Spawner,
 };
 use activitystreams::{base::BaseExt, iri, iri_string::types::IriString};
 use base64::{engine::general_purpose::STANDARD, Engine};
 use http_signature_normalization_actix::{prelude::*, verify::DeprecatedAlgorithm, Spawn};
-use rsa::{
-    pkcs1v15::Signature, pkcs1v15::VerifyingKey, pkcs8::DecodePublicKey, sha2::Sha256,
-    signature::Verifier, RsaPublicKey,
-};
+use rsa::{pkcs1::EncodeRsaPublicKey, pkcs8::DecodePublicKey, RsaPublicKey};
 use std::{future::Future, pin::Pin};

 #[derive(Clone, Debug)]

@@ -73,7 +70,11 @@ impl MyVerify {
                     actor_id
                 } else {
-                    match self.0.fetch::<PublicKeyResponse>(&public_key_id).await {
+                    match self
+                        .0
+                        .fetch::<PublicKeyResponse>(&public_key_id, BreakerStrategy::Require2XX)
+                        .await
+                    {
                         Ok(res) => res.actor_id().ok_or(ErrorKind::MissingId),
                         Err(e) => {
                             if e.is_gone() {

@@ -128,19 +129,23 @@ async fn do_verify(
     signing_string: String,
 ) -> Result<(), Error> {
     let public_key = RsaPublicKey::from_public_key_pem(public_key.trim())?;
+    let public_key_der = public_key
+        .to_pkcs1_der()
+        .map_err(|_| ErrorKind::DerEncode)?;
+    let public_key = ring::signature::UnparsedPublicKey::new(
+        &ring::signature::RSA_PKCS1_2048_8192_SHA256,
+        public_key_der,
+    );

     let span = tracing::Span::current();
     spawner
         .spawn_blocking(move || {
             span.in_scope(|| {
                 let decoded = STANDARD.decode(signature)?;
-                let signature =
-                    Signature::try_from(decoded.as_slice()).map_err(ErrorKind::ReadSignature)?;
-                let verifying_key = VerifyingKey::<Sha256>::new(public_key);
-                verifying_key
-                    .verify(signing_string.as_bytes(), &signature)
-                    .map_err(ErrorKind::VerifySignature)?;
+                public_key
+                    .verify(signing_string.as_bytes(), decoded.as_slice())
+                    .map_err(|_| ErrorKind::VerifySignature)?;

                 Ok(()) as Result<(), Error>
             })
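Aside: the ring-based check in isolation, as a hedged sketch; `pem` and `signature_b64` are hypothetical stand-ins for the actor's published key and the header signature:

use base64::{engine::general_purpose::STANDARD, Engine};
use rsa::{pkcs1::EncodeRsaPublicKey, pkcs8::DecodePublicKey, RsaPublicKey};

// Returns true when `signature_b64` is a valid RSA-PKCS1-SHA256
// signature over `signing_string` by the key in `pem`.
fn verify_signature(pem: &str, signing_string: &str, signature_b64: &str) -> bool {
    let Ok(public_key) = RsaPublicKey::from_public_key_pem(pem.trim()) else {
        return false;
    };
    let Ok(public_key_der) = public_key.to_pkcs1_der() else {
        return false;
    };

    let public_key = ring::signature::UnparsedPublicKey::new(
        &ring::signature::RSA_PKCS1_2048_8192_SHA256,
        public_key_der,
    );

    let Ok(decoded) = STANDARD.decode(signature_b64) else {
        return false;
    };

    public_key
        .verify(signing_string.as_bytes(), &decoded)
        .is_ok()
}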


@@ -1,10 +1,10 @@
 use crate::{
     config::{Config, UrlKind},
     data::State,
+    future::LocalBoxFuture,
 };
 use actix_web::web::Data;
 use actix_webfinger::{Resolver, Webfinger};
-use futures_util::future::LocalBoxFuture;
 use rsa_magic_public_key::AsMagicPublicKey;

 pub(crate) struct RelayResolver;


@@ -2,31 +2,42 @@ use crate::{
     data::LastOnline,
     error::{Error, ErrorKind},
     spawner::Spawner,
+    stream::{aggregate, limit_stream},
 };
 use activitystreams::iri_string::types::IriString;
 use actix_web::http::header::Date;
-use awc::{error::SendRequestError, Client, ClientResponse, Connector};
 use base64::{engine::general_purpose::STANDARD, Engine};
 use dashmap::DashMap;
-use http_signature_normalization_actix::prelude::*;
-use rand::thread_rng;
-use rsa::{
-    pkcs1v15::SigningKey,
-    sha2::{Digest, Sha256},
-    signature::{RandomizedSigner, SignatureEncoding},
-    RsaPrivateKey,
-};
+use http_signature_normalization_reqwest::{digest::ring::Sha256, prelude::*};
+use reqwest_middleware::ClientWithMiddleware;
+use ring::{
+    rand::SystemRandom,
+    signature::{RsaKeyPair, RSA_PKCS1_SHA256},
+};
+use rsa::{pkcs1::EncodeRsaPrivateKey, RsaPrivateKey};
 use std::{
     sync::Arc,
     time::{Duration, SystemTime},
 };
-use tracing_awc::Tracing;

 const ONE_SECOND: u64 = 1;
 const ONE_MINUTE: u64 = 60 * ONE_SECOND;
 const ONE_HOUR: u64 = 60 * ONE_MINUTE;
 const ONE_DAY: u64 = 24 * ONE_HOUR;

+// 20 KB
+const JSON_SIZE_LIMIT: usize = 20 * 1024;
+
+#[derive(Debug)]
+pub(crate) enum BreakerStrategy {
+    // Requires a successful response
+    Require2XX,
+    // Allows HTTP 2xx-401
+    Allow401AndBelow,
+    // Allows HTTP 2xx-404
+    Allow404AndBelow,
+}
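Aside: the three variants encode widening windows of acceptable status codes, consumed by `check_response` further down. A sketch of the mapping with a hypothetical helper over raw `u16` codes:

fn breaker_success(status: u16, strategy: &BreakerStrategy) -> bool {
    match strategy {
        // Only 2xx counts as success
        BreakerStrategy::Require2XX => (200..300).contains(&status),
        // Also tolerate auth failures (e.g. signature-required endpoints)
        BreakerStrategy::Allow401AndBelow => (200..=401).contains(&status),
        // Also tolerate missing resources
        BreakerStrategy::Allow404AndBelow => (200..=404).contains(&status),
    }
}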
 #[derive(Clone)]
 pub(crate) struct Breakers {
     inner: Arc<DashMap<String, Breaker>>,

@@ -39,7 +50,7 @@ impl std::fmt::Debug for Breakers {
 }

 impl Breakers {
-    fn should_try(&self, url: &IriString) -> bool {
+    pub(crate) fn should_try(&self, url: &IriString) -> bool {
         if let Some(authority) = url.authority_str() {
             if let Some(breaker) = self.inner.get(authority) {
                 breaker.should_try()

@@ -141,11 +152,10 @@ impl Default for Breaker {
 #[derive(Clone)]
 pub(crate) struct Requests {
-    pool_size: usize,
-    client: Client,
+    client: ClientWithMiddleware,
     key_id: String,
-    user_agent: String,
-    private_key: RsaPrivateKey,
+    private_key: Arc<RsaKeyPair>,
+    rng: SystemRandom,
     config: Config<Spawner>,
     breakers: Breakers,
     last_online: Arc<LastOnline>,
@@ -154,62 +164,39 @@ pub(crate) struct Requests {
 impl std::fmt::Debug for Requests {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         f.debug_struct("Requests")
-            .field("pool_size", &self.pool_size)
             .field("key_id", &self.key_id)
-            .field("user_agent", &self.user_agent)
             .field("config", &self.config)
             .field("breakers", &self.breakers)
             .finish()
     }
 }

-thread_local! {
-    static CLIENT: std::cell::OnceCell<Client> = std::cell::OnceCell::new();
-}
-
-pub(crate) fn build_client(user_agent: &str, pool_size: usize, timeout_seconds: u64) -> Client {
-    CLIENT.with(|client| {
-        client
-            .get_or_init(|| {
-                let connector = Connector::new().limit(pool_size);
-
-                Client::builder()
-                    .connector(connector)
-                    .wrap(Tracing)
-                    .add_default_header(("User-Agent", user_agent.to_string()))
-                    .timeout(Duration::from_secs(timeout_seconds))
-                    .finish()
-            })
-            .clone()
-    })
-}
-
 impl Requests {
     #[allow(clippy::too_many_arguments)]
     pub(crate) fn new(
         key_id: String,
         private_key: RsaPrivateKey,
-        user_agent: String,
         breakers: Breakers,
         last_online: Arc<LastOnline>,
-        pool_size: usize,
-        timeout_seconds: u64,
         spawner: Spawner,
+        client: ClientWithMiddleware,
     ) -> Self {
+        let private_key_der = private_key.to_pkcs1_der().expect("Can encode der");
+        let private_key = ring::signature::RsaKeyPair::from_der(private_key_der.as_bytes())
+            .expect("Key is valid");
         Requests {
-            pool_size,
-            client: build_client(&user_agent, pool_size, timeout_seconds),
+            client,
             key_id,
-            user_agent,
-            private_key,
-            config: Config::new().mastodon_compat().spawner(spawner),
+            private_key: Arc::new(private_key),
+            rng: SystemRandom::new(),
+            config: Config::new_with_spawner(spawner).mastodon_compat(),
             breakers,
             last_online,
         }
     }

     pub(crate) fn spawner(mut self, spawner: Spawner) -> Self {
-        self.config = self.config.spawner(spawner);
+        self.config = self.config.set_spawner(spawner);
         self
     }
@@ -220,97 +207,132 @@ impl Requests {
     async fn check_response(
         &self,
         parsed_url: &IriString,
-        res: Result<ClientResponse, SendRequestError>,
-    ) -> Result<ClientResponse, Error> {
+        strategy: BreakerStrategy,
+        res: Result<reqwest::Response, reqwest_middleware::Error>,
+    ) -> Result<reqwest::Response, Error> {
         if res.is_err() {
             self.breakers.fail(&parsed_url);
         }

-        let mut res =
-            res.map_err(|e| ErrorKind::SendRequest(parsed_url.to_string(), e.to_string()))?;
+        let res = res?;

-        if res.status().is_server_error() {
+        let status = res.status();
+        let success = match strategy {
+            BreakerStrategy::Require2XX => status.is_success(),
+            BreakerStrategy::Allow401AndBelow => (200..=401).contains(&status.as_u16()),
+            BreakerStrategy::Allow404AndBelow => (200..=404).contains(&status.as_u16()),
+        };
+
+        if !success {
             self.breakers.fail(&parsed_url);

-            if let Ok(bytes) = res.body().await {
-                if let Ok(s) = String::from_utf8(bytes.as_ref().to_vec()) {
-                    if !s.is_empty() {
-                        tracing::debug!("Response from {parsed_url}, {s}");
-                    }
+            if let Ok(s) = res.text().await {
+                if !s.is_empty() {
+                    tracing::debug!("Response from {parsed_url}, {s}");
                 }
             }

-            return Err(ErrorKind::Status(parsed_url.to_string(), res.status()).into());
+            return Err(ErrorKind::Status(
+                parsed_url.to_string(),
+                crate::http1::status_to_http02(status),
+            )
+            .into());
         }

-        self.last_online.mark_seen(&parsed_url);
-        self.breakers.succeed(&parsed_url);
+        // only actually succeed a breaker on 2xx response
+        if status.is_success() {
+            self.last_online.mark_seen(&parsed_url);
+            self.breakers.succeed(&parsed_url);
+        }

         Ok(res)
     }
#[tracing::instrument(name = "Fetch Json", skip(self), fields(signing_string))] #[tracing::instrument(name = "Fetch Json", skip(self), fields(signing_string))]
pub(crate) async fn fetch_json<T>(&self, url: &IriString) -> Result<T, Error> pub(crate) async fn fetch_json<T>(
&self,
url: &IriString,
strategy: BreakerStrategy,
) -> Result<T, Error>
where where
T: serde::de::DeserializeOwned, T: serde::de::DeserializeOwned,
{ {
self.do_fetch(url, "application/json").await self.do_fetch(url, "application/json", strategy).await
} }
#[tracing::instrument(name = "Fetch Json", skip(self), fields(signing_string))] #[tracing::instrument(name = "Fetch Json", skip(self), fields(signing_string))]
pub(crate) async fn fetch_json_msky<T>(&self, url: &IriString) -> Result<T, Error> pub(crate) async fn fetch_json_msky<T>(
&self,
url: &IriString,
strategy: BreakerStrategy,
) -> Result<T, Error>
where where
T: serde::de::DeserializeOwned, T: serde::de::DeserializeOwned,
{ {
let mut res = self let stream = self
.do_deliver( .do_deliver(
url, url,
&serde_json::json!({}), &serde_json::json!({}),
"application/json", "application/json",
"application/json", "application/json",
strategy,
) )
.await?; .await?
.bytes_stream();
let body = res let body = aggregate(limit_stream(stream, JSON_SIZE_LIMIT)).await?;
.body()
.await
.map_err(|e| ErrorKind::ReceiveResponse(url.to_string(), e.to_string()))?;
Ok(serde_json::from_slice(body.as_ref())?) Ok(serde_json::from_slice(&body)?)
} }
#[tracing::instrument(name = "Fetch Activity+Json", skip(self), fields(signing_string))] #[tracing::instrument(name = "Fetch Activity+Json", skip(self), fields(signing_string))]
pub(crate) async fn fetch<T>(&self, url: &IriString) -> Result<T, Error> pub(crate) async fn fetch<T>(
&self,
url: &IriString,
strategy: BreakerStrategy,
) -> Result<T, Error>
where where
T: serde::de::DeserializeOwned, T: serde::de::DeserializeOwned,
{ {
self.do_fetch(url, "application/activity+json").await self.do_fetch(url, "application/activity+json", strategy)
.await
} }
async fn do_fetch<T>(&self, url: &IriString, accept: &str) -> Result<T, Error> async fn do_fetch<T>(
&self,
url: &IriString,
accept: &str,
strategy: BreakerStrategy,
) -> Result<T, Error>
where where
T: serde::de::DeserializeOwned, T: serde::de::DeserializeOwned,
{ {
let mut res = self.do_fetch_response(url, accept).await?; let stream = self
.do_fetch_response(url, accept, strategy)
.await?
.bytes_stream();
let body = res let body = aggregate(limit_stream(stream, JSON_SIZE_LIMIT)).await?;
.body()
.await
.map_err(|e| ErrorKind::ReceiveResponse(url.to_string(), e.to_string()))?;
Ok(serde_json::from_slice(body.as_ref())?) Ok(serde_json::from_slice(&body)?)
} }
#[tracing::instrument(name = "Fetch response", skip(self), fields(signing_string))] #[tracing::instrument(name = "Fetch response", skip(self), fields(signing_string))]
pub(crate) async fn fetch_response(&self, url: &IriString) -> Result<ClientResponse, Error> { pub(crate) async fn fetch_response(
self.do_fetch_response(url, "*/*").await &self,
url: &IriString,
strategy: BreakerStrategy,
) -> Result<reqwest::Response, Error> {
self.do_fetch_response(url, "*/*", strategy).await
} }
pub(crate) async fn do_fetch_response( pub(crate) async fn do_fetch_response(
&self, &self,
url: &IriString, url: &IriString,
accept: &str, accept: &str,
) -> Result<ClientResponse, Error> { strategy: BreakerStrategy,
) -> Result<reqwest::Response, Error> {
if !self.breakers.should_try(url) { if !self.breakers.should_try(url) {
return Err(ErrorKind::Breaker.into()); return Err(ErrorKind::Breaker.into());
} }
@@ -318,25 +340,20 @@ impl Requests {
         let signer = self.signer();
         let span = tracing::Span::current();

-        let res = self
+        let request = self
             .client
             .get(url.as_str())
-            .insert_header(("Accept", accept))
-            .insert_header(Date(SystemTime::now().into()))
-            .no_decompress()
-            .signature(
-                self.config.clone(),
-                self.key_id.clone(),
-                move |signing_string| {
-                    span.record("signing_string", signing_string);
-                    span.in_scope(|| signer.sign(signing_string))
-                },
-            )
-            .await?
-            .send()
-            .await;
+            .header("Accept", accept)
+            .header("Date", Date(SystemTime::now().into()).to_string())
+            .signature(&self.config, self.key_id.clone(), move |signing_string| {
+                span.record("signing_string", signing_string);
+                span.in_scope(|| signer.sign(signing_string))
+            })
+            .await?;

-        let res = self.check_response(url, res).await?;
+        let res = self.client.execute(request).await;
+        let res = self.check_response(url, strategy, res).await?;

         Ok(res)
     }
@@ -346,7 +363,12 @@ impl Requests {
         skip_all,
         fields(inbox = inbox.to_string().as_str(), signing_string)
     )]
-    pub(crate) async fn deliver<T>(&self, inbox: &IriString, item: &T) -> Result<(), Error>
+    pub(crate) async fn deliver<T>(
+        &self,
+        inbox: &IriString,
+        item: &T,
+        strategy: BreakerStrategy,
+    ) -> Result<(), Error>
     where
         T: serde::ser::Serialize + std::fmt::Debug,
     {

@@ -355,6 +377,7 @@ impl Requests {
             item,
             "application/activity+json",
             "application/activity+json",
+            strategy,
         )
         .await?;
         Ok(())

@@ -366,7 +389,8 @@ impl Requests {
         item: &T,
         content_type: &str,
         accept: &str,
-    ) -> Result<ClientResponse, Error>
+        strategy: BreakerStrategy,
+    ) -> Result<reqwest::Response, Error>
     where
         T: serde::ser::Serialize + std::fmt::Debug,
     {

@@ -378,12 +402,12 @@ impl Requests {
         let span = tracing::Span::current();
         let item_string = serde_json::to_string(item)?;

-        let (req, body) = self
+        let request = self
             .client
             .post(inbox.as_str())
-            .insert_header(("Accept", accept))
-            .insert_header(("Content-Type", content_type))
-            .insert_header(Date(SystemTime::now().into()))
+            .header("Accept", accept)
+            .header("Content-Type", content_type)
+            .header("Date", Date(SystemTime::now().into()).to_string())
             .signature_with_digest(
                 self.config.clone(),
                 self.key_id.clone(),

@@ -394,12 +418,11 @@ impl Requests {
                     span.in_scope(|| signer.sign(signing_string))
                 },
             )
-            .await?
-            .split();
+            .await?;

-        let res = req.send_body(body).await;
-        let res = self.check_response(inbox, res).await?;
+        let res = self.client.execute(request).await;
+        let res = self.check_response(inbox, strategy, res).await?;

         Ok(res)
     }
@@ -407,19 +430,29 @@ impl Requests {
     fn signer(&self) -> Signer {
         Signer {
             private_key: self.private_key.clone(),
+            rng: self.rng.clone(),
         }
     }
 }

 struct Signer {
-    private_key: RsaPrivateKey,
+    private_key: Arc<RsaKeyPair>,
+    rng: SystemRandom,
 }

 impl Signer {
     fn sign(&self, signing_string: &str) -> Result<String, Error> {
-        let signing_key = SigningKey::<Sha256>::new(self.private_key.clone());
-        let signature =
-            signing_key.try_sign_with_rng(&mut thread_rng(), signing_string.as_bytes())?;
-        Ok(STANDARD.encode(signature.to_bytes().as_ref()))
+        let mut signature = vec![0; self.private_key.public().modulus_len()];
+
+        self.private_key
+            .sign(
+                &RSA_PKCS1_SHA256,
+                &self.rng,
+                signing_string.as_bytes(),
+                &mut signature,
+            )
+            .map_err(|_| ErrorKind::SignRequest)?;
+
+        Ok(STANDARD.encode(&signature))
     }
 }
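Aside: the signer now bottoms out in ring's low-level RSA API. A minimal sketch, assuming `der` holds the PKCS#1-encoded private key that the constructor above derives with `to_pkcs1_der()` (the `sign_base64` helper is hypothetical):

use base64::{engine::general_purpose::STANDARD, Engine};
use ring::{
    rand::SystemRandom,
    signature::{RsaKeyPair, RSA_PKCS1_SHA256},
};

fn sign_base64(der: &[u8], signing_string: &str) -> Option<String> {
    let key_pair = RsaKeyPair::from_der(der).ok()?;
    let rng = SystemRandom::new();

    // ring writes the signature into a caller-provided buffer sized to
    // the key's modulus length.
    let mut signature = vec![0; key_pair.public().modulus_len()];
    key_pair
        .sign(&RSA_PKCS1_SHA256, &rng, signing_string.as_bytes(), &mut signature)
        .ok()?;

    Some(STANDARD.encode(&signature))
}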


@@ -14,10 +14,11 @@ const MINIFY_CONFIG: minify_html::Cfg = minify_html::Cfg {
     keep_html_and_head_opening_tags: false,
     keep_spaces_between_attributes: true,
     keep_comments: false,
+    keep_input_type_text_attr: true,
+    keep_ssi_comments: false,
+    preserve_brace_template_syntax: false,
+    preserve_chevron_percent_template_syntax: false,
     minify_css: true,
-    minify_css_level_1: true,
-    minify_css_level_2: false,
-    minify_css_level_3: false,
     minify_js: true,
     remove_bangs: true,
     remove_processing_instructions: true,

@@ -36,12 +37,16 @@ pub(crate) async fn route(
     state: web::Data<State>,
     config: web::Data<Config>,
 ) -> Result<HttpResponse, Error> {
-    let all_nodes = state.node_cache().nodes().await?;
+    let all_nodes = state.node_cache.nodes().await?;

     let mut nodes = Vec::new();
     let mut local = Vec::new();

     for node in all_nodes {
+        if !state.is_connected(&node.base) {
+            continue;
+        }
+
         if node
             .base
             .authority_str()


@@ -1,7 +1,15 @@
-use crate::{data::MediaCache, error::Error, requests::Requests};
+use crate::{
+    data::MediaCache,
+    error::Error,
+    requests::{BreakerStrategy, Requests},
+    stream::limit_stream,
+};
 use actix_web::{body::BodyStream, web, HttpResponse};
 use uuid::Uuid;

+// 16 MB
+const IMAGE_SIZE_LIMIT: usize = 16 * 1024 * 1024;
+
 #[tracing::instrument(name = "Media", skip(media, requests))]
 pub(crate) async fn route(
     media: web::Data<MediaCache>,

@@ -11,15 +19,23 @@ pub(crate) async fn route(
     let uuid = uuid.into_inner();

     if let Some(url) = media.get_url(uuid).await? {
-        let res = requests.fetch_response(&url).await?;
+        let res = requests
+            .fetch_response(&url, BreakerStrategy::Allow404AndBelow)
+            .await?;

-        let mut response = HttpResponse::build(res.status());
+        let mut response = HttpResponse::build(crate::http1::status_to_http02(res.status()));

         for (name, value) in res.headers().iter().filter(|(h, _)| *h != "connection") {
-            response.insert_header((name.clone(), value.clone()));
+            response.insert_header((
+                crate::http1::name_to_http02(name),
+                crate::http1::value_to_http02(value),
+            ));
         }

-        return Ok(response.body(BodyStream::new(res)));
+        return Ok(response.body(BodyStream::new(limit_stream(
+            res.bytes_stream(),
+            IMAGE_SIZE_LIMIT,
+        ))));
     }

     Ok(HttpResponse::NotFound().finish())


@@ -1,107 +1,30 @@
+use async_cpupool::CpuPool;
 use http_signature_normalization_actix::{Canceled, Spawn};
-use std::{
-    panic::AssertUnwindSafe,
-    sync::Arc,
-    thread::JoinHandle,
-    time::{Duration, Instant},
-};
+use std::time::Duration;

-fn spawner_thread(
-    receiver: flume::Receiver<Box<dyn FnOnce() + Send>>,
-    name: &'static str,
-    id: usize,
-) {
-    let guard = MetricsGuard::guard(name, id);
-
-    while let Ok(f) = receiver.recv() {
-        let start = Instant::now();
-        metrics::increment_counter!(format!("relay.{name}.operation.start"), "id" => id.to_string());
-        let res = std::panic::catch_unwind(AssertUnwindSafe(f));
-        metrics::increment_counter!(format!("relay.{name}.operation.end"), "complete" => res.is_ok().to_string(), "id" => id.to_string());
-        metrics::histogram!(format!("relay.{name}.operation.duration"), start.elapsed().as_secs_f64(), "complete" => res.is_ok().to_string(), "id" => id.to_string());
-
-        if let Err(e) = res {
-            tracing::warn!("{name} fn panicked: {e:?}");
-        }
-    }
-
-    guard.disarm();
-}
-
-#[derive(Clone, Debug)]
+#[derive(Clone)]
 pub(crate) struct Spawner {
-    name: &'static str,
-    sender: Option<flume::Sender<Box<dyn FnOnce() + Send>>>,
-    threads: Option<Arc<Vec<JoinHandle<()>>>>,
+    pool: CpuPool,
 }

-struct MetricsGuard {
-    name: &'static str,
-    id: usize,
-    start: Instant,
-    armed: bool,
-}
-
-impl MetricsGuard {
-    fn guard(name: &'static str, id: usize) -> Self {
-        metrics::increment_counter!(format!("relay.{name}.launched"), "id" => id.to_string());
-
-        Self {
-            name,
-            id,
-            start: Instant::now(),
-            armed: true,
-        }
-    }
-
-    fn disarm(mut self) {
-        self.armed = false;
-    }
-}
-
-impl Drop for MetricsGuard {
-    fn drop(&mut self) {
-        metrics::increment_counter!(format!("relay.{}.closed", self.name), "clean" => (!self.armed).to_string(), "id" => self.id.to_string());
-        metrics::histogram!(format!("relay.{}.duration", self.name), self.start.elapsed().as_secs_f64(), "clean" => (!self.armed).to_string(), "id" => self.id.to_string());
-        tracing::warn!("Stopping {} - {}", self.name, self.id);
-    }
-}
-
 impl Spawner {
-    pub(crate) fn build(name: &'static str, threads: usize) -> std::io::Result<Self> {
-        let (sender, receiver) = flume::bounded(8);
-
-        tracing::warn!("Launching {threads} {name}s");
-
-        let threads = (0..threads)
-            .map(|i| {
-                let receiver = receiver.clone();
-                std::thread::Builder::new()
-                    .name(format!("{name}-{i}"))
-                    .spawn(move || {
-                        spawner_thread(receiver, name, i);
-                    })
-            })
-            .collect::<Result<Vec<_>, _>>()?;
-
-        Ok(Spawner {
-            name,
-            sender: Some(sender),
-            threads: Some(Arc::new(threads)),
-        })
+    pub(crate) fn build(name: &'static str, threads: u16) -> color_eyre::Result<Self> {
+        let pool = CpuPool::configure()
+            .name(name)
+            .max_threads(threads)
+            .build()?;
+
+        Ok(Spawner { pool })
+    }
+
+    pub(crate) async fn close(self) {
+        self.pool.close().await;
     }
 }

-impl Drop for Spawner {
-    fn drop(&mut self) {
-        self.sender.take();
-
-        if let Some(threads) = self.threads.take().and_then(Arc::into_inner) {
-            tracing::warn!("Joining {}s", self.name);
-
-            for thread in threads {
-                let _ = thread.join();
-            }
-        }
+impl std::fmt::Debug for Spawner {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("Spawner").finish()
     }
 }
@@ -111,9 +34,9 @@ where
 {
     let id = uuid::Uuid::new_v4();

-    metrics::increment_counter!("relay.spawner.wait-timer.start");
+    metrics::counter!("relay.spawner.wait-timer.start").increment(1);

-    let mut interval = actix_rt::time::interval(Duration::from_secs(5));
+    let mut interval = tokio::time::interval(Duration::from_secs(5));

     // pass the first tick (instant)
     interval.tick().await;

@@ -124,12 +47,12 @@ where
     loop {
         tokio::select! {
             out = &mut fut => {
-                metrics::increment_counter!("relay.spawner.wait-timer.end");
+                metrics::counter!("relay.spawner.wait-timer.end").increment(1);
                 return out;
             }
             _ = interval.tick() => {
                 counter += 1;
-                metrics::increment_counter!("relay.spawner.wait-timer.pending");
+                metrics::counter!("relay.spawner.wait-timer.pending").increment(1);
                 tracing::warn!("Blocking operation {id} is taking a long time, {} seconds", counter * 5);
             }
         }
@@ -144,21 +67,26 @@ impl Spawn for Spawner {
         Func: FnOnce() -> Out + Send + 'static,
         Out: Send + 'static,
     {
-        let sender = self.sender.as_ref().expect("Sender exists").clone();
+        let pool = self.pool.clone();

-        Box::pin(async move {
-            let (tx, rx) = flume::bounded(1);
-
-            let _ = sender
-                .send_async(Box::new(move || {
-                    if tx.try_send((func)()).is_err() {
-                        tracing::warn!("Requestor hung up");
-                        metrics::increment_counter!("relay.spawner.disconnected");
-                    }
-                }))
-                .await;
-
-            timer(rx.recv_async()).await.map_err(|_| Canceled)
-        })
+        Box::pin(async move { timer(pool.spawn(func)).await.map_err(|_| Canceled) })
+    }
+}
+
+impl http_signature_normalization_reqwest::Spawn for Spawner {
+    type Future<T> = std::pin::Pin<Box<dyn std::future::Future<Output = Result<T, http_signature_normalization_reqwest::Canceled>> + Send>> where T: Send;
+
+    fn spawn_blocking<Func, Out>(&self, func: Func) -> Self::Future<Out>
+    where
+        Func: FnOnce() -> Out + Send + 'static,
+        Out: Send + 'static,
+    {
+        let pool = self.pool.clone();
+
+        Box::pin(async move {
+            timer(pool.spawn(func))
+                .await
+                .map_err(|_| http_signature_normalization_reqwest::Canceled)
+        })
     }
 }
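Aside: a hedged sketch of the new pool-backed spawner's lifecycle, building a small pool, running one blocking closure through the `Spawn` impl above, and closing it; the thread count and names are arbitrary:

use http_signature_normalization_actix::Spawn;

async fn demo() -> color_eyre::Result<()> {
    let spawner = Spawner::build("demo-cpu", 2)?;

    // spawn_blocking hands the closure to async-cpupool; `timer` above
    // logs a warning every 5 seconds while it waits.
    let sum = spawner
        .spawn_blocking(|| (0..100u32).sum::<u32>())
        .await
        .expect("pool is open");
    assert_eq!(sum, 4950);

    spawner.close().await;
    Ok(())
}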

src/stream.rs (new file, 59 lines)

@@ -0,0 +1,59 @@
use crate::error::{Error, ErrorKind};
use actix_web::web::{Bytes, BytesMut};
use futures_core::Stream;
use streem::IntoStreamer;

pub(crate) fn limit_stream<'a, S>(
    input: S,
    limit: usize,
) -> impl Stream<Item = Result<Bytes, Error>> + Send + 'a
where
    S: Stream<Item = reqwest::Result<Bytes>> + Send + 'a,
{
    streem::try_from_fn(move |yielder| async move {
        let stream = std::pin::pin!(input);
        let mut stream = stream.into_streamer();

        let mut count = 0;

        while let Some(bytes) = stream.try_next().await? {
            count += bytes.len();

            if count > limit {
                return Err(ErrorKind::BodyTooLarge.into());
            }

            yielder.yield_ok(bytes).await;
        }

        Ok(())
    })
}

pub(crate) async fn aggregate<S>(input: S) -> Result<Bytes, Error>
where
    S: Stream<Item = Result<Bytes, Error>>,
{
    let stream = std::pin::pin!(input);
    let mut streamer = stream.into_streamer();

    let mut buf = Vec::new();

    while let Some(bytes) = streamer.try_next().await? {
        buf.push(bytes);
    }

    if buf.len() == 1 {
        return Ok(buf.pop().expect("buf has exactly one element"));
    }

    let total_size: usize = buf.iter().map(|b| b.len()).sum();

    let mut bytes_mut = BytesMut::with_capacity(total_size);

    for bytes in &buf {
        bytes_mut.extend_from_slice(&bytes);
    }

    Ok(bytes_mut.freeze())
}


@@ -46,7 +46,7 @@ pub(crate) fn start(admin_handle: String, db: Db, token: &str) {
     let bot = Bot::new(token);
     let admin_handle = Arc::new(admin_handle);

-    actix_rt::spawn(async move {
+    tokio::spawn(async move {
         let command_handler = teloxide::filter_command::<Command, _>().endpoint(
             move |bot: Bot, msg: Message, cmd: Command| {
                 let admin_handle = admin_handle.clone();

@@ -75,7 +75,8 @@ pub(crate) fn start(admin_handle: String, db: Db, token: &str) {
 fn is_admin(admin_handle: &str, message: &Message) -> bool {
     message
-        .from()
+        .from
+        .as_ref()
         .and_then(|user| user.username.as_deref())
         .map(|username| username == admin_handle)
         .unwrap_or(false)