Compare commits


79 commits

Author SHA1 Message Date
asonix 6ff7b59778 Prepare 0.3.116 2024-12-09 20:59:14 -06:00
asonix d9da352558 Update teloxide 2024-12-09 19:40:33 -06:00
asonix aea64c726a Update opentelemetry stack 2024-12-09 19:37:35 -06:00
asonix e243bd4600 Update bcrypt 2024-12-09 19:30:59 -06:00
asonix a452fb91ba Fix build due to reqwest-tracing semver break 2024-12-09 19:30:30 -06:00
asonix 35acc916f2 flake: Use nixos-24.11 stable 2024-12-09 19:23:49 -06:00
asonix 752067ffb7 Update dependencies (minor & point) 2024-08-05 16:45:32 -05:00
asonix b308e080af Update console-subscriber 2024-08-05 16:44:39 -05:00
asonix 6ab37dc06f Update opentelemetry stack 2024-08-05 16:43:51 -05:00
asonix a23b30cc91 Bump version 2024-07-09 16:45:38 -05:00
asonix 1b58a50d44 Merge pull request 'Start upgrading to hyper and http 1' (#3) from asonix/hyper-1 into main (Reviewed-on: https://git.asonix.dog/asonix/relay/pulls/3) 2024-07-09 21:39:10 +00:00
asonix 308a945283 Start upgrading to http1 2024-07-09 16:32:05 -05:00
asonix 86cab5d2d9 Update opentelemetry stack 2024-07-09 16:28:00 -05:00
asonix a70e75665b Update console-subscriber 2024-07-09 16:26:33 -05:00
asonix f1792c8eb3 Update dashmap 2024-07-09 16:26:00 -05:00
asonix d918ef1495 Update rustls 2024-07-09 16:24:44 -05:00
asonix 2870789e1f Update background jobs, async-cpupool, metrics 2024-07-09 16:21:53 -05:00
asonix cda92e7523 Update flake 2024-06-23 13:57:40 -05:00
asonix 43b03a176c Don't fail publish on clippy warnings (unfixable without ructe release) 2024-06-23 13:57:28 -05:00
asonix a465d1ae5b Allow versions to be unused 2024-06-23 13:56:37 -05:00
asonix 4fa7674a35 Move cargo config to config.toml 2024-06-23 13:55:10 -05:00
asonix 8c14d613f7 Prepare v0.3.114 2024-06-23 13:45:10 -05:00
asonix aff2431681 Update dependencies (minor & point) 2024-06-23 13:42:26 -05:00
asonix 5aa97212b3 Impose limits on the size of downloaded content from foreign servers 2024-06-23 13:35:24 -05:00
asonix 97567cf598 Prepare v0.3.113 2024-05-01 15:45:53 -05:00
asonix 4c663f399e Update dependencies (minor & point) 2024-05-01 15:43:53 -05:00
asonix 8a3256f52a Avoid deadlock of iterating over tree while transacting on that tree 2024-05-01 15:43:08 -05:00
asonix 13a2653fe8 Remove prerelease flag 2024-04-23 14:00:04 -05:00
asonix 8dd9a86d22 Use match_pattern rather than path for metrics differentiation 2024-04-21 11:44:16 -05:00
asonix 5c0c0591dd Prepare 0.3.112 2024-04-14 22:47:38 -05:00
asonix 04ca4e5401 Stable async-cpupool 2024-04-14 19:53:31 -05:00
asonix 1de1d76506 prerelease 2024-04-13 13:57:12 -05:00
asonix dd9225bb89 Prepare v0.3.111 2024-04-07 11:53:24 -05:00
asonix b577730836 Fix build 2024-04-07 11:40:57 -05:00
asonix 21883c168b BROKEN! Start collecting more metrics about various sizes 2024-04-07 11:04:03 -05:00
asonix 76a0c79369 Update base64, ammonia 2024-04-06 13:42:29 -05:00
asonix 6444782db9 Bump version, Update dependencies (minor & point) 2024-04-06 13:34:54 -05:00
asonix 14aea3256d Update dependencies (minor & point) 2024-03-23 19:10:13 -05:00
asonix f4f2aa2025 Update flake 2024-03-23 19:09:53 -05:00
asonix 615271fe80 Update opentelemetry dependencies, other dependencies minor & point 2024-03-10 20:09:16 -05:00
asonix 4aed601664 No console by default 2024-02-25 21:08:17 -06:00
asonix bf21f05aca Strip release binaries 2024-02-12 15:16:20 -06:00
asonix e69f6c6edb Remove prerelease marker 2024-02-12 14:32:46 -06:00
asonix 1e05eb4fe4 Bump version 2024-02-12 13:46:44 -06:00
asonix 7f09ac3edd Update dependencies (minor & point) 2024-02-12 13:42:45 -06:00
asonix 4788ad332a Update image version in docker-compose 2024-02-11 14:56:26 -06:00
asonix 1fd82915d3 Remove bad argument 2024-02-11 14:52:57 -06:00
asonix 0472082a97 Add actions, remove drone 2024-02-11 14:49:22 -06:00
asonix c8250acce7 Bump version 2024-02-05 00:25:15 -06:00
asonix b074759eb4 Update background-jobs, rework errors 2024-02-05 00:24:49 -06:00
asonix ed399f1531 Be more accurate for reqwest errors 2024-02-04 20:51:25 -06:00
asonix 7e39acdcb0 Update config 2024-02-04 20:28:18 -06:00
asonix 894d096622 Bump version 2024-02-04 20:25:59 -06:00
asonix 05e31254ba Update rustls for actix-web, log less 2024-02-04 20:25:50 -06:00
asonix 086ca9fbf2 Support live-reloading TLS certificate 2024-01-31 16:49:23 -06:00
asonix 603fcc6e57 Bump version 2024-01-18 13:35:00 -05:00
asonix 6b8f15ee08 Use stable background-jobs 2024-01-18 13:34:10 -05:00
asonix 53939f8ae8 Go back to job-server per core 2024-01-18 12:31:26 -05:00
asonix b53b34c515 Update dependencies (minor & point) 2024-01-14 16:16:56 -05:00
asonix 6dcdf2fc87 clippy 2024-01-14 16:10:32 -05:00
asonix 83e5619eb4 Update flake.lock 2024-01-14 16:10:19 -05:00
asonix 9090bb5c62 Bump version 2024-01-14 15:59:16 -05:00
asonix d862bf8106 Use tokio rather than actix-rt 2024-01-14 15:56:07 -05:00
asonix 417553e643 Bump version 2024-01-09 18:09:51 -06:00
asonix a2456c3d5f Update dependencies (minor & point) 2024-01-09 18:08:10 -06:00
asonix 2b3cb8db92 clippy 2024-01-08 17:10:31 -06:00
asonix 18f1096221 Update version 2024-01-08 17:06:02 -06:00
asonix c640567206 Update to newest background-jobs, implement Job rather than ActixJob 2024-01-08 17:00:15 -06:00
asonix 36aa9120ea Update metrics 2024-01-07 12:43:58 -06:00
asonix e377f3988b Update minify-html, dependencies (minor & point) 2024-01-07 12:10:43 -06:00
asonix 8c811710ac Bump version 2023-11-25 21:27:05 -06:00
asonix e4f665d75f use stable async-cpupool 2023-11-25 21:17:59 -06:00
asonix 4383357abe update flake 2023-11-25 20:27:20 -06:00
asonix f70af22c6a clippy 2023-11-25 20:27:11 -06:00
asonix 8bce3d172f Update streem 2023-11-25 20:20:38 -06:00
asonix 8540e93469 Use async-cpupool 2023-11-25 20:18:11 -06:00
asonix 708e7da301 Update opentelemetry, ring, http-signature-normalization, tracing-log 2023-11-25 20:16:13 -06:00
asonix a0f9827e18 Bump version 2023-09-09 18:10:31 -04:00
asonix 9ebed87cde Update http-signature-normalization-actix 2023-09-09 18:09:24 -04:00
45 changed files with 3242 additions and 1966 deletions


@@ -1,421 +0,0 @@
kind: pipeline
type: docker

name: clippy

platform:
  arch: amd64

clone:
  disable: true

steps:
- name: clone
  image: alpine/git:latest
  user: root
  commands:
  - git clone $DRONE_GIT_HTTP_URL .
  - git checkout $DRONE_COMMIT
  - chown -R 991:991 .

- name: clippy
  image: asonix/rust-builder:latest-linux-amd64
  pull: always
  commands:
  - rustup component add clippy
  - cargo clippy --no-deps -- -D warnings

trigger:
  event:
  - push
  - pull_request
  - tag

---
kind: pipeline
type: docker

name: tests

platform:
  arch: amd64

clone:
  disable: true

steps:
- name: clone
  image: alpine/git:latest
  user: root
  commands:
  - git clone $DRONE_GIT_HTTP_URL .
  - git checkout $DRONE_COMMIT
  - chown -R 991:991 .

- name: tests
  image: asonix/rust-builder:latest-linux-amd64
  pull: always
  commands:
  - cargo test

trigger:
  event:
  - push
  - pull_request
  - tag

---
kind: pipeline
type: docker

name: check-amd64

platform:
  arch: amd64

clone:
  disable: true

steps:
- name: clone
  image: alpine/git:latest
  user: root
  commands:
  - git clone $DRONE_GIT_HTTP_URL .
  - git checkout $DRONE_COMMIT
  - chown -R 991:991 .

- name: check
  image: asonix/rust-builder:latest-linux-amd64
  pull: always
  commands:
  - cargo check --target=$TARGET

trigger:
  event:
  - push
  - pull_request

---
kind: pipeline
type: docker

name: build-amd64

platform:
  arch: amd64

clone:
  disable: true

steps:
- name: clone
  image: alpine/git:latest
  user: root
  commands:
  - git clone $DRONE_GIT_HTTP_URL .
  - git checkout $DRONE_COMMIT
  - chown -R 991:991 .

- name: build
  image: asonix/rust-builder:latest-linux-amd64
  pull: always
  commands:
  - cargo build --target=$TARGET --release
  - $TOOL-strip target/$TARGET/release/relay
  - cp target/$TARGET/release/relay .
  - cp relay relay-linux-amd64

- name: push
  image: plugins/docker:20
  settings:
    username: asonix
    password:
      from_secret: dockerhub_token
    repo: asonix/relay
    dockerfile: docker/drone/Dockerfile
    auto_tag: true
    auto_tag_suffix: linux-amd64
    build_args:
    - REPO_ARCH=amd64

- name: publish
  image: plugins/gitea-release:1
  settings:
    api_key:
      from_secret: gitea_token
    base_url: https://git.asonix.dog
    files:
    - relay-linux-amd64

depends_on:
- clippy
- tests

trigger:
  event:
  - tag

---
kind: pipeline
type: docker

name: check-arm64v8

platform:
  arch: amd64

clone:
  disable: true

steps:
- name: clone
  image: alpine/git:latest
  user: root
  commands:
  - git clone $DRONE_GIT_HTTP_URL .
  - git checkout $DRONE_COMMIT
  - chown -R 991:991 .

- name: check
  image: asonix/rust-builder:latest-linux-arm64v8
  pull: always
  commands:
  - cargo check --target=$TARGET

trigger:
  event:
  - push
  - pull_request

---
kind: pipeline
type: docker

name: build-arm64v8

platform:
  arch: amd64

clone:
  disable: true

steps:
- name: clone
  image: alpine/git:latest
  user: root
  commands:
  - git clone $DRONE_GIT_HTTP_URL .
  - git checkout $DRONE_COMMIT
  - chown -R 991:991 .

- name: build
  image: asonix/rust-builder:latest-linux-arm64v8
  pull: always
  commands:
  - cargo build --target=$TARGET --release
  - $TOOL-strip target/$TARGET/release/relay
  - cp target/$TARGET/release/relay .
  - cp relay relay-linux-arm64v8

- name: push
  image: plugins/docker:20
  settings:
    username: asonix
    password:
      from_secret: dockerhub_token
    repo: asonix/relay
    dockerfile: docker/drone/Dockerfile
    auto_tag: true
    auto_tag_suffix: linux-arm64v8
    build_args:
    - REPO_ARCH=arm64v8

- name: publish
  image: plugins/gitea-release:1
  settings:
    api_key:
      from_secret: gitea_token
    base_url: https://git.asonix.dog
    files:
    - relay-linux-arm64v8

depends_on:
- clippy
- tests

trigger:
  event:
  - tag

---
kind: pipeline
type: docker

name: check-arm32v7

platform:
  arch: amd64

clone:
  disable: true

steps:
- name: clone
  image: alpine/git:latest
  user: root
  commands:
  - git clone $DRONE_GIT_HTTP_URL .
  - git checkout $DRONE_COMMIT
  - chown -R 991:991 .

- name: check
  image: asonix/rust-builder:latest-linux-arm32v7
  pull: always
  commands:
  - cargo check --target=$TARGET

trigger:
  event:
  - push
  - pull_request

---
kind: pipeline
type: docker

name: build-arm32v7

platform:
  arch: amd64

clone:
  disable: true

steps:
- name: clone
  image: alpine/git:latest
  user: root
  commands:
  - git clone $DRONE_GIT_HTTP_URL .
  - git checkout $DRONE_COMMIT
  - chown -R 991:991 .

- name: build
  image: asonix/rust-builder:latest-linux-arm32v7
  pull: always
  commands:
  - cargo build --target=$TARGET --release
  - $TOOL-strip target/$TARGET/release/relay
  - cp target/$TARGET/release/relay .
  - cp relay relay-linux-arm32v7

- name: push
  image: plugins/docker:20
  settings:
    username: asonix
    password:
      from_secret: dockerhub_token
    repo: asonix/relay
    dockerfile: docker/drone/Dockerfile
    auto_tag: true
    auto_tag_suffix: linux-arm32v7
    build_args:
    - REPO_ARCH=arm32v7

- name: publish
  image: plugins/gitea-release:1
  settings:
    api_key:
      from_secret: gitea_token
    base_url: https://git.asonix.dog
    files:
    - relay-linux-arm32v7

depends_on:
- clippy
- tests

trigger:
  event:
  - tag

---
kind: pipeline
type: docker

name: manifest

platform:
  arch: amd64

clone:
  disable: true

steps:
- name: clone
  image: alpine/git:latest
  user: root
  commands:
  - git clone $DRONE_GIT_HTTP_URL .
  - git checkout $DRONE_COMMIT
  - chown -R 991:991 .

- name: manifest
  image: plugins/manifest:1
  settings:
    username: asonix
    password:
      from_secret: dockerhub_token
    dump: true
    auto_tag: true
    ignore_missing: true
    spec: docker/drone/manifest.tmpl

depends_on:
- build-amd64
- build-arm64v8
- build-arm32v7

trigger:
  event:
  - tag

---
kind: pipeline
type: docker

name: publish-crate

platform:
  arch: amd64

clone:
  disable: true

steps:
- name: clone
  image: alpine/git:latest
  user: root
  commands:
  - git clone $DRONE_GIT_HTTP_URL .
  - git checkout $DRONE_COMMIT
  - chown -R 991:991 .

- name: publish
  image: asonix/rust-builder:latest-linux-amd64
  pull: always
  environment:
    CRATES_IO_TOKEN:
      from_secret: crates_io_token
  commands:
  - cargo publish --token $CRATES_IO_TOKEN

depends_on:
- build-amd64
- build-arm64v8
- build-arm32v7

trigger:
  event:
  - tag


@@ -0,0 +1,61 @@
on:
  push:
    branches:
      - '*'
  pull_request:
    branches:
      - main

jobs:
  clippy:
    runs-on: docker
    container:
      image: docker.io/asonix/actions-base-image:0.1
    steps:
      -
        name: Checkout relay
        uses: https://github.com/actions/checkout@v4
      -
        name: Cargo Cache
        uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
      -
        name: Clippy
        run: |
          cargo clippy --no-default-features -- -D warnings

  tests:
    runs-on: docker
    container:
      image: docker.io/asonix/actions-base-image:0.1
    steps:
      -
        name: Checkout relay
        uses: https://github.com/actions/checkout@v4
      -
        name: Cargo Cache
        uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
      -
        name: Test
        run: cargo test

  check:
    strategy:
      fail-fast: false
      matrix:
        target:
          - x86_64-unknown-linux-musl
          - armv7-unknown-linux-musleabihf
          - aarch64-unknown-linux-musl
    runs-on: docker
    container:
      image: docker.io/asonix/actions-base-image:0.1
    steps:
      -
        name: Checkout relay
        uses: https://github.com/actions/checkout@v4
      -
        name: Cargo Cache
        uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
      -
        name: Debug builds
        run: cargo zigbuild --target ${{ matrix.target }}


@@ -0,0 +1,226 @@
on:
  push:
    tags:
      - 'v*.*.*'

env:
  REGISTRY_IMAGE: asonix/relay

jobs:
  clippy:
    runs-on: base-image
    container:
      image: docker.io/asonix/actions-base-image:0.1
    steps:
      -
        name: Checkout relay
        uses: https://github.com/actions/checkout@v4
      -
        name: Cargo Cache
        uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
      -
        name: Clippy
        run: |
          # cargo clippy --no-default-features -- -D warnings
          cargo clippy --no-default-features

  tests:
    runs-on: docker
    container:
      image: docker.io/asonix/actions-base-image:0.1
    steps:
      -
        name: Checkout relay
        uses: https://github.com/actions/checkout@v4
      -
        name: Cargo Cache
        uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
      -
        name: Test
        run: cargo test

  build:
    needs:
      - clippy
      - tests
    runs-on: docker
    container:
      image: docker.io/asonix/actions-base-image:0.1
    strategy:
      fail-fast: false
      matrix:
        info:
          - target: x86_64-unknown-linux-musl
            artifact: linux-amd64
            platform: linux/amd64
          - target: armv7-unknown-linux-musleabihf
            artifact: linux-arm32v7
            platform: linux/arm/v7
          - target: aarch64-unknown-linux-musl
            artifact: linux-arm64v8
            platform: linux/arm64
    steps:
      -
        name: Checkout relay
        uses: https://github.com/actions/checkout@v4
      -
        name: Cargo Cache
        uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
      -
        name: Prepare Platform
        run: |
          platform=${{ matrix.info.platform }}
          echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
        shell: bash
      -
        name: Docker meta
        id: meta
        uses: https://github.com/docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY_IMAGE }}
          flavor: |
            latest=auto
            suffix=-${{ matrix.info.artifact }}
          tags: |
            type=raw,value=latest,enable={{ is_default_branch }}
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=semver,pattern={{major}}
      -
        name: Set up QEMU
        uses: https://github.com/docker/setup-qemu-action@v3
      -
        name: Set up Docker Buildx
        uses: https://github.com/docker/setup-buildx-action@v3
      -
        name: Docker login
        uses: https://github.com/docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      -
        name: Compile relay
        run: cargo zigbuild --target ${{ matrix.info.target }} --release
      -
        name: Prepare artifacts
        run: |
          mkdir artifacts
          cp target/${{ matrix.info.target }}/release/relay artifacts/relay-${{ matrix.info.artifact }}
      -
        uses: https://github.com/actions/upload-artifact@v3
        with:
          name: binaries
          path: artifacts/
      -
        name: Prepare binary
        run: |
          cp target/${{ matrix.info.target }}/release/relay docker/forgejo/relay
      -
        name: Build and push ${{ matrix.info.platform }} docker image
        id: build
        uses: docker/build-push-action@v5
        with:
          context: ./docker/forgejo
          platforms: ${{ matrix.info.platform }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          outputs: type=image,name=${{ env.REGISTRY_IMAGE }},name-canonical=true,push=true
      -
        name: Export digest
        run: |
          mkdir -p /tmp/digests
          digest="${{ steps.build.outputs.digest }}"
          touch "/tmp/digests/${digest#sha256:}"
          echo "Created /tmp/digests/${digest#sha256:}"
        shell: bash
      -
        name: Upload ${{ matrix.info.platform }} digest
        uses: https://github.com/actions/upload-artifact@v3
        with:
          name: digests
          path: /tmp/digests/*
          if-no-files-found: error
          retention-days: 1

  publish-docker:
    runs-on: docker
    container:
      image: docker.io/asonix/actions-base-image:0.1
    needs: [build]
    steps:
      -
        name: Download digests
        uses: https://github.com/actions/download-artifact@v3
        with:
          name: digests
          path: /tmp/digests
          pattern: digests-*
          merge-multiple: true
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      -
        name: Docker login
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      -
        name: Docker meta
        id: meta
        uses: https://github.com/docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY_IMAGE }}
          flavor: |
            latest=auto
          tags: |
            type=raw,value=latest,enable={{ is_default_branch }}
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=semver,pattern={{major}}
      -
        name: Create manifest list and push
        working-directory: /tmp/digests
        run: |
          tags=$(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "${DOCKER_METADATA_OUTPUT_JSON}")
          images=$(printf "${{ env.REGISTRY_IMAGE }}@sha256:%s " *)
          echo "Running 'docker buildx imagetools create ${tags[@]} ${images[@]}'"
          docker buildx imagetools create ${tags[@]} ${images[@]}
        shell: bash
      -
        name: Inspect Image
        run: |
          docker buildx imagetools inspect ${{ env.REGISTRY_IMAGE }}:${{ steps.meta.outputs.version }}

  publish-forgejo:
    needs: [build]
    runs-on: docker
    container:
      image: docker.io/asonix/actions-base-image:0.1
    steps:
      - uses: https://github.com/actions/download-artifact@v3
        with:
          name: binaries
          path: artifacts/
          merge-multiple: true
      - uses: actions/forgejo-release@v1
        with:
          direction: upload
          token: ${{ secrets.GITHUB_TOKEN }}
          release-dir: artifacts/

  publish-crate:
    needs: [build]
    runs-on: docker
    container:
      image: docker.io/asonix/actions-base-image:0.1
    steps:
      -
        name: Checkout relay
        uses: https://github.com/actions/checkout@v4
      -
        name: Cargo Cache
        uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
      -
        name: Publish Crate
        run: cargo publish --token ${{ secrets.CRATES_IO_TOKEN }}

Cargo.lock (generated): 3294 changed lines; file diff suppressed because it is too large.


@@ -1,11 +1,11 @@
 [package]
 name = "ap-relay"
 description = "A simple activitypub relay"
-version = "0.3.103"
+version = "0.3.116"
 authors = ["asonix <asonix@asonix.dog>"]
 license = "AGPL-3.0"
 readme = "README.md"
-repository = "https://git.asonix.dog/asonix/ap-relay"
+repository = "https://git.asonix.dog/asonix/relay"
 keywords = ["activitypub", "relay"]
 edition = "2021"
 build = "src/build.rs"
@@ -14,96 +14,101 @@ build = "src/build.rs"
 name = "relay"
 path = "src/main.rs"
 
+[profile.release]
+strip = true
+
 [features]
-console = ["console-subscriber"]
+console = ["dep:console-subscriber"]
 default = []
 
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 [dependencies]
-anyhow = "1.0"
-actix-rt = "2.7.0"
-actix-web = { version = "4.4.0", default-features = false, features = ["compress-brotli", "compress-gzip", "rustls-0_21"] }
+actix-web = { version = "4.4.0", default-features = false, features = ["compress-brotli", "compress-gzip", "rustls-0_23"] }
 actix-webfinger = { version = "0.5.0", default-features = false }
 activitystreams = "0.7.0-alpha.25"
 activitystreams-ext = "0.1.0-alpha.3"
-ammonia = "3.1.0"
-bcrypt = "0.15"
-base64 = "0.21"
+ammonia = "4.0.0"
+async-cpupool = "0.3.0"
+bcrypt = "0.16"
+base64 = "0.22"
 clap = { version = "4.0.0", features = ["derive"] }
-config = "0.13.0"
-console-subscriber = { version = "0.1", optional = true }
-dashmap = "5.1.0"
+color-eyre = "0.6.2"
+config = { version = "0.14.0", default-features = false, features = ["toml", "json", "yaml"] }
+console-subscriber = { version = "0.4", optional = true }
+dashmap = "6.0.1"
 dotenv = "0.15.0"
-flume = "0.11.0"
-lru = "0.11.0"
-metrics = "0.21.0"
-metrics-exporter-prometheus = { version = "0.12.0", default-features = false, features = [
+futures-core = "0.3.30"
+lru = "0.12.0"
+metrics = "0.23.0"
+metrics-exporter-prometheus = { version = "0.15.0", default-features = false, features = [
     "http-listener",
 ] }
-metrics-util = "0.15.0"
+metrics-util = "0.17.0"
 mime = "0.3.16"
-minify-html = "0.11.0"
-opentelemetry = { version = "0.20", features = ["rt-tokio"] }
-opentelemetry-otlp = "0.13"
+minify-html = "0.15.0"
+opentelemetry = "0.27.1"
+opentelemetry_sdk = { version = "0.27", features = ["rt-tokio"] }
+opentelemetry-otlp = { version = "0.27", features = ["grpc-tonic"] }
 pin-project-lite = "0.2.9"
-quanta = "0.11.0"
+# pinned to metrics-util
+quanta = "0.12.0"
 rand = "0.8"
-reqwest = { version = "0.11", default-features = false, features = ["rustls-tls", "stream"]}
-reqwest-middleware = "0.2"
-reqwest-tracing = "0.4.5"
-ring = "0.16.20"
-rsa = { version = "0.9" }
+reqwest = { version = "0.12", default-features = false, features = ["rustls-tls", "stream"]}
+reqwest-middleware = { version = "0.4", default-features = false, features = ["json"] }
+reqwest-tracing = "0.5.0"
+ring = "0.17.5"
+rsa = "0.9"
 rsa-magic-public-key = "0.8.0"
-rustls = "0.21.0"
-rustls-pemfile = "1.0.1"
+rustls = { version = "0.23.0", default-features = false, features = ["ring", "logging", "std", "tls12"] }
+rustls-channel-resolver = "0.3.0"
+rustls-pemfile = "2"
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 sled = "0.34.7"
-teloxide = { version = "0.12.0", default-features = false, features = [
+streem = "0.2.0"
+teloxide = { version = "0.13.0", default-features = false, features = [
     "ctrlc_handler",
     "macros",
     "rustls",
 ] }
-thiserror = "1.0"
+thiserror = "2.0"
 time = { version = "0.3.17", features = ["serde"] }
 tracing = "0.1"
 tracing-error = "0.2"
-tracing-futures = "0.2"
-tracing-log = "0.1"
-tracing-opentelemetry = "0.21"
+tracing-log = "0.2"
+tracing-opentelemetry = "0.28"
 tracing-subscriber = { version = "0.3", features = [
     "ansi",
     "env-filter",
     "fmt",
 ] }
-tokio = { version = "1", features = ["macros", "sync"] }
+tokio = { version = "1", features = ["full", "tracing"] }
 uuid = { version = "1", features = ["v4", "serde"] }
-streem = "0.1.0"
 
 [dependencies.background-jobs]
-version = "0.15.0"
+version = "0.19.0"
 default-features = false
-features = ["background-jobs-actix", "error-logging"]
+features = ["error-logging", "metrics", "tokio"]
 
 [dependencies.http-signature-normalization-actix]
-version = "0.10.1"
+version = "0.11.1"
 default-features = false
 features = ["server", "ring"]
 
 [dependencies.http-signature-normalization-reqwest]
-version = "0.10.0"
+version = "0.13.0"
 default-features = false
 features = ["middleware", "ring"]
 
 [dependencies.tracing-actix-web]
-version = "0.7.6"
+version = "0.7.9"
 
 [build-dependencies]
-anyhow = "1.0"
+color-eyre = "0.6.2"
 dotenv = "0.15.0"
 ructe = { version = "0.17.0", features = ["sass", "mime03"] }
-toml = "0.7.0"
+toml = "0.8.0"
 
 [profile.dev.package.rsa]
 opt-level = 3


@@ -1,11 +0,0 @@
ARG REPO_ARCH

FROM asonix/rust-runner:latest-linux-$REPO_ARCH

COPY relay /usr/local/bin/relay

USER app
EXPOSE 8080
VOLUME /mnt
ENTRYPOINT ["/sbin/tini", "--"]
CMD ["/usr/local/bin/relay"]


@@ -1,25 +0,0 @@
image: asonix/relay:{{#if build.tag}}{{trimPrefix "v" build.tag}}{{else}}latest{{/if}}
{{#if build.tags}}
tags:
{{#each build.tags}}
  - {{this}}
{{/each}}
{{/if}}
manifests:
  -
    image: asonix/relay:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}linux-amd64
    platform:
      architecture: amd64
      os: linux
  -
    image: asonix/relay:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}linux-arm64v8
    platform:
      architecture: arm64
      os: linux
      variant: v8
  -
    image: asonix/relay:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}linux-arm32v7
    platform:
      architecture: arm
      os: linux
      variant: v7

docker/forgejo/Dockerfile (new file): +24

@@ -0,0 +1,24 @@
FROM alpine:3.19

ARG UID=991
ARG GID=991

ENV \
  UID=${UID} \
  GID=${GID}

USER root
RUN \
  addgroup -g "${GID}" app && \
  adduser -D -G app -u "${UID}" -g "" -h /opt/app app && \
  apk add tini && \
  chown -R app:app /mnt

COPY relay /usr/local/bin/relay
USER app

EXPOSE 6669
EXPOSE 8080
VOLUME /mnt
ENTRYPOINT ["/sbin/tini", "--"]
CMD ["/usr/local/bin/relay"]


@@ -2,7 +2,7 @@ version: '3.3'
 services:
   relay:
-    image: asonix/relay:0.3.85
+    image: asonix/relay:0.3.115
     ports:
       - "8079:8079"
     restart: always

flake.lock (generated): 14 changed lines

@@ -5,11 +5,11 @@
       "systems": "systems"
     },
     "locked": {
-      "lastModified": 1692799911,
-      "narHash": "sha256-3eihraek4qL744EvQXsK1Ha6C3CR7nnT8X2qWap4RNk=",
+      "lastModified": 1710146030,
+      "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
       "owner": "numtide",
       "repo": "flake-utils",
-      "rev": "f9e7cf818399d17d347f847525c5a5a8032e4e44",
+      "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
       "type": "github"
     },
     "original": {
@@ -20,16 +20,16 @@
     },
     "nixpkgs": {
       "locked": {
-        "lastModified": 1693003285,
-        "narHash": "sha256-5nm4yrEHKupjn62MibENtfqlP6pWcRTuSKrMiH9bLkc=",
+        "lastModified": 1733550349,
+        "narHash": "sha256-NcGumB4Lr6KSDq+nIqXtNA8QwAQKDSZT7N9OTGWbTrs=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "5690c4271f2998c304a45c91a0aeb8fb69feaea7",
+        "rev": "e2605d0744c2417b09f8bf850dfca42fcf537d34",
         "type": "github"
       },
       "original": {
         "owner": "NixOS",
-        "ref": "nixos-unstable",
+        "ref": "nixos-24.11",
         "repo": "nixpkgs",
         "type": "github"
       }


@@ -2,7 +2,7 @@
   description = "relay";
 
   inputs = {
-    nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
+    nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.11";
     flake-utils.url = "github:numtide/flake-utils";
   };


@@ -1,17 +1,14 @@
 { lib
 , nixosTests
-, protobuf
 , rustPlatform
 }:
 
 rustPlatform.buildRustPackage {
   pname = "relay";
-  version = "0.3.103";
+  version = "0.3.116";
   src = ./.;
   cargoLock.lockFile = ./Cargo.lock;
 
-  PROTOC = "${protobuf}/bin/protoc";
-  PROTOC_INCLUDE = "${protobuf}/include";
   RUSTFLAGS = "--cfg tokio_unstable";
 
   nativeBuildInputs = [ ];


@@ -5,7 +5,6 @@ use crate::{
     error::{Error, ErrorKind},
     extractors::XApiToken,
 };
-use actix_web::http::header::Header;
 use reqwest_middleware::ClientWithMiddleware;
 use serde::de::DeserializeOwned;
@@ -87,13 +86,17 @@ async fn get_results<T: DeserializeOwned>(
     let res = client
         .get(iri.as_str())
-        .header(XApiToken::name(), x_api_token.to_string())
+        .header(XApiToken::http1_name(), x_api_token.to_string())
         .send()
         .await
         .map_err(|e| ErrorKind::SendRequest(iri.to_string(), e.to_string()))?;
 
     if !res.status().is_success() {
-        return Err(ErrorKind::Status(iri.to_string(), res.status()).into());
+        return Err(ErrorKind::Status(
+            iri.to_string(),
+            crate::http1::status_to_http02(res.status()),
+        )
+        .into());
     }
 
     let t = res
@@ -116,7 +119,7 @@ async fn post_domains(
     let res = client
         .post(iri.as_str())
-        .header(XApiToken::name(), x_api_token.to_string())
+        .header(XApiToken::http1_name(), x_api_token.to_string())
         .json(&Domains { domains })
         .send()
         .await


@@ -21,7 +21,7 @@ fn git_info() {
     }
 }
 
-fn version_info() -> Result<(), anyhow::Error> {
+fn version_info() -> color_eyre::Result<()> {
     let cargo_toml = Path::new(&std::env::var("CARGO_MANIFEST_DIR")?).join("Cargo.toml");
 
     let mut file = File::open(cargo_toml)?;
@@ -42,7 +42,7 @@ fn version_info() -> color_eyre::Result<()> {
     Ok(())
 }
 
-fn main() -> Result<(), anyhow::Error> {
+fn main() -> color_eyre::Result<()> {
     dotenv::dotenv().ok();
 
     git_info();


@@ -1,4 +1,4 @@
-use metrics::{Key, Recorder, SetRecorderError};
+use metrics::{Key, Metadata, Recorder, SetRecorderError};
 use metrics_util::{
     registry::{AtomicStorage, GenerationalStorage, Recency, Registry},
     MetricKindMask, Summary,
@@ -15,6 +15,10 @@ const MINUTES: u64 = 60 * SECONDS;
 const HOURS: u64 = 60 * MINUTES;
 const DAYS: u64 = 24 * HOURS;
 
+pub(crate) fn recordable(len: usize) -> u32 {
+    ((len as u64) % u64::from(u32::MAX)) as u32
+}
+
 type DistributionMap = BTreeMap<Vec<(String, String)>, Summary>;
 
 #[derive(Clone)]
@@ -289,7 +293,7 @@ impl Inner {
         }
 
         let mut d = self.distributions.write().unwrap();
-        let outer_entry = d.entry(name.clone()).or_insert_with(BTreeMap::new);
+        let outer_entry = d.entry(name.clone()).or_default();
 
         let entry = outer_entry
             .entry(labels)
@@ -299,7 +303,14 @@ impl Inner {
                 for sample in samples {
                     entry.add(*sample);
                 }
-            })
+            });
+
+            let mut total_len = 0;
+            for dist_map in d.values() {
+                total_len += dist_map.len();
+            }
+
+            metrics::gauge!("relay.collector.distributions.size").set(recordable(total_len));
         }
 
         let d = self.distributions.read().unwrap().clone();
@@ -358,10 +369,11 @@ impl MemoryCollector {
     ) {
         let mut d = self.inner.descriptions.write().unwrap();
         d.entry(key.as_str().to_owned()).or_insert(description);
+        metrics::gauge!("relay.collector.descriptions.size").set(recordable(d.len()));
     }
 
-    pub(crate) fn install(&self) -> Result<(), SetRecorderError> {
-        metrics::set_boxed_recorder(Box::new(self.clone()))
+    pub(crate) fn install(&self) -> Result<(), SetRecorderError<Self>> {
+        metrics::set_global_recorder(self.clone())
     }
 }
 
@@ -393,19 +405,19 @@ impl Recorder for MemoryCollector {
         self.add_description_if_missing(&key, description)
     }
 
-    fn register_counter(&self, key: &Key) -> metrics::Counter {
+    fn register_counter(&self, key: &Key, _: &Metadata<'_>) -> metrics::Counter {
         self.inner
             .registry
             .get_or_create_counter(key, |c| c.clone().into())
     }
 
-    fn register_gauge(&self, key: &Key) -> metrics::Gauge {
+    fn register_gauge(&self, key: &Key, _: &Metadata<'_>) -> metrics::Gauge {
         self.inner
             .registry
            .get_or_create_gauge(key, |c| c.clone().into())
     }
 
-    fn register_histogram(&self, key: &Key) -> metrics::Histogram {
+    fn register_histogram(&self, key: &Key, _: &Metadata<'_>) -> metrics::Histogram {
         self.inner
             .registry
             .get_or_create_histogram(key, |c| c.clone().into())
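
A note on the new recordable helper above: metrics gauges store f64 values, and u32 is the widest common integer type that converts to f64 without precision loss, so collection sizes are folded into u32 range before being recorded. A standalone sketch of that behavior (the asserts are illustrative, not from the repository):

// recordable folds a usize into u32 range so it can be recorded on an
// f64-valued gauge without precision loss; values wrap at u32::MAX.
fn recordable(len: usize) -> u32 {
    ((len as u64) % u64::from(u32::MAX)) as u32
}

fn main() {
    assert_eq!(recordable(42), 42);
    assert_eq!(recordable(u32::MAX as usize), 0); // wraps rather than saturates
}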


@@ -12,9 +12,8 @@ use activitystreams::{
 };
 use config::Environment;
 use http_signature_normalization_actix::{digest::ring::Sha256, prelude::VerifyDigest};
-use rustls::{Certificate, PrivateKey};
+use rustls::sign::CertifiedKey;
 use std::{
-    io::BufReader,
     net::{IpAddr, SocketAddr},
     path::PathBuf,
 };
@@ -312,43 +311,34 @@ impl Config {
         Some((config.addr, config.port).into())
     }
 
-    pub(crate) fn open_keys(&self) -> Result<Option<(Vec<Certificate>, PrivateKey)>, Error> {
+    pub(crate) async fn open_keys(&self) -> Result<Option<CertifiedKey>, Error> {
         let tls = if let Some(tls) = &self.tls {
             tls
         } else {
-            tracing::warn!("No TLS config present");
+            tracing::info!("No TLS config present");
             return Ok(None);
         };
 
-        let mut certs_reader = BufReader::new(std::fs::File::open(&tls.cert)?);
-        let certs = rustls_pemfile::certs(&mut certs_reader)?;
+        let certs_bytes = tokio::fs::read(&tls.cert).await?;
+        let certs =
+            rustls_pemfile::certs(&mut certs_bytes.as_slice()).collect::<Result<Vec<_>, _>>()?;
 
         if certs.is_empty() {
             tracing::warn!("No certs read from certificate file");
             return Ok(None);
         }
 
-        let mut key_reader = BufReader::new(std::fs::File::open(&tls.key)?);
-        let key = rustls_pemfile::read_one(&mut key_reader)?;
-
-        let certs = certs.into_iter().map(Certificate).collect();
-
-        let key = if let Some(key) = key {
-            match key {
-                rustls_pemfile::Item::RSAKey(der) => PrivateKey(der),
-                rustls_pemfile::Item::PKCS8Key(der) => PrivateKey(der),
-                rustls_pemfile::Item::ECKey(der) => PrivateKey(der),
-                _ => {
-                    tracing::warn!("Unknown key format: {:?}", key);
-                    return Ok(None);
-                }
-            }
+        let key_bytes = tokio::fs::read(&tls.key).await?;
+        let key = if let Some(key) = rustls_pemfile::private_key(&mut key_bytes.as_slice())? {
+            key
         } else {
             tracing::warn!("Failed to read private key");
             return Ok(None);
         };
 
-        Ok(Some((certs, key)))
+        let key = rustls::crypto::ring::sign::any_supported_type(&key)?;
+
+        Ok(Some(CertifiedKey::new(certs, key)))
     }
 
     pub(crate) fn footer_blurb(&self) -> Option<crate::templates::Html<String>> {
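
For reference, the new loading path condenses to roughly the following sketch, assuming rustls 0.23 with the ring provider and rustls-pemfile 2.x as pinned in the updated Cargo.toml; the function name and error type here are illustrative, not the relay's actual API:

use std::error::Error;

use rustls::sign::CertifiedKey;

// Parse PEM bytes into a CertifiedKey, mirroring Config::open_keys above.
fn load_certified_key(cert_pem: &[u8], key_pem: &[u8]) -> Result<CertifiedKey, Box<dyn Error>> {
    // rustls-pemfile 2.x yields an iterator of Result<CertificateDer, io::Error>
    let certs = rustls_pemfile::certs(&mut &*cert_pem).collect::<Result<Vec<_>, _>>()?;

    // private_key() returns the first RSA, PKCS#8, or SEC1 key found, if any
    let key = rustls_pemfile::private_key(&mut &*key_pem)?.ok_or("no private key found")?;

    // Turn the DER-encoded key into a SigningKey via the ring provider
    let key = rustls::crypto::ring::sign::any_supported_type(&key)?;

    Ok(CertifiedKey::new(certs, key))
}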


@@ -9,10 +9,10 @@ pub(crate) struct LastOnline {
 impl LastOnline {
     pub(crate) fn mark_seen(&self, iri: &IriStr) {
         if let Some(authority) = iri.authority_str() {
-            self.domains
-                .lock()
-                .unwrap()
-                .insert(authority.to_string(), OffsetDateTime::now_utc());
+            let mut guard = self.domains.lock().unwrap();
+            guard.insert(authority.to_string(), OffsetDateTime::now_utc());
+            metrics::gauge!("relay.last-online.size",)
+                .set(crate::collector::recordable(guard.len()));
         }
     }


@@ -73,7 +73,9 @@ impl State {
     }
 
     pub(crate) fn cache(&self, object_id: IriString, actor_id: IriString) {
-        self.object_cache.write().unwrap().put(object_id, actor_id);
+        let mut guard = self.object_cache.write().unwrap();
+        guard.put(object_id, actor_id);
+        metrics::gauge!("relay.object-cache.size").set(crate::collector::recordable(guard.len()));
     }
 
     pub(crate) fn is_connected(&self, iri: &IriString) -> bool {

src/db.rs: 149 changed lines

@@ -7,7 +7,7 @@ use rsa::{
     pkcs8::{DecodePrivateKey, EncodePrivateKey},
     RsaPrivateKey,
 };
-use sled::{Batch, Tree};
+use sled::{transaction::TransactionError, Batch, Transactional, Tree};
 use std::{
     collections::{BTreeMap, HashMap},
     sync::{
@@ -283,10 +283,15 @@
     pub(crate) async fn check_health(&self) -> Result<(), Error> {
         let next = self.inner.healthz_counter.fetch_add(1, Ordering::Relaxed);
         self.unblock(move |inner| {
-            inner
+            let res = inner
                 .healthz
                 .insert("healthz", &next.to_be_bytes()[..])
-                .map_err(Error::from)
+                .map_err(Error::from);
+
+            metrics::gauge!("relay.db.healthz.size")
+                .set(crate::collector::recordable(inner.healthz.len()));
+
+            res
         })
         .await?;
         self.inner.healthz.flush_async().await?;
@@ -349,6 +354,9 @@
                 .actor_id_info
                 .insert(actor_id.as_str().as_bytes(), vec)?;
 
+            metrics::gauge!("relay.db.actor-id-info.size")
+                .set(crate::collector::recordable(inner.actor_id_info.len()));
+
             Ok(())
         })
         .await
@@ -383,6 +391,9 @@
                 .actor_id_instance
                 .insert(actor_id.as_str().as_bytes(), vec)?;
 
+            metrics::gauge!("relay.db.actor-id-instance.size")
+                .set(crate::collector::recordable(inner.actor_id_instance.len()));
+
             Ok(())
         })
         .await
@@ -417,6 +428,9 @@
                 .actor_id_contact
                 .insert(actor_id.as_str().as_bytes(), vec)?;
 
+            metrics::gauge!("relay.db.actor-id-contact.size")
+                .set(crate::collector::recordable(inner.actor_id_contact.len()));
+
             Ok(())
         })
         .await
@@ -447,6 +461,12 @@
             inner
                 .media_url_media_id
                 .insert(url.as_str().as_bytes(), id.as_bytes())?;
 
+            metrics::gauge!("relay.db.media-id-media-url.size")
+                .set(crate::collector::recordable(inner.media_id_media_url.len()));
+            metrics::gauge!("relay.db.media-url-media-id.size")
+                .set(crate::collector::recordable(inner.media_url_media_id.len()));
+
             Ok(())
         })
         .await
@@ -538,6 +558,14 @@
             inner
                 .actor_id_actor
                 .insert(actor.id.as_str().as_bytes(), vec)?;
 
+            metrics::gauge!("relay.db.public-key-actor-id.size").set(crate::collector::recordable(
+                inner.public_key_id_actor_id.len(),
+            ));
+            metrics::gauge!("relay.db.actor-id-actor.size").set(crate::collector::recordable(
+                inner.public_key_id_actor_id.len(),
+            ));
+
             Ok(())
         })
         .await
@@ -550,6 +578,10 @@
                 .connected_actor_ids
                 .remove(actor_id.as_str().as_bytes())?;
 
+            metrics::gauge!("relay.db.connected-actor-ids.size").set(crate::collector::recordable(
+                inner.connected_actor_ids.len(),
+            ));
+
             Ok(())
         })
         .await
@@ -562,6 +594,10 @@
                 .connected_actor_ids
                 .insert(actor_id.as_str().as_bytes(), actor_id.as_str().as_bytes())?;
 
+            metrics::gauge!("relay.db.connected-actor-ids.size").set(crate::collector::recordable(
+                inner.connected_actor_ids.len(),
+            ));
+
             Ok(())
         })
         .await
@@ -569,30 +605,64 @@
     pub(crate) async fn add_blocks(&self, domains: Vec<String>) -> Result<(), Error> {
         self.unblock(move |inner| {
-            for connected in inner.connected_by_domain(&domains) {
-                inner
-                    .connected_actor_ids
-                    .remove(connected.as_str().as_bytes())?;
-            }
+            let connected_by_domain = inner.connected_by_domain(&domains).collect::<Vec<_>>();
 
-            for authority in &domains {
-                inner
-                    .blocked_domains
-                    .insert(domain_key(authority), authority.as_bytes())?;
-                inner.allowed_domains.remove(domain_key(authority))?;
-            }
-
-            Ok(())
+            let res = (
+                &inner.connected_actor_ids,
+                &inner.blocked_domains,
+                &inner.allowed_domains,
+            )
+                .transaction(|(connected, blocked, allowed)| {
+                    let mut connected_batch = Batch::default();
+                    let mut blocked_batch = Batch::default();
+                    let mut allowed_batch = Batch::default();
+
+                    for connected in &connected_by_domain {
+                        connected_batch.remove(connected.as_str().as_bytes());
+                    }
+
+                    for authority in &domains {
+                        blocked_batch
+                            .insert(domain_key(authority).as_bytes(), authority.as_bytes());
+                        allowed_batch.remove(domain_key(authority).as_bytes());
+                    }
+
+                    connected.apply_batch(&connected_batch)?;
+                    blocked.apply_batch(&blocked_batch)?;
+                    allowed.apply_batch(&allowed_batch)?;
+
+                    Ok(())
+                });
+
+            metrics::gauge!("relay.db.connected-actor-ids.size").set(crate::collector::recordable(
+                inner.connected_actor_ids.len(),
+            ));
+            metrics::gauge!("relay.db.blocked-domains.size")
+                .set(crate::collector::recordable(inner.blocked_domains.len()));
+            metrics::gauge!("relay.db.allowed-domains.size")
+                .set(crate::collector::recordable(inner.allowed_domains.len()));
+
+            match res {
+                Ok(()) => Ok(()),
+                Err(TransactionError::Abort(e) | TransactionError::Storage(e)) => Err(e.into()),
+            }
         })
         .await
     }
 
     pub(crate) async fn remove_blocks(&self, domains: Vec<String>) -> Result<(), Error> {
         self.unblock(move |inner| {
+            let mut blocked_batch = Batch::default();
+
             for authority in &domains {
-                inner.blocked_domains.remove(domain_key(authority))?;
+                blocked_batch.remove(domain_key(authority).as_bytes());
             }
 
+            inner.blocked_domains.apply_batch(blocked_batch)?;
+
+            metrics::gauge!("relay.db.blocked-domains.size")
+                .set(crate::collector::recordable(inner.blocked_domains.len()));
+
             Ok(())
         })
         .await
@@ -600,12 +670,17 @@
     pub(crate) async fn add_allows(&self, domains: Vec<String>) -> Result<(), Error> {
         self.unblock(move |inner| {
+            let mut allowed_batch = Batch::default();
+
             for authority in &domains {
-                inner
-                    .allowed_domains
-                    .insert(domain_key(authority), authority.as_bytes())?;
+                allowed_batch.insert(domain_key(authority).as_bytes(), authority.as_bytes());
             }
 
+            inner.allowed_domains.apply_batch(allowed_batch)?;
+
+            metrics::gauge!("relay.db.allowed-domains.size")
+                .set(crate::collector::recordable(inner.allowed_domains.len()));
+
             Ok(())
         })
         .await
@@ -614,17 +689,32 @@
     pub(crate) async fn remove_allows(&self, domains: Vec<String>) -> Result<(), Error> {
         self.unblock(move |inner| {
             if inner.restricted_mode {
-                for connected in inner.connected_by_domain(&domains) {
-                    inner
-                        .connected_actor_ids
-                        .remove(connected.as_str().as_bytes())?;
-                }
+                let connected_by_domain =
+                    inner.connected_by_domain(&domains).collect::<Vec<_>>();
+
+                let mut connected_batch = Batch::default();
+
+                for connected in &connected_by_domain {
+                    connected_batch.remove(connected.as_str().as_bytes());
+                }
+
+                inner.connected_actor_ids.apply_batch(connected_batch)?;
+
+                metrics::gauge!("relay.db.connected-actor-ids.size").set(
+                    crate::collector::recordable(inner.connected_actor_ids.len()),
+                );
             }
 
+            let mut allowed_batch = Batch::default();
+
             for authority in &domains {
-                inner.allowed_domains.remove(domain_key(authority))?;
+                allowed_batch.remove(domain_key(authority).as_bytes());
             }
 
+            inner.allowed_domains.apply_batch(allowed_batch)?;
+
+            metrics::gauge!("relay.db.allowed-domains.size")
+                .set(crate::collector::recordable(inner.allowed_domains.len()));
+
             Ok(())
         })
         .await
@@ -665,6 +755,10 @@
             inner
                 .settings
                 .insert("private-key".as_bytes(), pem_pkcs8.as_bytes())?;
 
+            metrics::gauge!("relay.db.settings.size")
+                .set(crate::collector::recordable(inner.settings.len()));
+
             Ok(())
         })
         .await
@@ -750,6 +844,11 @@ mod tests {
         {
             let db =
                 Db::build_inner(true, sled::Config::new().temporary(true).open().unwrap()).unwrap();
-            actix_rt::System::new().block_on((f)(db));
+
+            tokio::runtime::Builder::new_current_thread()
+                .enable_all()
+                .build()
+                .unwrap()
+                .block_on((f)(db));
         }
     }
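
The add_blocks rewrite pairs with commit 8a3256f52a: connected_by_domain is collected into a Vec before the transaction opens, so the closure never iterates a tree it is also writing, and all writes are staged in Batches that sled applies atomically across the three trees. A minimal sketch of the same pattern against the sled API, with hypothetical trees and a hypothetical move_key operation:

use sled::{transaction::TransactionError, Batch, Transactional};

// Atomically remove `key` from one tree and insert it into another.
// sled may retry the closure on conflict, so all writes are staged in
// Batches and applied through the transactional tree handles.
fn move_key(from: &sled::Tree, to: &sled::Tree, key: &[u8]) -> Result<(), sled::Error> {
    let res = (from, to).transaction(|(from, to)| {
        let mut from_batch = Batch::default();
        let mut to_batch = Batch::default();

        from_batch.remove(key);
        to_batch.insert(key, key);

        from.apply_batch(&from_batch)?;
        to.apply_batch(&to_batch)?;

        Ok(())
    });

    match res {
        Ok(()) => Ok(()),
        Err(TransactionError::Abort(e) | TransactionError::Storage(e)) => Err(e),
    }
}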


@@ -1,57 +1,85 @@
 use activitystreams::checked::CheckError;
-use actix_rt::task::JoinError;
 use actix_web::{
     error::{BlockingError, ResponseError},
     http::StatusCode,
     HttpResponse,
 };
+use background_jobs::BoxError;
+use color_eyre::eyre::Error as Report;
 use http_signature_normalization_reqwest::SignError;
-use std::{convert::Infallible, fmt::Debug, io};
-use tracing_error::SpanTrace;
+use std::{convert::Infallible, io, sync::Arc};
+use tokio::task::JoinError;
 
+#[derive(Clone)]
+struct ArcKind {
+    kind: Arc<ErrorKind>,
+}
+
+impl std::fmt::Debug for ArcKind {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        self.kind.fmt(f)
+    }
+}
+
+impl std::fmt::Display for ArcKind {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        self.kind.fmt(f)
+    }
+}
+
+impl std::error::Error for ArcKind {
+    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
+        self.kind.source()
+    }
+}
+
 pub(crate) struct Error {
-    context: String,
-    kind: ErrorKind,
+    kind: ArcKind,
+    display: Box<str>,
+    debug: Box<str>,
 }
 
 impl Error {
+    fn kind(&self) -> &ErrorKind {
+        &self.kind.kind
+    }
+
     pub(crate) fn is_breaker(&self) -> bool {
-        matches!(self.kind, ErrorKind::Breaker)
+        matches!(self.kind(), ErrorKind::Breaker)
     }
 
     pub(crate) fn is_not_found(&self) -> bool {
-        matches!(self.kind, ErrorKind::Status(_, StatusCode::NOT_FOUND))
+        matches!(self.kind(), ErrorKind::Status(_, StatusCode::NOT_FOUND))
     }
 
     pub(crate) fn is_bad_request(&self) -> bool {
-        matches!(self.kind, ErrorKind::Status(_, StatusCode::BAD_REQUEST))
+        matches!(self.kind(), ErrorKind::Status(_, StatusCode::BAD_REQUEST))
     }
 
     pub(crate) fn is_gone(&self) -> bool {
-        matches!(self.kind, ErrorKind::Status(_, StatusCode::GONE))
+        matches!(self.kind(), ErrorKind::Status(_, StatusCode::GONE))
     }
 
     pub(crate) fn is_malformed_json(&self) -> bool {
-        matches!(self.kind, ErrorKind::Json(_))
+        matches!(self.kind(), ErrorKind::Json(_))
     }
 }
 
 impl std::fmt::Debug for Error {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        writeln!(f, "{:?}", self.kind)
+        f.write_str(&self.debug)
     }
 }
 
 impl std::fmt::Display for Error {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        writeln!(f, "{}", self.kind)?;
-        std::fmt::Display::fmt(&self.context, f)
+        f.write_str(&self.display)
     }
 }
 
 impl std::error::Error for Error {
     fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
-        self.kind.source()
+        self.kind().source()
     }
 }
 
@@ -60,60 +88,77 @@ where
     ErrorKind: From<T>,
 {
     fn from(error: T) -> Self {
+        let kind = ArcKind {
+            kind: Arc::new(ErrorKind::from(error)),
+        };
+        let report = Report::new(kind.clone());
+        let display = format!("{report}");
+        let debug = format!("{report:?}");
+
         Error {
-            context: SpanTrace::capture().to_string(),
-            kind: error.into(),
+            kind,
+            display: Box::from(display),
+            debug: Box::from(debug),
         }
     }
 }
 
 #[derive(Debug, thiserror::Error)]
 pub(crate) enum ErrorKind {
-    #[error("Error queueing job, {0}")]
-    Queue(anyhow::Error),
+    #[error("Error in extractor")]
+    Extractor(#[from] crate::extractors::ErrorKind),
 
-    #[error("Error in configuration, {0}")]
+    #[error("Error queueing job")]
+    Queue(#[from] BoxError),
+
+    #[error("Error in configuration")]
     Config(#[from] config::ConfigError),
 
-    #[error("Couldn't parse key, {0}")]
+    #[error("Couldn't parse key")]
     Pkcs8(#[from] rsa::pkcs8::Error),
 
-    #[error("Couldn't encode public key, {0}")]
+    #[error("Couldn't encode public key")]
     Spki(#[from] rsa::pkcs8::spki::Error),
 
     #[error("Couldn't sign request")]
     SignRequest,
 
+    #[error("Response body from server exceeded limits")]
+    BodyTooLarge,
+
     #[error("Couldn't make request")]
     Reqwest(#[from] reqwest::Error),
 
-    #[error("Couldn't build client")]
+    #[error("Couldn't make request")]
     ReqwestMiddleware(#[from] reqwest_middleware::Error),
 
-    #[error("Couldn't parse IRI, {0}")]
+    #[error("Couldn't parse IRI")]
     ParseIri(#[from] activitystreams::iri_string::validate::Error),
 
-    #[error("Couldn't normalize IRI, {0}")]
+    #[error("Couldn't normalize IRI")]
     NormalizeIri(#[from] std::collections::TryReserveError),
 
-    #[error("Couldn't perform IO, {0}")]
+    #[error("Couldn't perform IO")]
     Io(#[from] io::Error),
 
     #[error("Couldn't sign string, {0}")]
     Rsa(rsa::errors::Error),
 
-    #[error("Couldn't use db, {0}")]
+    #[error("Couldn't use db")]
     Sled(#[from] sled::Error),
 
-    #[error("Couldn't do the json thing, {0}")]
+    #[error("Couldn't do the json thing")]
     Json(#[from] serde_json::Error),
 
-    #[error("Couldn't sign request, {0}")]
+    #[error("Couldn't sign request")]
     Sign(#[from] SignError),
 
     #[error("Couldn't sign digest")]
     Signature(#[from] rsa::signature::Error),
 
+    #[error("Couldn't prepare TLS private key")]
+    PrepareKey(#[from] rustls::Error),
+
     #[error("Couldn't verify signature")]
     VerifySignature,
 
@@ -144,10 +189,10 @@ pub(crate) enum ErrorKind {
     #[error("Wrong ActivityPub kind, {0}")]
     Kind(String),
 
-    #[error("Too many CPUs, {0}")]
+    #[error("Too many CPUs")]
     CpuCount(#[from] std::num::TryFromIntError),
 
-    #[error("{0}")]
+    #[error("Host mismatch")]
     HostMismatch(#[from] CheckError),
 
     #[error("Couldn't flush buffer")]
@@ -201,7 +246,7 @@ pub(crate) enum ErrorKind {
 impl ResponseError for Error {
     fn status_code(&self) -> StatusCode {
-        match self.kind {
+        match self.kind() {
             ErrorKind::NotAllowed(_) | ErrorKind::WrongActor(_) | ErrorKind::BadActor(_, _) => {
                 StatusCode::FORBIDDEN
             }
@@ -221,7 +266,7 @@ impl ResponseError for Error {
             .insert_header(("Content-Type", "application/activity+json"))
             .body(
                 serde_json::to_string(&serde_json::json!({
-                    "error": self.kind.to_string(),
+                    "error": self.kind().to_string(),
                 }))
                 .unwrap_or_else(|_| "{}".to_string()),
             )
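
Two shape changes are visible here: ErrorKind variants no longer interpolate {0} into their messages (the source chain carries that detail), and Error pre-renders its Display and Debug output through a color-eyre Report at construction. The Arc wrapper exists because Report::new takes ownership of a 'static error, so the kind must stay shareable between the report and the Error itself. A reduced sketch of the pattern, with Kind standing in for ErrorKind and plain formatting standing in for the Report rendering:

use std::sync::Arc;

// The kind stays cheaply shareable behind an Arc, while the rendered
// strings are captured once instead of being re-formatted on every log.
#[derive(Debug, thiserror::Error)]
#[error("something failed")]
struct Kind;

struct Error {
    kind: Arc<Kind>,
    display: Box<str>,
    debug: Box<str>,
}

impl From<Kind> for Error {
    fn from(kind: Kind) -> Self {
        let kind = Arc::new(kind);
        let display = format!("{kind}").into_boxed_str();
        let debug = format!("{kind:?}").into_boxed_str();

        Error { kind, display, debug }
    }
}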


@ -1,19 +1,15 @@
use actix_web::{ use actix_web::{
dev::Payload, dev::Payload,
error::ParseError, error::ParseError,
http::{ http::header::{from_one_raw_str, Header, HeaderName, HeaderValue, TryIntoHeaderValue},
header::{from_one_raw_str, Header, HeaderName, HeaderValue, TryIntoHeaderValue},
StatusCode,
},
web::Data, web::Data,
FromRequest, HttpMessage, HttpRequest, HttpResponse, ResponseError, FromRequest, HttpMessage, HttpRequest,
}; };
 use bcrypt::{BcryptError, DEFAULT_COST};
 use http_signature_normalization_actix::{prelude::InvalidHeaderValue, Canceled, Spawn};
 use std::{convert::Infallible, str::FromStr, time::Instant};
-use tracing_error::SpanTrace;
-use crate::{db::Db, future::LocalBoxFuture, spawner::Spawner};
+use crate::{db::Db, error::Error, future::LocalBoxFuture, spawner::Spawner};
 
 #[derive(Clone)]
 pub(crate) struct AdminConfig {
@@ -28,7 +24,7 @@ impl AdminConfig {
     }
 
     fn verify(&self, token: XApiToken) -> Result<bool, Error> {
-        bcrypt::verify(&token.0, &self.hashed_api_token).map_err(Error::bcrypt_verify)
+        bcrypt::verify(token.0, &self.hashed_api_token).map_err(Error::bcrypt_verify)
     }
 }
@@ -83,74 +79,42 @@ impl Admin {
     }
 }
 
-#[derive(Debug, thiserror::Error)]
-#[error("Failed authentication")]
-pub(crate) struct Error {
-    context: String,
-    #[source]
-    kind: ErrorKind,
-}
-
 impl Error {
     fn invalid() -> Self {
-        Error {
-            context: SpanTrace::capture().to_string(),
-            kind: ErrorKind::Invalid,
-        }
+        Error::from(ErrorKind::Invalid)
     }
 
     fn missing_config() -> Self {
-        Error {
-            context: SpanTrace::capture().to_string(),
-            kind: ErrorKind::MissingConfig,
-        }
+        Error::from(ErrorKind::MissingConfig)
     }
 
     fn missing_db() -> Self {
-        Error {
-            context: SpanTrace::capture().to_string(),
-            kind: ErrorKind::MissingDb,
-        }
+        Error::from(ErrorKind::MissingDb)
    }
 
     fn missing_spawner() -> Self {
-        Error {
-            context: SpanTrace::capture().to_string(),
-            kind: ErrorKind::MissingSpawner,
-        }
+        Error::from(ErrorKind::MissingSpawner)
     }
 
     fn bcrypt_verify(e: BcryptError) -> Self {
-        Error {
-            context: SpanTrace::capture().to_string(),
-            kind: ErrorKind::BCryptVerify(e),
-        }
+        Error::from(ErrorKind::BCryptVerify(e))
     }
 
     fn bcrypt_hash(e: BcryptError) -> Self {
-        Error {
-            context: SpanTrace::capture().to_string(),
-            kind: ErrorKind::BCryptHash(e),
-        }
+        Error::from(ErrorKind::BCryptHash(e))
     }
 
     fn parse_header(e: ParseError) -> Self {
-        Error {
-            context: SpanTrace::capture().to_string(),
-            kind: ErrorKind::ParseHeader(e),
-        }
+        Error::from(ErrorKind::ParseHeader(e))
     }
 
     fn canceled(_: Canceled) -> Self {
-        Error {
-            context: SpanTrace::capture().to_string(),
-            kind: ErrorKind::Canceled,
-        }
+        Error::from(ErrorKind::Canceled)
     }
 }
 
 #[derive(Debug, thiserror::Error)]
-enum ErrorKind {
+pub(crate) enum ErrorKind {
     #[error("Invalid API Token")]
     Invalid,
@@ -176,20 +140,6 @@ enum ErrorKind {
     ParseHeader(#[source] ParseError),
 }
 
-impl ResponseError for Error {
-    fn status_code(&self) -> StatusCode {
-        match self.kind {
-            ErrorKind::Invalid | ErrorKind::ParseHeader(_) => StatusCode::BAD_REQUEST,
-            _ => StatusCode::INTERNAL_SERVER_ERROR,
-        }
-    }
-
-    fn error_response(&self) -> HttpResponse {
-        HttpResponse::build(self.status_code())
-            .json(serde_json::json!({ "msg": self.kind.to_string() }))
-    }
-}
-
 impl FromRequest for Admin {
     type Error = Error;
     type Future = LocalBoxFuture<'static, Result<Self, Self::Error>>;
@@ -200,10 +150,8 @@ impl FromRequest for Admin {
         Box::pin(async move {
             let (db, c, s, t) = res?;
             Self::verify(c, s, t).await?;
-            metrics::histogram!(
-                "relay.admin.verify",
-                now.elapsed().as_micros() as f64 / 1_000_000_f64
-            );
+            metrics::histogram!("relay.admin.verify")
+                .record(now.elapsed().as_micros() as f64 / 1_000_000_f64);
             Ok(Admin { db })
         })
     }
@@ -215,6 +163,10 @@ impl XApiToken {
     pub(crate) fn new(token: String) -> Self {
         Self(token)
     }
+
+    pub(crate) const fn http1_name() -> reqwest::header::HeaderName {
+        reqwest::header::HeaderName::from_static("x-api-token")
+    }
 }
 
 impl Header for XApiToken {
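Side note: because `http1_name` is a `const fn`, admin-client code can attach the token header without re-parsing the name at runtime. A minimal sketch of a call site (the `client`, `url`, and `token` bindings are hypothetical, not part of this diff):

    // Hypothetical admin client: reuse the same header name the extractor expects.
    let response = client
        .post(url)
        .header(XApiToken::http1_name(), token)
        .send()
        .await?;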


@@ -1,3 +1,4 @@
 use std::{future::Future, pin::Pin};
 
 pub(crate) type LocalBoxFuture<'a, T> = Pin<Box<dyn Future<Output = T> + 'a>>;
+pub(crate) type BoxFuture<'a, T> = Pin<Box<dyn Future<Output = T> + Send + 'a>>;
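The new alias differs from `LocalBoxFuture` only by the `Send` bound, which is what lets the tokio-based job runner move these futures across worker threads. A quick illustration (hypothetical functions):

    // Send futures can hop threads, so they are tokio::spawn-able.
    fn spawnable() -> BoxFuture<'static, ()> {
        Box::pin(async {})
    }

    // Without Send, the future must stay on the thread that created it.
    fn thread_local_only() -> LocalBoxFuture<'static, ()> {
        Box::pin(async {})
    }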

src/http1.rs (new file)

@@ -0,0 +1,18 @@
+pub(crate) fn name_to_http02(
+    name: &reqwest::header::HeaderName,
+) -> actix_web::http::header::HeaderName {
+    actix_web::http::header::HeaderName::from_bytes(name.as_ref())
+        .expect("headername conversions always work")
+}
+
+pub(crate) fn value_to_http02(
+    value: &reqwest::header::HeaderValue,
+) -> actix_web::http::header::HeaderValue {
+    actix_web::http::header::HeaderValue::from_bytes(value.as_bytes())
+        .expect("headervalue conversions always work")
+}
+
+pub(crate) fn status_to_http02(status: reqwest::StatusCode) -> actix_web::http::StatusCode {
+    actix_web::http::StatusCode::from_u16(status.as_u16())
+        .expect("statuscode conversions always work")
+}
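These shims exist because actix-web still speaks http 0.2 while reqwest has moved on to http 1; both stacks validate the same header and status grammar, so the fallible conversions can only fail on a bug, which is what the `expect`s assert. A usage sketch (`res` is a hypothetical `reqwest::Response`):

    // Mirror a reqwest status into an actix-web response builder.
    let status = crate::http1::status_to_http02(res.status());
    let mut builder = actix_web::HttpResponse::build(status);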


@@ -19,8 +19,10 @@ use crate::{
     jobs::{process_listeners::Listeners, record_last_online::RecordLastOnline},
 };
 use background_jobs::{
-    memory_storage::{ActixTimer, Storage},
-    Job, QueueHandle, WorkerConfig,
+    memory_storage::{Storage, TokioTimer},
+    metrics::MetricsStorage,
+    tokio::{QueueHandle, WorkerConfig},
+    Job,
 };
 use std::time::Duration;
@@ -38,15 +40,20 @@ fn debug_object(activity: &serde_json::Value) -> &serde_json::Value {
     object
 }
 
+pub(crate) fn build_storage() -> MetricsStorage<Storage<TokioTimer>> {
+    MetricsStorage::wrap(Storage::new(TokioTimer))
+}
+
 pub(crate) fn create_workers(
+    storage: MetricsStorage<Storage<TokioTimer>>,
     state: State,
     actors: ActorCache,
     media: MediaCache,
     config: Config,
-) -> JobServer {
+) -> std::io::Result<JobServer> {
     let deliver_concurrency = config.deliver_concurrency();
 
-    let queue_handle = WorkerConfig::new(Storage::new(ActixTimer), move |queue_handle| {
+    let queue_handle = WorkerConfig::new(storage, move |queue_handle| {
         JobState::new(
             state.clone(),
             actors.clone(),
@@ -70,12 +77,12 @@ pub(crate) fn create_workers(
         .set_worker_count("maintenance", 2)
         .set_worker_count("apub", 2)
         .set_worker_count("deliver", deliver_concurrency)
-        .start();
+        .start()?;
 
-    queue_handle.every(Duration::from_secs(60 * 5), Listeners);
-    queue_handle.every(Duration::from_secs(60 * 10), RecordLastOnline);
+    queue_handle.every(Duration::from_secs(60 * 5), Listeners)?;
+    queue_handle.every(Duration::from_secs(60 * 10), RecordLastOnline)?;
 
-    JobServer::new(queue_handle)
+    Ok(JobServer::new(queue_handle))
 }
 
 #[derive(Clone, Debug)]
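With the storage created once and passed in, every `HttpServer` worker thread can share one job store instead of each building its own. A hypothetical startup wiring:

    // One shared store; create_workers is now fallible, so propagate the error.
    let job_store = jobs::build_storage();
    let job_server = jobs::create_workers(job_store.clone(), state, actors, media, config)?;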


@@ -2,14 +2,14 @@ use crate::{
     config::{Config, UrlKind},
     db::Actor,
     error::Error,
+    future::BoxFuture,
     jobs::{
         apub::{get_inboxes, prepare_activity},
         DeliverMany, JobState,
     },
 };
 use activitystreams::{activity::Announce as AsAnnounce, iri_string::types::IriString};
-use background_jobs::ActixJob;
-use std::{future::Future, pin::Pin};
+use background_jobs::Job;
 
 #[derive(Clone, serde::Deserialize, serde::Serialize)]
 pub(crate) struct Announce {
@@ -62,14 +62,15 @@ fn generate_announce(
     )
 }
 
-impl ActixJob for Announce {
+impl Job for Announce {
     type State = JobState;
-    type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
+    type Error = Error;
+    type Future = BoxFuture<'static, Result<(), Self::Error>>;
 
     const NAME: &'static str = "relay::jobs::apub::Announce";
     const QUEUE: &'static str = "apub";
 
     fn run(self, state: Self::State) -> Self::Future {
-        Box::pin(async move { self.perform(state).await.map_err(Into::into) })
+        Box::pin(self.perform(state))
     }
 }
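Every job below gets the same mechanical migration; the shape is easiest to see as a template (`MyJob` is a stand-in name, not a type from this diff):

    impl Job for MyJob {
        type State = JobState;
        // Errors now surface as the crate's own type instead of anyhow::Error.
        type Error = Error;
        // BoxFuture adds the Send bound the tokio runtime needs.
        type Future = BoxFuture<'static, Result<(), Self::Error>>;

        const NAME: &'static str = "relay::jobs::MyJob";
        const QUEUE: &'static str = "maintenance";

        fn run(self, state: Self::State) -> Self::Future {
            // perform() already returns Result<(), Error>, so no map_err is needed.
            Box::pin(self.perform(state))
        }
    }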


@@ -3,6 +3,7 @@ use crate::{
     config::{Config, UrlKind},
     db::Actor,
     error::{Error, ErrorKind},
+    future::BoxFuture,
     jobs::{apub::prepare_activity, Deliver, JobState, QueryInstance, QueryNodeinfo},
 };
 use activitystreams::{
@@ -10,8 +11,7 @@ use activitystreams::{
     iri_string::types::IriString,
     prelude::*,
 };
-use background_jobs::ActixJob;
-use std::{future::Future, pin::Pin};
+use background_jobs::Job;
 
 #[derive(Clone, serde::Deserialize, serde::Serialize)]
 pub(crate) struct Follow {
@@ -111,14 +111,15 @@ fn generate_accept_follow(
     )
 }
 
-impl ActixJob for Follow {
+impl Job for Follow {
     type State = JobState;
-    type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
+    type Error = Error;
+    type Future = BoxFuture<'static, Result<(), Self::Error>>;
 
     const NAME: &'static str = "relay::jobs::apub::Follow";
     const QUEUE: &'static str = "apub";
 
     fn run(self, state: Self::State) -> Self::Future {
-        Box::pin(async move { self.perform(state).await.map_err(Into::into) })
+        Box::pin(self.perform(state))
     }
 }


@@ -2,11 +2,11 @@ use crate::{
     apub::AcceptedActivities,
     db::Actor,
     error::{Error, ErrorKind},
+    future::BoxFuture,
     jobs::{apub::get_inboxes, DeliverMany, JobState},
 };
 use activitystreams::prelude::*;
-use background_jobs::ActixJob;
-use std::{future::Future, pin::Pin};
+use background_jobs::Job;
 
 #[derive(Clone, serde::Deserialize, serde::Serialize)]
 pub(crate) struct Forward {
@@ -47,14 +47,15 @@ impl Forward {
     }
 }
 
-impl ActixJob for Forward {
+impl Job for Forward {
     type State = JobState;
-    type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
+    type Error = Error;
+    type Future = BoxFuture<'static, Result<(), Self::Error>>;
 
     const NAME: &'static str = "relay::jobs::apub::Forward";
     const QUEUE: &'static str = "apub";
 
     fn run(self, state: Self::State) -> Self::Future {
-        Box::pin(async move { self.perform(state).await.map_err(Into::into) })
+        Box::pin(self.perform(state))
     }
 }


@@ -2,10 +2,10 @@ use crate::{
     config::UrlKind,
     db::Actor,
     error::Error,
+    future::BoxFuture,
     jobs::{apub::generate_undo_follow, Deliver, JobState},
 };
-use background_jobs::ActixJob;
-use std::{future::Future, pin::Pin};
+use background_jobs::Job;
 
 #[derive(Clone, serde::Deserialize, serde::Serialize)]
 pub(crate) struct Reject(pub(crate) Actor);
@@ -33,14 +33,15 @@ impl Reject {
     }
 }
 
-impl ActixJob for Reject {
+impl Job for Reject {
     type State = JobState;
-    type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
+    type Error = Error;
+    type Future = BoxFuture<'static, Result<(), Self::Error>>;
 
     const NAME: &'static str = "relay::jobs::apub::Reject";
     const QUEUE: &'static str = "apub";
 
     fn run(self, state: Self::State) -> Self::Future {
-        Box::pin(async move { self.perform(state).await.map_err(Into::into) })
+        Box::pin(self.perform(state))
     }
 }


@@ -3,11 +3,11 @@ use crate::{
     config::UrlKind,
     db::Actor,
     error::Error,
+    future::BoxFuture,
     jobs::{apub::generate_undo_follow, Deliver, JobState},
 };
 use activitystreams::prelude::BaseExt;
-use background_jobs::ActixJob;
-use std::{future::Future, pin::Pin};
+use background_jobs::Job;
 
 #[derive(Clone, serde::Deserialize, serde::Serialize)]
 pub(crate) struct Undo {
@@ -48,14 +48,15 @@ impl Undo {
     }
 }
 
-impl ActixJob for Undo {
+impl Job for Undo {
     type State = JobState;
-    type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
+    type Error = Error;
+    type Future = BoxFuture<'static, Result<(), Self::Error>>;
 
     const NAME: &'static str = "relay::jobs::apub::Undo";
     const QUEUE: &'static str = "apub";
 
     fn run(self, state: Self::State) -> Self::Future {
-        Box::pin(async move { self.perform(state).await.map_err(Into::into) })
+        Box::pin(self.perform(state))
     }
 }


@@ -1,12 +1,12 @@
 use crate::{
     apub::AcceptedActors,
     error::{Error, ErrorKind},
+    future::BoxFuture,
     jobs::JobState,
     requests::BreakerStrategy,
 };
 use activitystreams::{iri_string::types::IriString, object::Image, prelude::*};
-use background_jobs::ActixJob;
-use std::{future::Future, pin::Pin};
+use background_jobs::Job;
 
 #[derive(Clone, serde::Deserialize, serde::Serialize)]
 pub(crate) struct QueryContact {
@@ -85,15 +85,16 @@ fn to_contact(contact: AcceptedActors) -> Option<(String, String, IriString, Iri
     Some((username, display_name, url, avatar))
 }
 
-impl ActixJob for QueryContact {
+impl Job for QueryContact {
     type State = JobState;
-    type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
+    type Error = Error;
+    type Future = BoxFuture<'static, Result<(), Self::Error>>;
 
     const NAME: &'static str = "relay::jobs::QueryContact";
     const QUEUE: &'static str = "maintenance";
 
     fn run(self, state: Self::State) -> Self::Future {
-        Box::pin(async move { self.perform(state).await.map_err(Into::into) })
+        Box::pin(self.perform(state))
     }
 }


@@ -1,11 +1,11 @@
 use crate::{
     error::Error,
+    future::BoxFuture,
     jobs::{debug_object, JobState},
     requests::BreakerStrategy,
 };
 use activitystreams::iri_string::types::IriString;
-use background_jobs::{ActixJob, Backoff};
-use std::{future::Future, pin::Pin};
+use background_jobs::{Backoff, Job};
 
 #[derive(Clone, serde::Deserialize, serde::Serialize)]
 pub(crate) struct Deliver {
@@ -35,7 +35,7 @@ impl Deliver {
     }
 
     #[tracing::instrument(name = "Deliver", skip(state))]
-    async fn permform(self, state: JobState) -> Result<(), Error> {
+    async fn perform(self, state: JobState) -> Result<(), Error> {
         if let Err(e) = state
             .state
             .requests
@@ -56,15 +56,16 @@ impl Deliver {
     }
 }
 
-impl ActixJob for Deliver {
+impl Job for Deliver {
     type State = JobState;
-    type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
+    type Error = Error;
+    type Future = BoxFuture<'static, Result<(), Self::Error>>;
 
     const NAME: &'static str = "relay::jobs::Deliver";
     const QUEUE: &'static str = "deliver";
     const BACKOFF: Backoff = Backoff::Exponential(8);
 
     fn run(self, state: Self::State) -> Self::Future {
-        Box::pin(async move { self.permform(state).await.map_err(Into::into) })
+        Box::pin(self.perform(state))
     }
 }


@@ -1,10 +1,10 @@
 use crate::{
     error::Error,
-    future::LocalBoxFuture,
+    future::BoxFuture,
     jobs::{debug_object, Deliver, JobState},
 };
 use activitystreams::iri_string::types::IriString;
-use background_jobs::ActixJob;
+use background_jobs::Job;
 
 #[derive(Clone, serde::Deserialize, serde::Serialize)]
 pub(crate) struct DeliverMany {
@@ -45,14 +45,15 @@ impl DeliverMany {
     }
 }
 
-impl ActixJob for DeliverMany {
+impl Job for DeliverMany {
     type State = JobState;
-    type Future = LocalBoxFuture<'static, Result<(), anyhow::Error>>;
+    type Error = Error;
+    type Future = BoxFuture<'static, Result<(), Self::Error>>;
 
     const NAME: &'static str = "relay::jobs::DeliverMany";
     const QUEUE: &'static str = "deliver";
 
     fn run(self, state: Self::State) -> Self::Future {
-        Box::pin(async move { self.perform(state).await.map_err(Into::into) })
+        Box::pin(self.perform(state))
     }
 }


@@ -1,12 +1,12 @@
 use crate::{
     config::UrlKind,
     error::{Error, ErrorKind},
+    future::BoxFuture,
     jobs::{Boolish, JobState},
     requests::BreakerStrategy,
 };
 use activitystreams::{iri, iri_string::types::IriString};
-use background_jobs::ActixJob;
-use std::{future::Future, pin::Pin};
+use background_jobs::Job;
 
 #[derive(Clone, serde::Deserialize, serde::Serialize)]
 pub(crate) struct QueryInstance {
@@ -165,15 +165,16 @@ impl QueryInstance {
     }
 }
 
-impl ActixJob for QueryInstance {
+impl Job for QueryInstance {
     type State = JobState;
-    type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
+    type Error = Error;
+    type Future = BoxFuture<'static, Result<(), Self::Error>>;
 
     const NAME: &'static str = "relay::jobs::QueryInstance";
     const QUEUE: &'static str = "maintenance";
 
     fn run(self, state: Self::State) -> Self::Future {
-        Box::pin(async move { self.perform(state).await.map_err(Into::into) })
+        Box::pin(self.perform(state))
     }
 }


@@ -1,18 +1,18 @@
 use crate::{
     error::{Error, ErrorKind},
+    future::BoxFuture,
     jobs::{Boolish, JobState, QueryContact},
     requests::BreakerStrategy,
 };
 use activitystreams::{iri, iri_string::types::IriString, primitives::OneOrMany};
-use background_jobs::ActixJob;
-use std::{fmt::Debug, future::Future, pin::Pin};
+use background_jobs::Job;
 
 #[derive(Clone, serde::Deserialize, serde::Serialize)]
 pub(crate) struct QueryNodeinfo {
     actor_id: IriString,
 }
 
-impl Debug for QueryNodeinfo {
+impl std::fmt::Debug for QueryNodeinfo {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         f.debug_struct("QueryNodeinfo")
             .field("actor_id", &self.actor_id.to_string())
@@ -92,7 +92,7 @@ impl QueryNodeinfo {
             .metadata
             .and_then(|meta| meta.into_iter().next().and_then(|meta| meta.staff_accounts))
         {
-            if let Some(contact_id) = accounts.get(0) {
+            if let Some(contact_id) = accounts.first() {
                 state
                     .job_server
                     .queue(QueryContact::new(self.actor_id, contact_id.clone()))
@@ -104,15 +104,16 @@ impl QueryNodeinfo {
     }
 }
 
-impl ActixJob for QueryNodeinfo {
+impl Job for QueryNodeinfo {
     type State = JobState;
-    type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
+    type Error = Error;
+    type Future = BoxFuture<'static, Result<(), Self::Error>>;
 
     const NAME: &'static str = "relay::jobs::QueryNodeinfo";
     const QUEUE: &'static str = "maintenance";
 
     fn run(self, state: Self::State) -> Self::Future {
-        Box::pin(async move { self.perform(state).await.map_err(Into::into) })
+        Box::pin(self.perform(state))
     }
 }
@@ -155,7 +156,7 @@ struct Link {
 #[serde(untagged)]
 enum MaybeSupported<T> {
     Supported(T),
-    Unsupported(String),
+    Unsupported(#[allow(unused)] String),
 }
 
 impl<T> MaybeSupported<T> {
@@ -164,8 +165,8 @@ impl<T> MaybeSupported<T> {
     }
 }
 
-struct SupportedVersion(String);
-struct SupportedNodeinfo(String);
+struct SupportedVersion(#[allow(unused)] String);
+struct SupportedNodeinfo(#[allow(unused)] String);
 
 static SUPPORTED_VERSIONS: &str = "2.";
 static SUPPORTED_NODEINFO: &str = "http://nodeinfo.diaspora.software/ns/schema/2.";


@@ -1,9 +1,9 @@
 use crate::{
     error::Error,
+    future::BoxFuture,
     jobs::{instance::QueryInstance, nodeinfo::QueryNodeinfo, JobState},
 };
-use background_jobs::ActixJob;
-use std::{future::Future, pin::Pin};
+use background_jobs::Job;
 
 #[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
 pub(crate) struct Listeners;
@@ -23,14 +23,15 @@ impl Listeners {
     }
 }
 
-impl ActixJob for Listeners {
+impl Job for Listeners {
     type State = JobState;
-    type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
+    type Error = Error;
+    type Future = BoxFuture<'static, Result<(), Self::Error>>;
 
     const NAME: &'static str = "relay::jobs::Listeners";
     const QUEUE: &'static str = "maintenance";
 
     fn run(self, state: Self::State) -> Self::Future {
-        Box::pin(async move { self.perform(state).await.map_err(Into::into) })
+        Box::pin(self.perform(state))
     }
 }


@@ -1,6 +1,5 @@
-use crate::{error::Error, jobs::JobState};
-use background_jobs::{ActixJob, Backoff};
-use std::{future::Future, pin::Pin};
+use crate::{error::Error, future::BoxFuture, jobs::JobState};
+use background_jobs::{Backoff, Job};
 
 #[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
 pub(crate) struct RecordLastOnline;
@@ -14,15 +13,16 @@ impl RecordLastOnline {
     }
 }
 
-impl ActixJob for RecordLastOnline {
+impl Job for RecordLastOnline {
     type State = JobState;
-    type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
+    type Error = Error;
+    type Future = BoxFuture<'static, Result<(), Self::Error>>;
 
     const NAME: &'static str = "relay::jobs::RecordLastOnline";
     const QUEUE: &'static str = "maintenance";
     const BACKOFF: Backoff = Backoff::Linear(1);
 
     fn run(self, state: Self::State) -> Self::Future {
-        Box::pin(async move { self.perform(state).await.map_err(Into::into) })
+        Box::pin(self.perform(state))
     }
 }


@@ -4,7 +4,6 @@
 use std::time::Duration;
 
 use activitystreams::iri_string::types::IriString;
-use actix_rt::task::JoinHandle;
 use actix_web::{middleware::Compress, web, App, HttpServer};
 use collector::MemoryCollector;
 #[cfg(feature = "console")]
@@ -13,14 +12,16 @@ use error::Error;
 use http_signature_normalization_actix::middleware::VerifySignature;
 use metrics_exporter_prometheus::PrometheusBuilder;
 use metrics_util::layers::FanoutBuilder;
-use opentelemetry::{sdk::Resource, KeyValue};
+use opentelemetry::{trace::TracerProvider, KeyValue};
 use opentelemetry_otlp::WithExportConfig;
+use opentelemetry_sdk::Resource;
 use reqwest_middleware::ClientWithMiddleware;
 use rustls::ServerConfig;
+use tokio::task::JoinHandle;
 use tracing_actix_web::TracingLogger;
 use tracing_error::ErrorLayer;
 use tracing_log::LogTracer;
-use tracing_subscriber::{filter::Targets, fmt::format::FmtSpan, layer::SubscriberExt, Layer};
+use tracing_subscriber::{filter::Targets, layer::SubscriberExt, Layer};
 
 mod admin;
 mod apub;
@@ -32,11 +33,13 @@ mod db;
 mod error;
 mod extractors;
 mod future;
+mod http1;
 mod jobs;
 mod middleware;
 mod requests;
 mod routes;
 mod spawner;
+mod stream;
 mod telegram;
 
 use crate::config::UrlKind;
@@ -55,16 +58,15 @@ use self::{
 fn init_subscriber(
     software_name: &'static str,
     opentelemetry_url: Option<&IriString>,
-) -> Result<(), anyhow::Error> {
+) -> color_eyre::Result<()> {
     LogTracer::init()?;
+    color_eyre::install()?;
 
     let targets: Targets = std::env::var("RUST_LOG")
-        .unwrap_or_else(|_| "warn,actix_web=debug,actix_server=debug,tracing_actix_web=info".into())
+        .unwrap_or_else(|_| "info".into())
         .parse()?;
 
-    let format_layer = tracing_subscriber::fmt::layer()
-        .with_span_events(FmtSpan::NEW | FmtSpan::CLOSE)
-        .with_filter(targets.clone());
+    let format_layer = tracing_subscriber::fmt::layer().with_filter(targets.clone());
 
     #[cfg(feature = "console")]
     let console_layer = ConsoleLayer::builder()
@@ -81,21 +83,21 @@ fn init_subscriber(
     let subscriber = subscriber.with(console_layer);
 
     if let Some(url) = opentelemetry_url {
-        let tracer =
-            opentelemetry_otlp::new_pipeline()
-                .tracing()
-                .with_trace_config(opentelemetry::sdk::trace::config().with_resource(
-                    Resource::new(vec![KeyValue::new("service.name", software_name)]),
-                ))
-                .with_exporter(
-                    opentelemetry_otlp::new_exporter()
-                        .tonic()
-                        .with_endpoint(url.as_str()),
-                )
-                .install_batch(opentelemetry::runtime::Tokio)?;
+        let exporter = opentelemetry_otlp::SpanExporter::builder()
+            .with_tonic()
+            .with_endpoint(url.as_str())
+            .build()?;
+
+        let tracer_provider = opentelemetry_sdk::trace::TracerProvider::builder()
+            .with_resource(Resource::new(vec![KeyValue::new(
+                "service.name",
+                software_name,
+            )]))
+            .with_batch_exporter(exporter, opentelemetry_sdk::runtime::Tokio)
+            .build();
 
         let otel_layer = tracing_opentelemetry::layer()
-            .with_tracer(tracer)
+            .with_tracer(tracer_provider.tracer(software_name))
            .with_filter(targets);
 
         let subscriber = subscriber.with(otel_layer);
@@ -139,8 +141,8 @@ fn build_client(
     Ok(client_with_middleware)
 }
 
-#[actix_rt::main]
-async fn main() -> Result<(), anyhow::Error> {
+#[tokio::main]
+async fn main() -> color_eyre::Result<()> {
     dotenv::dotenv().ok();
 
     let config = Config::build()?;
@@ -150,7 +152,8 @@ async fn main() -> Result<(), anyhow::Error> {
     let args = Args::new();
 
     if args.any() {
-        return client_main(config, args).await?;
+        client_main(config, args).await??;
+        return Ok(());
     }
 
     let collector = MemoryCollector::new();
@@ -160,35 +163,35 @@ async fn main() -> Result<(), anyhow::Error> {
             .with_http_listener(bind_addr)
             .build()?;
 
-        actix_rt::spawn(exporter);
+        tokio::spawn(exporter);
 
         let recorder = FanoutBuilder::default()
            .add_recorder(recorder)
            .add_recorder(collector.clone())
            .build();
-        metrics::set_boxed_recorder(Box::new(recorder))?;
+        metrics::set_global_recorder(recorder).map_err(|e| color_eyre::eyre::eyre!("{e}"))?;
     } else {
         collector.install()?;
     }
 
-    tracing::warn!("Opening DB");
+    tracing::info!("Opening DB");
     let db = Db::build(&config)?;
 
-    tracing::warn!("Building caches");
+    tracing::info!("Building caches");
     let actors = ActorCache::new(db.clone());
     let media = MediaCache::new(db.clone());
 
-    server_main(db, actors, media, collector, config).await??;
+    server_main(db, actors, media, collector, config).await?;
 
-    tracing::warn!("Application exit");
+    tracing::info!("Application exit");
 
     Ok(())
 }
 
-fn client_main(config: Config, args: Args) -> JoinHandle<Result<(), anyhow::Error>> {
-    actix_rt::spawn(do_client_main(config, args))
+fn client_main(config: Config, args: Args) -> JoinHandle<color_eyre::Result<()>> {
+    tokio::spawn(do_client_main(config, args))
 }
 
-async fn do_client_main(config: Config, args: Args) -> Result<(), anyhow::Error> {
+async fn do_client_main(config: Config, args: Args) -> color_eyre::Result<()> {
     let client = build_client(
         &config.user_agent(),
         config.client_timeout(),
@@ -271,32 +274,22 @@ async fn do_client_main(config: Config, args: Args) -> Result<(), anyhow::Error>
     Ok(())
 }
 
-fn server_main(
-    db: Db,
-    actors: ActorCache,
-    media: MediaCache,
-    collector: MemoryCollector,
-    config: Config,
-) -> JoinHandle<Result<(), anyhow::Error>> {
-    actix_rt::spawn(do_server_main(db, actors, media, collector, config))
-}
-
 const VERIFY_RATIO: usize = 7;
 
-async fn do_server_main(
+async fn server_main(
     db: Db,
     actors: ActorCache,
     media: MediaCache,
     collector: MemoryCollector,
     config: Config,
-) -> Result<(), anyhow::Error> {
+) -> color_eyre::Result<()> {
     let client = build_client(
         &config.user_agent(),
         config.client_timeout(),
         config.proxy_config(),
     )?;
 
-    tracing::warn!("Creating state");
+    tracing::info!("Creating state");
 
     let (signature_threads, verify_threads) = match config.signature_threads() {
         0 | 1 => (1, 1),
@@ -309,23 +302,36 @@ async fn do_server_main(
         }
     };
 
-    let verify_spawner = Spawner::build("verify-cpu", verify_threads)?;
-    let sign_spawner = Spawner::build("sign-cpu", signature_threads)?;
+    let verify_spawner = Spawner::build("verify-cpu", verify_threads.try_into()?)?;
+    let sign_spawner = Spawner::build("sign-cpu", signature_threads.try_into()?)?;
 
     let key_id = config.generate_url(UrlKind::MainKey).to_string();
-    let state = State::build(db.clone(), key_id, sign_spawner, client).await?;
+    let state = State::build(db.clone(), key_id, sign_spawner.clone(), client).await?;
 
     if let Some((token, admin_handle)) = config.telegram_info() {
-        tracing::warn!("Creating telegram handler");
+        tracing::info!("Creating telegram handler");
         telegram::start(admin_handle.to_owned(), db.clone(), token);
     }
 
-    let keys = config.open_keys()?;
+    let cert_resolver = config
+        .open_keys()
+        .await?
+        .map(rustls_channel_resolver::channel::<32>);
 
     let bind_address = config.bind_address();
+    let sign_spawner2 = sign_spawner.clone();
+    let verify_spawner2 = verify_spawner.clone();
+    let config2 = config.clone();
+    let job_store = jobs::build_storage();
     let server = HttpServer::new(move || {
-        let job_server =
-            create_workers(state.clone(), actors.clone(), media.clone(), config.clone());
+        let job_server = create_workers(
+            job_store.clone(),
+            state.clone(),
+            actors.clone(),
+            media.clone(),
+            config.clone(),
+        )
+        .expect("Failed to create job server");
 
         let app = App::new()
             .app_data(web::Data::new(db.clone()))
@@ -391,24 +397,42 @@ async fn do_server_main(
         )
     });
 
-    if let Some((certs, key)) = keys {
-        tracing::warn!("Binding to {}:{} with TLS", bind_address.0, bind_address.1);
+    if let Some((cert_tx, cert_rx)) = cert_resolver {
+        let handle = tokio::spawn(async move {
+            let mut interval = tokio::time::interval(Duration::from_secs(30));
+            interval.tick().await;
+
+            loop {
+                interval.tick().await;
+
+                match config2.open_keys().await {
+                    Ok(Some(key)) => cert_tx.update(key),
+                    Ok(None) => tracing::warn!("Missing TLS keys"),
+                    Err(e) => tracing::error!("Failed to read TLS keys {e}"),
+                }
+            }
+        });
+
+        tracing::info!("Binding to {}:{} with TLS", bind_address.0, bind_address.1);
         let server_config = ServerConfig::builder()
-            .with_safe_default_cipher_suites()
-            .with_safe_default_kx_groups()
-            .with_safe_default_protocol_versions()?
             .with_no_client_auth()
-            .with_single_cert(certs, key)?;
+            .with_cert_resolver(cert_rx);
         server
-            .bind_rustls_021(bind_address, server_config)?
+            .bind_rustls_0_23(bind_address, server_config)?
             .run()
            .await?;
+
+        handle.abort();
+        let _ = handle.await;
     } else {
-        tracing::warn!("Binding to {}:{}", bind_address.0, bind_address.1);
+        tracing::info!("Binding to {}:{}", bind_address.0, bind_address.1);
         server.bind(bind_address)?.run().await?;
     }
 
-    tracing::warn!("Server closed");
+    sign_spawner2.close().await;
+    verify_spawner2.close().await;
+
+    tracing::info!("Server closed");
 
     Ok(())
 }


@@ -40,7 +40,7 @@ impl Drop for LogOnDrop {
     fn drop(&mut self) {
         if self.arm {
             let duration = self.begin.elapsed();
-            metrics::histogram!("relay.request.complete", duration, "path" => self.path.clone(), "method" => self.method.clone());
+            metrics::histogram!("relay.request.complete", "path" => self.path.clone(), "method" => self.method.clone()).record(duration);
         }
     }
 }
@@ -80,7 +80,7 @@ where
     fn call(&self, req: ServiceRequest) -> Self::Future {
         let log_on_drop = LogOnDrop {
             begin: Instant::now(),
-            path: req.path().to_string(),
+            path: format!("{:?}", req.match_pattern()),
             method: req.method().to_string(),
             arm: false,
         };
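This reflects the newer `metrics` macro API, where the macros return a handle to record on rather than taking the value as an argument. A minimal sketch of the old and new shapes:

    // Old style: value passed inside the macro.
    // metrics::histogram!("relay.example.seconds", 0.25, "kind" => "demo");

    // New style: the macro returns a handle; record/increment on it.
    metrics::histogram!("relay.example.seconds", "kind" => "demo").record(0.25);
    metrics::counter!("relay.example.count").increment(1);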


@@ -2,6 +2,7 @@ use crate::{
     data::LastOnline,
     error::{Error, ErrorKind},
     spawner::Spawner,
+    stream::{aggregate, limit_stream},
 };
 use activitystreams::iri_string::types::IriString;
 use actix_web::http::header::Date;
@@ -24,6 +25,9 @@ const ONE_MINUTE: u64 = 60 * ONE_SECOND;
 const ONE_HOUR: u64 = 60 * ONE_MINUTE;
 const ONE_DAY: u64 = 24 * ONE_HOUR;
 
+// 20 KB
+const JSON_SIZE_LIMIT: usize = 20 * 1024;
+
 #[derive(Debug)]
 pub(crate) enum BreakerStrategy {
     // Requires a successful response
@@ -229,7 +233,11 @@ impl Requests {
                 }
             }
 
-            return Err(ErrorKind::Status(parsed_url.to_string(), status).into());
+            return Err(ErrorKind::Status(
+                parsed_url.to_string(),
+                crate::http1::status_to_http02(status),
+            )
+            .into());
         }
 
         // only actually succeed a breaker on 2xx response
@@ -262,7 +270,7 @@ impl Requests {
     where
         T: serde::de::DeserializeOwned,
     {
-        let body = self
+        let stream = self
             .do_deliver(
                 url,
                 &serde_json::json!({}),
@@ -271,8 +279,9 @@ impl Requests {
                 strategy,
             )
             .await?
-            .bytes()
-            .await?;
+            .bytes_stream();
+
+        let body = aggregate(limit_stream(stream, JSON_SIZE_LIMIT)).await?;
 
         Ok(serde_json::from_slice(&body)?)
     }
@@ -299,11 +308,12 @@ impl Requests {
     where
         T: serde::de::DeserializeOwned,
     {
-        let body = self
+        let stream = self
             .do_fetch_response(url, accept, strategy)
             .await?
-            .bytes()
-            .await?;
+            .bytes_stream();
+
+        let body = aggregate(limit_stream(stream, JSON_SIZE_LIMIT)).await?;
 
         Ok(serde_json::from_slice(&body)?)
     }
@@ -432,7 +442,7 @@ struct Signer {
 impl Signer {
     fn sign(&self, signing_string: &str) -> Result<String, Error> {
-        let mut signature = vec![0; self.private_key.public_modulus_len()];
+        let mut signature = vec![0; self.private_key.public().modulus_len()];
         self.private_key
             .sign(


@@ -14,10 +14,11 @@ const MINIFY_CONFIG: minify_html::Cfg = minify_html::Cfg {
     keep_html_and_head_opening_tags: false,
     keep_spaces_between_attributes: true,
     keep_comments: false,
+    keep_input_type_text_attr: true,
+    keep_ssi_comments: false,
+    preserve_brace_template_syntax: false,
+    preserve_chevron_percent_template_syntax: false,
     minify_css: true,
-    minify_css_level_1: true,
-    minify_css_level_2: false,
-    minify_css_level_3: false,
     minify_js: true,
     remove_bangs: true,
     remove_processing_instructions: true,


@@ -2,10 +2,14 @@ use crate::{
     data::MediaCache,
     error::Error,
     requests::{BreakerStrategy, Requests},
+    stream::limit_stream,
 };
 use actix_web::{body::BodyStream, web, HttpResponse};
 use uuid::Uuid;
 
+// 16 MB
+const IMAGE_SIZE_LIMIT: usize = 16 * 1024 * 1024;
+
 #[tracing::instrument(name = "Media", skip(media, requests))]
 pub(crate) async fn route(
     media: web::Data<MediaCache>,
@@ -19,13 +23,19 @@ pub(crate) async fn route(
             .fetch_response(&url, BreakerStrategy::Allow404AndBelow)
             .await?;
 
-        let mut response = HttpResponse::build(res.status());
+        let mut response = HttpResponse::build(crate::http1::status_to_http02(res.status()));
 
         for (name, value) in res.headers().iter().filter(|(h, _)| *h != "connection") {
-            response.insert_header((name.clone(), value.clone()));
+            response.insert_header((
+                crate::http1::name_to_http02(name),
+                crate::http1::value_to_http02(value),
+            ));
         }
 
-        return Ok(response.body(BodyStream::new(res.bytes_stream())));
+        return Ok(response.body(BodyStream::new(limit_stream(
+            res.bytes_stream(),
+            IMAGE_SIZE_LIMIT,
+        ))));
     }
 
     Ok(HttpResponse::NotFound().finish())


@@ -1,107 +1,30 @@
+use async_cpupool::CpuPool;
 use http_signature_normalization_actix::{Canceled, Spawn};
-use std::{
-    panic::AssertUnwindSafe,
-    sync::Arc,
-    thread::JoinHandle,
-    time::{Duration, Instant},
-};
+use std::time::Duration;
 
-fn spawner_thread(
-    receiver: flume::Receiver<Box<dyn FnOnce() + Send>>,
-    name: &'static str,
-    id: usize,
-) {
-    let guard = MetricsGuard::guard(name, id);
-
-    while let Ok(f) = receiver.recv() {
-        let start = Instant::now();
-        metrics::increment_counter!(format!("relay.{name}.operation.start"), "id" => id.to_string());
-
-        let res = std::panic::catch_unwind(AssertUnwindSafe(f));
-
-        metrics::increment_counter!(format!("relay.{name}.operation.end"), "complete" => res.is_ok().to_string(), "id" => id.to_string());
-        metrics::histogram!(format!("relay.{name}.operation.duration"), start.elapsed().as_secs_f64(), "complete" => res.is_ok().to_string(), "id" => id.to_string());
-
-        if let Err(e) = res {
-            tracing::warn!("{name} fn panicked: {e:?}");
-        }
-    }
-
-    guard.disarm();
-}
-
-#[derive(Clone, Debug)]
+#[derive(Clone)]
 pub(crate) struct Spawner {
-    name: &'static str,
-    sender: Option<flume::Sender<Box<dyn FnOnce() + Send>>>,
-    threads: Option<Arc<Vec<JoinHandle<()>>>>,
-}
-
-struct MetricsGuard {
-    name: &'static str,
-    id: usize,
-    start: Instant,
-    armed: bool,
-}
-
-impl MetricsGuard {
-    fn guard(name: &'static str, id: usize) -> Self {
-        metrics::increment_counter!(format!("relay.{name}.launched"), "id" => id.to_string());
-
-        Self {
-            name,
-            id,
-            start: Instant::now(),
-            armed: true,
-        }
-    }
-
-    fn disarm(mut self) {
-        self.armed = false;
-    }
-}
-
-impl Drop for MetricsGuard {
-    fn drop(&mut self) {
-        metrics::increment_counter!(format!("relay.{}.closed", self.name), "clean" => (!self.armed).to_string(), "id" => self.id.to_string());
-        metrics::histogram!(format!("relay.{}.duration", self.name), self.start.elapsed().as_secs_f64(), "clean" => (!self.armed).to_string(), "id" => self.id.to_string());
-        tracing::warn!("Stopping {} - {}", self.name, self.id);
-    }
+    pool: CpuPool,
 }
 
 impl Spawner {
-    pub(crate) fn build(name: &'static str, threads: usize) -> std::io::Result<Self> {
-        let (sender, receiver) = flume::bounded(8);
+    pub(crate) fn build(name: &'static str, threads: u16) -> color_eyre::Result<Self> {
+        let pool = CpuPool::configure()
+            .name(name)
+            .max_threads(threads)
+            .build()?;
 
-        tracing::warn!("Launching {threads} {name}s");
+        Ok(Spawner { pool })
+    }
 
-        let threads = (0..threads)
-            .map(|i| {
-                let receiver = receiver.clone();
-                std::thread::Builder::new()
-                    .name(format!("{name}-{i}"))
-                    .spawn(move || {
-                        spawner_thread(receiver, name, i);
-                    })
-            })
-            .collect::<Result<Vec<_>, _>>()?;
-
-        Ok(Spawner {
-            name,
-            sender: Some(sender),
-            threads: Some(Arc::new(threads)),
-        })
+    pub(crate) async fn close(self) {
+        self.pool.close().await;
     }
 }
 
-impl Drop for Spawner {
-    fn drop(&mut self) {
-        self.sender.take();
-
-        if let Some(threads) = self.threads.take().and_then(Arc::into_inner) {
-            tracing::warn!("Joining {}s", self.name);
-
-            for thread in threads {
-                let _ = thread.join();
-            }
-        }
+impl std::fmt::Debug for Spawner {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("Spawner").finish()
     }
 }
@@ -111,9 +34,9 @@ where
 {
     let id = uuid::Uuid::new_v4();
 
-    metrics::increment_counter!("relay.spawner.wait-timer.start");
+    metrics::counter!("relay.spawner.wait-timer.start").increment(1);
 
-    let mut interval = actix_rt::time::interval(Duration::from_secs(5));
+    let mut interval = tokio::time::interval(Duration::from_secs(5));
 
     // pass the first tick (instant)
     interval.tick().await;
@@ -124,12 +47,12 @@ where
     loop {
         tokio::select! {
             out = &mut fut => {
-                metrics::increment_counter!("relay.spawner.wait-timer.end");
+                metrics::counter!("relay.spawner.wait-timer.end").increment(1);
                 return out;
             }
             _ = interval.tick() => {
                 counter += 1;
-                metrics::increment_counter!("relay.spawner.wait-timer.pending");
+                metrics::counter!("relay.spawner.wait-timer.pending").increment(1);
                 tracing::warn!("Blocking operation {id} is taking a long time, {} seconds", counter * 5);
             }
         }
@@ -144,22 +67,9 @@ impl Spawn for Spawner {
         Func: FnOnce() -> Out + Send + 'static,
         Out: Send + 'static,
     {
-        let sender = self.sender.as_ref().expect("Sender exists").clone();
+        let pool = self.pool.clone();
 
-        Box::pin(async move {
-            let (tx, rx) = flume::bounded(1);
-
-            let _ = sender
-                .send_async(Box::new(move || {
-                    if tx.try_send((func)()).is_err() {
-                        tracing::warn!("Requestor hung up");
-                        metrics::increment_counter!("relay.spawner.disconnected");
-                    }
-                }))
-                .await;
-
-            timer(rx.recv_async()).await.map_err(|_| Canceled)
-        })
+        Box::pin(async move { timer(pool.spawn(func)).await.map_err(|_| Canceled) })
     }
 }
@@ -171,21 +81,10 @@ impl http_signature_normalization_reqwest::Spawn for Spawner {
         Func: FnOnce() -> Out + Send + 'static,
         Out: Send + 'static,
     {
-        let sender = self.sender.as_ref().expect("Sender exists").clone();
+        let pool = self.pool.clone();
 
         Box::pin(async move {
-            let (tx, rx) = flume::bounded(1);
-
-            let _ = sender
-                .send_async(Box::new(move || {
-                    if tx.try_send((func)()).is_err() {
-                        tracing::warn!("Requestor hung up");
-                        metrics::increment_counter!("relay.spawner.disconnected");
-                    }
-                }))
-                .await;
-
-            timer(rx.recv_async())
+            timer(pool.spawn(func))
                 .await
                 .map_err(|_| http_signature_normalization_reqwest::Canceled)
         })

src/stream.rs (new file)

@@ -0,0 +1,59 @@
+use crate::error::{Error, ErrorKind};
+use actix_web::web::{Bytes, BytesMut};
+use futures_core::Stream;
+use streem::IntoStreamer;
+
+pub(crate) fn limit_stream<'a, S>(
+    input: S,
+    limit: usize,
+) -> impl Stream<Item = Result<Bytes, Error>> + Send + 'a
+where
+    S: Stream<Item = reqwest::Result<Bytes>> + Send + 'a,
+{
+    streem::try_from_fn(move |yielder| async move {
+        let stream = std::pin::pin!(input);
+        let mut stream = stream.into_streamer();
+
+        let mut count = 0;
+
+        while let Some(bytes) = stream.try_next().await? {
+            count += bytes.len();
+
+            if count > limit {
+                return Err(ErrorKind::BodyTooLarge.into());
+            }
+
+            yielder.yield_ok(bytes).await;
+        }
+
+        Ok(())
+    })
+}
+
+pub(crate) async fn aggregate<S>(input: S) -> Result<Bytes, Error>
+where
+    S: Stream<Item = Result<Bytes, Error>>,
+{
+    let stream = std::pin::pin!(input);
+    let mut streamer = stream.into_streamer();
+
+    let mut buf = Vec::new();
+
+    while let Some(bytes) = streamer.try_next().await? {
+        buf.push(bytes);
+    }
+
+    if buf.len() == 1 {
+        return Ok(buf.pop().expect("buf has exactly one element"));
+    }
+
+    let total_size: usize = buf.iter().map(|b| b.len()).sum();
+
+    let mut bytes_mut = BytesMut::with_capacity(total_size);
+
+    for bytes in &buf {
+        bytes_mut.extend_from_slice(&bytes);
+    }
+
+    Ok(bytes_mut.freeze())
+}
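Together these back the new download limits: `limit_stream` fails fast once a body exceeds the cap, and `aggregate` only buffers what was allowed through. A usage sketch (`res` is a hypothetical `reqwest::Response`; 20 KiB mirrors `JSON_SIZE_LIMIT`):

    // Bound the body before buffering it, then parse.
    let body = aggregate(limit_stream(res.bytes_stream(), 20 * 1024)).await?;
    let value: serde_json::Value = serde_json::from_slice(&body)?;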


@@ -46,7 +46,7 @@ pub(crate) fn start(admin_handle: String, db: Db, token: &str) {
     let bot = Bot::new(token);
     let admin_handle = Arc::new(admin_handle);
 
-    actix_rt::spawn(async move {
+    tokio::spawn(async move {
         let command_handler = teloxide::filter_command::<Command, _>().endpoint(
             move |bot: Bot, msg: Message, cmd: Command| {
                 let admin_handle = admin_handle.clone();
@@ -75,7 +75,8 @@ pub(crate) fn start(admin_handle: String, db: Db, token: &str) {
 fn is_admin(admin_handle: &str, message: &Message) -> bool {
     message
-        .from()
+        .from
+        .as_ref()
         .and_then(|user| user.username.as_deref())
         .map(|username| username == admin_handle)
         .unwrap_or(false)