Compare commits

main..v0.3.20

No commits in common. "main" and "v0.3.20" have entirely different histories.

83 changed files with 3560 additions and 8471 deletions

.cargo/config Normal file

@@ -0,0 +1,2 @@
[build]
# rustflags = ["--cfg", "tokio_unstable"]
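The commented `rustflags` line above is what tokio-console support hinges on: console-subscriber only collects task data when tokio is compiled with `--cfg tokio_unstable`. A minimal sketch of how the optional `console` feature from the Cargo.toml diff further down is typically wired up (the function name here is illustrative, not taken from the relay source):

```rust
// Sketch: gate tokio-console on the optional `console` cargo feature.
#[cfg(feature = "console")]
pub fn init_tracing() {
    // Only useful when built with RUSTFLAGS="--cfg tokio_unstable",
    // i.e. the line commented out in .cargo/config above.
    console_subscriber::init();
}

#[cfg(not(feature = "console"))]
pub fn init_tracing() {
    // Plain fmt subscriber when the feature is disabled.
    tracing_subscriber::fmt().init();
}
```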


@@ -1,2 +0,0 @@
[build]
rustflags = ["--cfg", "tokio_unstable"]

.drone.yml Normal file

@@ -0,0 +1,421 @@
kind: pipeline
type: docker
name: clippy
platform:
arch: amd64
clone:
disable: true
steps:
- name: clone
image: alpine/git:latest
user: root
commands:
- git clone $DRONE_GIT_HTTP_URL .
- git checkout $DRONE_COMMIT
- chown -R 991:991 .
- name: clippy
image: asonix/rust-builder:latest-linux-amd64
pull: always
commands:
- rustup component add clippy
- cargo clippy -- -D warnings
trigger:
event:
- push
- pull_request
- tag
---
kind: pipeline
type: docker
name: tests
platform:
arch: amd64
clone:
disable: true
steps:
- name: clone
image: alpine/git:latest
user: root
commands:
- git clone $DRONE_GIT_HTTP_URL .
- git checkout $DRONE_COMMIT
- chown -R 991:991 .
- name: tests
image: asonix/rust-builder:latest-linux-amd64
pull: always
commands:
- cargo test
trigger:
event:
- push
- pull_request
- tag
---
kind: pipeline
type: docker
name: check-amd64
platform:
arch: amd64
clone:
disable: true
steps:
- name: clone
image: alpine/git:latest
user: root
commands:
- git clone $DRONE_GIT_HTTP_URL .
- git checkout $DRONE_COMMIT
- chown -R 991:991 .
- name: check
image: asonix/rust-builder:latest-linux-amd64
pull: always
commands:
- cargo check --target=$TARGET
trigger:
event:
- push
- pull_request
---
kind: pipeline
type: docker
name: build-amd64
platform:
arch: amd64
clone:
disable: true
steps:
- name: clone
image: alpine/git:latest
user: root
commands:
- git clone $DRONE_GIT_HTTP_URL .
- git checkout $DRONE_COMMIT
- chown -R 991:991 .
- name: build
image: asonix/rust-builder:latest-linux-amd64
pull: always
commands:
- cargo build --target=$TARGET --release
- $TOOL-strip target/$TARGET/release/relay
- cp target/$TARGET/release/relay .
- cp relay relay-linux-amd64
- name: push
image: plugins/docker:20
settings:
username: asonix
password:
from_secret: dockerhub_token
repo: asonix/relay
dockerfile: docker/drone/Dockerfile
auto_tag: true
auto_tag_suffix: linux-amd64
build_args:
- REPO_ARCH=amd64
- name: publish
image: plugins/gitea-release:1
settings:
api_key:
from_secret: gitea_token
base_url: https://git.asonix.dog
files:
- relay-linux-amd64
depends_on:
- clippy
- tests
trigger:
event:
- tag
---
kind: pipeline
type: docker
name: check-arm64v8
platform:
arch: amd64
clone:
disable: true
steps:
- name: clone
image: alpine/git:latest
user: root
commands:
- git clone $DRONE_GIT_HTTP_URL .
- git checkout $DRONE_COMMIT
- chown -R 991:991 .
- name: check
image: asonix/rust-builder:latest-linux-arm64v8
pull: always
commands:
- cargo check --target=$TARGET
trigger:
event:
- push
- pull_request
---
kind: pipeline
type: docker
name: build-arm64v8
platform:
arch: amd64
clone:
disable: true
steps:
- name: clone
image: alpine/git:latest
user: root
commands:
- git clone $DRONE_GIT_HTTP_URL .
- git checkout $DRONE_COMMIT
- chown -R 991:991 .
- name: build
image: asonix/rust-builder:latest-linux-arm64v8
pull: always
commands:
- cargo build --target=$TARGET --release
- $TOOL-strip target/$TARGET/release/relay
- cp target/$TARGET/release/relay .
- cp relay relay-linux-arm64v8
- name: push
image: plugins/docker:20
settings:
username: asonix
password:
from_secret: dockerhub_token
repo: asonix/relay
dockerfile: docker/drone/Dockerfile
auto_tag: true
auto_tag_suffix: linux-arm64v8
build_args:
- REPO_ARCH=arm64v8
- name: publish
image: plugins/gitea-release:1
settings:
api_key:
from_secret: gitea_token
base_url: https://git.asonix.dog
files:
- relay-linux-arm64v8
depends_on:
- clippy
- tests
trigger:
event:
- tag
---
kind: pipeline
type: docker
name: check-arm32v7
platform:
arch: amd64
clone:
disable: true
steps:
- name: clone
image: alpine/git:latest
user: root
commands:
- git clone $DRONE_GIT_HTTP_URL .
- git checkout $DRONE_COMMIT
- chown -R 991:991 .
- name: check
image: asonix/rust-builder:latest-linux-arm32v7
pull: always
commands:
- cargo check --target=$TARGET
trigger:
event:
- push
- pull_request
---
kind: pipeline
type: docker
name: build-arm32v7
platform:
arch: amd64
clone:
disable: true
steps:
- name: clone
image: alpine/git:latest
user: root
commands:
- git clone $DRONE_GIT_HTTP_URL .
- git checkout $DRONE_COMMIT
- chown -R 991:991 .
- name: build
image: asonix/rust-builder:latest-linux-arm32v7
pull: always
commands:
- cargo build --target=$TARGET --release
- $TOOL-strip target/$TARGET/release/relay
- cp target/$TARGET/release/relay .
- cp relay relay-linux-arm32v7
- name: push
image: plugins/docker:20
settings:
username: asonix
password:
from_secret: dockerhub_token
repo: asonix/relay
dockerfile: docker/drone/Dockerfile
auto_tag: true
auto_tag_suffix: linux-arm32v7
build_args:
- REPO_ARCH=arm32v7
- name: publish
image: plugins/gitea-release:1
settings:
api_key:
from_secret: gitea_token
base_url: https://git.asonix.dog
files:
- relay-linux-arm32v7
depends_on:
- clippy
- tests
trigger:
event:
- tag
---
kind: pipeline
type: docker
name: manifest
platform:
arch: amd64
clone:
disable: true
steps:
- name: clone
image: alpine/git:latest
user: root
commands:
- git clone $DRONE_GIT_HTTP_URL .
- git checkout $DRONE_COMMIT
- chown -R 991:991 .
- name: manifest
image: plugins/manifest:1
settings:
username: asonix
password:
from_secret: dockerhub_token
dump: true
auto_tag: true
ignore_missing: true
spec: docker/drone/manifest.tmpl
depends_on:
- build-amd64
- build-arm64v8
- build-arm32v7
trigger:
event:
- tag
---
kind: pipeline
type: docker
name: publish-crate
platform:
arch: amd64
clone:
disable: true
steps:
- name: clone
image: alpine/git:latest
user: root
commands:
- git clone $DRONE_GIT_HTTP_URL .
- git checkout $DRONE_COMMIT
- chown -R 991:991 .
- name: publish
image: asonix/rust-builder:latest-linux-amd64
pull: always
environment:
CRATES_IO_TOKEN:
from_secret: crates_io_token
commands:
- cargo publish --token $CRATES_IO_TOKEN
depends_on:
- build-amd64
- build-arm64v8
- build-arm32v7
trigger:
event:
- tag

.env

@@ -1,13 +1,4 @@
 HOSTNAME=localhost:8079
 PORT=8079
-HTTPS=false
-DEBUG=true
 RESTRICTED_MODE=true
-VALIDATE_SIGNATURES=false
-API_TOKEN=somesecretpassword
-FOOTER_BLURB="Contact <a href=\"https://masto.asonix.dog/@asonix\">@asonix</a> for inquiries"
-LOCAL_DOMAINS="masto.asonix.dog"
-LOCAL_BLURB="<p>Welcome to my cool relay where I have cool relay things happening. I hope you enjoy your stay!</p>"
 # OPENTELEMETRY_URL=http://localhost:4317
-PROMETHEUS_ADDR=127.0.0.1
-PROMETHEUS_PORT=9000
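Both versions of this `.env` file are consumed the same way: the relay depends on `dotenv`, which loads the file into the process environment, and individual settings are then read back with `std::env::var`. A minimal, self-contained sketch of that pattern (variable names from the file above; the defaults are illustrative):

```rust
// Sketch: load .env, then read settings from the process environment.
fn main() {
    // A missing .env file is fine; real environment variables still apply.
    dotenv::dotenv().ok();

    let hostname =
        std::env::var("HOSTNAME").unwrap_or_else(|_| "localhost:8079".to_string());
    let restricted_mode = std::env::var("RESTRICTED_MODE")
        .map(|v| v == "true")
        .unwrap_or(false);

    println!("hostname={hostname} restricted_mode={restricted_mode}");
}
```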


@@ -1,61 +0,0 @@
on:
push:
branches:
- '*'
pull_request:
branches:
- main
jobs:
clippy:
runs-on: docker
container:
image: docker.io/asonix/actions-base-image:0.1
steps:
-
name: Checkout relay
uses: https://github.com/actions/checkout@v4
-
name: Cargo Cache
uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
-
name: Clippy
run: |
cargo clippy --no-default-features -- -D warnings
tests:
runs-on: docker
container:
image: docker.io/asonix/actions-base-image:0.1
steps:
-
name: Checkout relay
uses: https://github.com/actions/checkout@v4
-
name: Cargo Cache
uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
-
name: Test
run: cargo test
check:
strategy:
fail-fast: false
matrix:
target:
- x86_64-unknown-linux-musl
- armv7-unknown-linux-musleabihf
- aarch64-unknown-linux-musl
runs-on: docker
container:
image: docker.io/asonix/actions-base-image:0.1
steps:
-
name: Checkout relay
uses: https://github.com/actions/checkout@v4
-
name: Cargo Cache
uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
-
name: Debug builds
run: cargo zigbuild --target ${{ matrix.target }}


@@ -1,226 +0,0 @@
on:
push:
tags:
- 'v*.*.*'
env:
REGISTRY_IMAGE: asonix/relay
jobs:
clippy:
runs-on: base-image
container:
image: docker.io/asonix/actions-base-image:0.1
steps:
-
name: Checkout relay
uses: https://github.com/actions/checkout@v4
-
name: Cargo Cache
uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
-
name: Clippy
run: |
# cargo clippy --no-default-features -- -D warnings
cargo clippy --no-default-features
tests:
runs-on: docker
container:
image: docker.io/asonix/actions-base-image:0.1
steps:
-
name: Checkout relay
uses: https://github.com/actions/checkout@v4
-
name: Cargo Cache
uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
-
name: Test
run: cargo test
build:
needs:
- clippy
- tests
runs-on: docker
container:
image: docker.io/asonix/actions-base-image:0.1
strategy:
fail-fast: false
matrix:
info:
- target: x86_64-unknown-linux-musl
artifact: linux-amd64
platform: linux/amd64
- target: armv7-unknown-linux-musleabihf
artifact: linux-arm32v7
platform: linux/arm/v7
- target: aarch64-unknown-linux-musl
artifact: linux-arm64v8
platform: linux/arm64
steps:
-
name: Checkout relay
uses: https://github.com/actions/checkout@v4
-
name: Cargo Cache
uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
-
name: Prepare Platform
run: |
platform=${{ matrix.info.platform }}
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
shell: bash
-
name: Docker meta
id: meta
uses: https://github.com/docker/metadata-action@v5
with:
images: ${{ env.REGISTRY_IMAGE }}
flavor: |
latest=auto
suffix=-${{ matrix.info.artifact }}
tags: |
type=raw,value=latest,enable={{ is_default_branch }}
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=semver,pattern={{major}}
-
name: Set up QEMU
uses: https://github.com/docker/setup-qemu-action@v3
-
name: Set up Docker Buildx
uses: https://github.com/docker/setup-buildx-action@v3
-
name: Docker login
uses: https://github.com/docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
-
name: Compile relay
run: cargo zigbuild --target ${{ matrix.info.target }} --release
-
name: Prepare artifacts
run: |
mkdir artifacts
cp target/${{ matrix.info.target }}/release/relay artifacts/relay-${{ matrix.info.artifact }}
-
uses: https://github.com/actions/upload-artifact@v3
with:
name: binaries
path: artifacts/
-
name: Prepare binary
run: |
cp target/${{ matrix.info.target }}/release/relay docker/forgejo/relay
-
name: Build and push ${{ matrix.info.platform }} docker image
id: build
uses: docker/build-push-action@v5
with:
context: ./docker/forgejo
platforms: ${{ matrix.info.platform }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
outputs: type=image,name=${{ env.REGISTRY_IMAGE }},name-canonical=true,push=true
-
name: Export digest
run: |
mkdir -p /tmp/digests
digest="${{ steps.build.outputs.digest }}"
touch "/tmp/digests/${digest#sha256:}"
echo "Created /tmp/digests/${digest#sha256:}"
shell: bash
-
name: Upload ${{ matrix.info.platform }} digest
uses: https://github.com/actions/upload-artifact@v3
with:
name: digests
path: /tmp/digests/*
if-no-files-found: error
retention-days: 1
publish-docker:
runs-on: docker
container:
image: docker.io/asonix/actions-base-image:0.1
needs: [build]
steps:
-
name: Download digests
uses: https://github.com/actions/download-artifact@v3
with:
name: digests
path: /tmp/digests
pattern: digests-*
merge-multiple: true
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
-
name: Docker login
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
-
name: Docker meta
id: meta
uses: https://github.com/docker/metadata-action@v5
with:
images: ${{ env.REGISTRY_IMAGE }}
flavor: |
latest=auto
tags: |
type=raw,value=latest,enable={{ is_default_branch }}
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=semver,pattern={{major}}
-
name: Create manifest list and push
working-directory: /tmp/digests
run: |
tags=$(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "${DOCKER_METADATA_OUTPUT_JSON}")
images=$(printf "${{ env.REGISTRY_IMAGE }}@sha256:%s " *)
echo "Running 'docker buildx imagetools create ${tags[@]} ${images[@]}'"
docker buildx imagetools create ${tags[@]} ${images[@]}
shell: bash
-
name: Inspect Image
run: |
docker buildx imagetools inspect ${{ env.REGISTRY_IMAGE }}:${{ steps.meta.outputs.version }}
publish-forgejo:
needs: [build]
runs-on: docker
container:
image: docker.io/asonix/actions-base-image:0.1
steps:
- uses: https://github.com/actions/download-artifact@v3
with:
name: binaries
path: artifacts/
merge-multiple: true
- uses: actions/forgejo-release@v1
with:
direction: upload
token: ${{ secrets.GITHUB_TOKEN }}
release-dir: artifacts/
publish-crate:
needs: [build]
runs-on: docker
container:
image: docker.io/asonix/actions-base-image:0.1
steps:
-
name: Checkout relay
uses: https://github.com/actions/checkout@v4
-
name: Cargo Cache
uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
-
name: Publish Crate
run: cargo publish --token ${{ secrets.CRATES_IO_TOKEN }}

.gitignore vendored

@@ -1,6 +1,3 @@
 /target
 /artifacts
 /sled
-/.direnv
-/.envrc
-/result

Cargo.lock generated

File diff suppressed because it is too large.

Cargo.toml

@@ -1,11 +1,11 @@
 [package]
 name = "ap-relay"
 description = "A simple activitypub relay"
-version = "0.3.116"
+version = "0.3.20"
 authors = ["asonix <asonix@asonix.dog>"]
-license = "AGPL-3.0"
+license-file = "LICENSE"
 readme = "README.md"
-repository = "https://git.asonix.dog/asonix/relay"
+repository = "https://git.asonix.dog/asonix/ap-relay"
 keywords = ["activitypub", "relay"]
 edition = "2021"
 build = "src/build.rs"
@@ -14,101 +14,72 @@ build = "src/build.rs"
name = "relay" name = "relay"
path = "src/main.rs" path = "src/main.rs"
[profile.release]
strip = true
[features] [features]
console = ["dep:console-subscriber"] console = ["console-subscriber"]
default = [] default = []
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies] [dependencies]
actix-web = { version = "4.4.0", default-features = false, features = ["compress-brotli", "compress-gzip", "rustls-0_23"] } anyhow = "1.0"
actix-webfinger = { version = "0.5.0", default-features = false } actix-rt = "2.6.0"
activitystreams = "0.7.0-alpha.25" actix-web = { version = "4.0.1", default-features = false }
activitystreams-ext = "0.1.0-alpha.3" actix-webfinger = "0.4.0"
ammonia = "4.0.0" activitystreams = "0.7.0-alpha.19"
async-cpupool = "0.3.0" activitystreams-ext = "0.1.0-alpha.2"
bcrypt = "0.16" ammonia = "3.1.0"
base64 = "0.22" async-rwlock = "1.3.0"
clap = { version = "4.0.0", features = ["derive"] } awc = { version = "3.0.0", default-features = false, features = ["rustls"] }
color-eyre = "0.6.2" base64 = "0.13"
config = { version = "0.14.0", default-features = false, features = ["toml", "json", "yaml"] } config = "0.12.0"
console-subscriber = { version = "0.4", optional = true } console-subscriber = { version = "0.1", optional = true }
dashmap = "6.0.1" dashmap = "5.1.0"
dotenv = "0.15.0" dotenv = "0.15.0"
futures-core = "0.3.30" futures-util = "0.3.17"
lru = "0.12.0" lru = "0.7.0"
metrics = "0.23.0"
metrics-exporter-prometheus = { version = "0.15.0", default-features = false, features = [
"http-listener",
] }
metrics-util = "0.17.0"
mime = "0.3.16" mime = "0.3.16"
minify-html = "0.15.0" opentelemetry = { version = "0.17", features = ["rt-tokio"] }
opentelemetry = "0.27.1" opentelemetry-otlp = "0.10"
opentelemetry_sdk = { version = "0.27", features = ["rt-tokio"] }
opentelemetry-otlp = { version = "0.27", features = ["grpc-tonic"] }
pin-project-lite = "0.2.9"
# pinned to metrics-util
quanta = "0.12.0"
rand = "0.8" rand = "0.8"
reqwest = { version = "0.12", default-features = false, features = ["rustls-tls", "stream"]} rsa = "0.5"
reqwest-middleware = { version = "0.4", default-features = false, features = ["json"] } rsa-magic-public-key = "0.4.0"
reqwest-tracing = "0.5.0"
ring = "0.17.5"
rsa = "0.9"
rsa-magic-public-key = "0.8.0"
rustls = { version = "0.23.0", default-features = false, features = ["ring", "logging", "std", "tls12"] }
rustls-channel-resolver = "0.3.0"
rustls-pemfile = "2"
serde = { version = "1.0", features = ["derive"] } serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0" serde_json = "1.0"
sha2 = "0.10"
sled = "0.34.7" sled = "0.34.7"
streem = "0.2.0" structopt = "0.3.12"
teloxide = { version = "0.13.0", default-features = false, features = [ thiserror = "1.0"
"ctrlc_handler",
"macros",
"rustls",
] }
thiserror = "2.0"
time = { version = "0.3.17", features = ["serde"] }
tracing = "0.1" tracing = "0.1"
tracing-awc = "0.1.0"
tracing-error = "0.2" tracing-error = "0.2"
tracing-log = "0.2" tracing-futures = "0.2"
tracing-opentelemetry = "0.28" tracing-log = "0.1"
tracing-opentelemetry = "0.17"
tracing-subscriber = { version = "0.3", features = [ tracing-subscriber = { version = "0.3", features = [
"ansi", "ansi",
"env-filter", "env-filter",
"fmt", "fmt",
] } ] }
tokio = { version = "1", features = ["full", "tracing"] } uuid = { version = "0.8", features = ["v4", "serde"] }
uuid = { version = "1", features = ["v4", "serde"] }
[dependencies.background-jobs] [dependencies.background-jobs]
version = "0.19.0" version = "0.12.0"
default-features = false default-features = false
features = ["error-logging", "metrics", "tokio"] features = ["background-jobs-actix", "error-logging"]
[dependencies.http-signature-normalization-actix] [dependencies.http-signature-normalization-actix]
version = "0.11.1" version = "0.6.0"
default-features = false default-features = false
features = ["server", "ring"] features = ["client", "server", "sha-2"]
[dependencies.http-signature-normalization-reqwest]
version = "0.13.0"
default-features = false
features = ["middleware", "ring"]
[dependencies.tracing-actix-web] [dependencies.tracing-actix-web]
version = "0.7.9" version = "0.5.0"
[build-dependencies] [build-dependencies]
color-eyre = "0.6.2" anyhow = "1.0"
dotenv = "0.15.0" dotenv = "0.15.0"
ructe = { version = "0.17.0", features = ["sass", "mime03"] } ructe = { version = "0.14.0", features = ["sass", "mime03"] }
toml = "0.8.0" toml = "0.5.8"
[profile.dev.package.rsa] [profile.dev.package.rsa]
opt-level = 3 opt-level = 3

LICENSE

File diff suppressed because it is too large.

README.md

@@ -1,55 +1,29 @@
 # AodeRelay
 _A simple and efficient activitypub relay_
-### Installation
-#### Docker
-If running docker, you can start the relay with the following command:
-```
-$ sudo docker run --rm -it \
-    -v "$(pwd):/mnt/" \
-    -e ADDR=0.0.0.0 \
-    -e SLED_PATH=/mnt/sled/db-0.34 \
-    -p 8080:8080 \
-    asonix/relay:0.3.85
-```
-This will launch the relay with the database stored in "./sled/db-0.34" and listening on port 8080
-#### Cargo
-With cargo installed, the relay can be installed to your cargo bin directory with the following command
-```
-$ cargo install ap-relay
-```
-Then it can be run with this:
-```
-$ ADDR=0.0.0.0 relay
-```
-This will launch the relay with the database stored in "./sled/db-0.34" and listening on port 8080
-#### Source
-The relay can be launched directly from this git repository with the following commands:
-```
-$ git clone https://git.asonix.dog/asonix/relay
-$ ADDR=0.0.0.0 cargo run --release
-```
 ### Usage
 To simply run the server, the command is as follows
 ```bash
 $ ./relay
 ```
-#### Administration
-> **NOTE:** The server _must be running_ in order to update the lists with the following commands
 To learn about any other tasks, the `--help` flag can be passed
 ```bash
-$ ./relay --help
+relay 0.2.0
 An activitypub relay
-Usage: relay [OPTIONS]
-Options:
-  -b <BLOCKS>      A list of domains that should be blocked
-  -a <ALLOWED>     A list of domains that should be allowed
-  -u, --undo       Undo allowing or blocking domains
-  -h, --help       Print help information
+USAGE:
+    relay [FLAGS] [OPTIONS]
+FLAGS:
+    -h, --help       Prints help information
+    -u, --undo       Undo allowing or blocking domains
+    -V, --version    Prints version information
+OPTIONS:
+    -a <allowed>...        A list of domains that should be allowed
+    -b <blocks>...         A list of domains that should be blocked
 ```
 To add domains to the blocklist, use the `-b` flag and pass a list of domains
@@ -66,113 +40,13 @@ $ ./relay -a asonix.dog blimps.xyz
 $ ./relay -ua asonix.dog blimps.xyz
 ```
-### Configuration
-By default, all these values are set to development values. These are read from the environment, or
-from the `.env` file in the working directory.
-```env
-HOSTNAME=localhost:8080
-ADDR=127.0.0.1
-PORT=8080
-DEBUG=true
-RESTRICTED_MODE=false
-VALIDATE_SIGNATURES=false
-HTTPS=false
-PRETTY_LOG=true
-PUBLISH_BLOCKS=false
-SLED_PATH=./sled/db-0.34
-```
-To run this server in production, you'll likely want to set most of them
-```env
-HOSTNAME=relay.my.tld
-ADDR=0.0.0.0
-PORT=8080
-DEBUG=false
-RESTRICTED_MODE=false
-VALIDATE_SIGNATURES=true
-HTTPS=true
-PRETTY_LOG=false
-PUBLISH_BLOCKS=true
-SLED_PATH=./sled/db-0.34
-RUST_LOG=warn
-API_TOKEN=somepasswordishtoken
-OPENTELEMETRY_URL=localhost:4317
-TELEGRAM_TOKEN=secret
-TELEGRAM_ADMIN_HANDLE=your_handle
-TLS_KEY=/path/to/key
-TLS_CERT=/path/to/cert
-FOOTER_BLURB="Contact <a href=\"https://masto.asonix.dog/@asonix\">@asonix</a> for inquiries"
-LOCAL_DOMAINS=masto.asonix.dog
-LOCAL_BLURB="<p>Welcome to my cool relay where I have cool relay things happening. I hope you enjoy your stay!</p>"
-PROMETHEUS_ADDR=0.0.0.0
-PROMETHEUS_PORT=9000
-CLIENT_TIMEOUT=10
-DELIVER_CONCURRENCY=8
-SIGNATURE_THREADS=2
-```
-#### Descriptions
-##### `HOSTNAME`
-The domain or IP address the relay is hosted on. If you launch the relay on `example.com`, that would be your HOSTNAME. The default is `localhost:8080`
-##### `ADDR`
-The address the server binds to. By default, this is `127.0.0.1`, so for production cases it should be set to `0.0.0.0` or another public address.
-##### `PORT`
-The port the server binds to, this is `8080` by default but can be changed if needed.
-##### `DEBUG`
-Whether to print incoming activities to the console when requests hit the /inbox route. This defaults to `true`, but should be set to `false` in production cases. Since every activity sent to the relay is public anyway, this doesn't represent a security risk.
-##### `RESTRICTED_MODE`
-This setting enables an 'allowlist' setup where only servers that have been explicitly enabled through the `relay -a` command can join the relay. This is `false` by default. If `RESTRICTED_MODE` is not enabled, then manually allowing domains with `relay -a` has no effect.
-##### `VALIDATE_SIGNATURES`
-This setting enforces checking HTTP signatures on incoming activities. It defaults to `true`
-##### `HTTPS`
-Whether the current server is running on an HTTPS port or not. This is used for generating URLs to the current running relay. By default it is set to `true`
-##### `PUBLISH_BLOCKS`
-Whether or not to publish a list of blocked domains in the `nodeinfo` metadata for the server. It defaults to `false`.
-##### `SLED_PATH`
-Where to store the on-disk database of connected servers. This defaults to `./sled/db-0.34`.
-##### `RUST_LOG`
-The log level to print. Available levels are `ERROR`, `WARN`, `INFO`, `DEBUG`, and `TRACE`. You can also specify module paths to enable some logs but not others, such as `RUST_LOG=warn,tracing_actix_web=info,relay=info`. This defaults to `warn`
-##### `SOURCE_REPO`
-The URL to the source code for the relay. This defaults to `https://git.asonix.dog/asonix/relay`, but should be changed if you're running a fork hosted elsewhere.
-##### `REPOSITORY_COMMIT_BASE`
-The base path of the repository commit hash reference. For example, `/src/commit/` for Gitea, `/tree/` for GitLab.
-##### `API_TOKEN`
-The Secret token used to access the admin APIs. This must be set for the commandline to function
-##### `OPENTELEMETRY_URL`
-A URL for exporting opentelemetry spans. This is mostly useful for debugging. There is no default, since most people probably don't run an opentelemetry collector.
-##### `TELEGRAM_TOKEN`
-A Telegram Bot Token for running the relay administration bot. There is no default.
-##### `TELEGRAM_ADMIN_HANDLE`
-The handle of the telegram user allowed to administer the relay. There is no default.
-##### `TLS_KEY`
-Optional - This is specified if you are running the relay directly on the internet and have a TLS key to provide HTTPS for your relay
-##### `TLS_CERT`
-Optional - This is specified if you are running the relay directly on the internet and have a TLS certificate chain to provide HTTPS for your relay
-##### `FOOTER_BLURB`
-Optional - Add custom notes in the footer of the page
-##### `LOCAL_DOMAINS`
-Optional - domains of mastodon servers run by the same admin as the relay
-##### `LOCAL_BLURB`
-Optional - description for the relay
-##### `PROMETHEUS_ADDR`
-Optional - Address to bind to for serving the prometheus scrape endpoint
-##### `PROMETHEUS_PORT`
-Optional - Port to bind to for serving the prometheus scrape endpoint
-##### `CLIENT_TIMEOUT`
-Optional - How long the relay will hold open a connection (in seconds) to a remote server during
-fetches and deliveries. This defaults to 10
-##### `DELIVER_CONCURRENCY`
-Optional - How many deliver requests the relay should allow to be in-flight per thread. The default
-is 8
-##### `SIGNATURE_THREADS`
-Optional - Override number of threads used for signing and verifying requests. Default is
-`std::thread::available_parallelism()` (It tries to detect how many cores you have). If it cannot
-detect the correct number of cores, it falls back to 1.
-##### `PROXY_URL`
-Optional - URL of an HTTP proxy to forward outbound requests through
-##### `PROXY_USERNAME`
-Optional - username to provide to the HTTP proxy set with `PROXY_URL` through HTTP Basic Auth
-##### `PROXY_PASSWORD`
-Optional - password to provide to the HTTP proxy set with `PROXY_URL` through HTTP Basic Auth
+Allowed domains are only checked against incoming activities if `RESTRICTED_MODE` is enabled.
+Blocks can be published in the nodeinfo metadata by setting `PUBLISH_BLOCKS` to true
+For advanced setups, it may be useful to run the relay API and the background tasks in separate
+processes, possibly on separate hosts. The `-j` and `-n` flags have been provided for this purpose.
+By passing `-n`, a relay can be spawned that handles no deliveries. By passing `-j`, a relay will
+not be spawned, but any deliveries existing in the database will be processed.
 ### Subscribing
 Mastodon admins can subscribe to this relay by adding the `/inbox` route to their relay settings.
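The `SIGNATURE_THREADS` default removed above (detect the core count, fall back to 1) is the standard `available_parallelism` pattern. A self-contained sketch, with a hypothetical helper name:

```rust
use std::thread;

// Hypothetical helper mirroring the documented default: prefer the
// configured override, otherwise detect cores, otherwise fall back to 1.
fn signature_threads(configured: Option<usize>) -> usize {
    configured.unwrap_or_else(|| {
        thread::available_parallelism()
            .map(usize::from)
            .unwrap_or(1)
    })
}

fn main() {
    println!("signing with {} threads", signature_threads(None));
}
```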
@@ -192,16 +66,10 @@ example, if the server is `https://relay.my.tld`, the correct URL would be
 - Follow Public, become a listener of the relay
 - Undo Follow {self-actor}, stop listening on the relay, an Undo Follow will be sent back
 - Undo Follow Public, stop listening on the relay
-- Delete {anything}, the Delete {anything} is relayed verbatim to listening servers.
+- Delete {anything}, the Delete {anything} is relayed verbatim to listening servers
 Note that this activity will likely be rejected by the listening servers unless it has been
 signed with a JSON-LD signature
-- Update {anything}, the Update {anything} is relayed verbatim to listening servers.
-Note that this activity will likely be rejected by the listening servers unless it has been
-signed with a JSON-LD signature
-- Add {anything}, the Add {anything} is relayed verbatim to listening servers.
-Note that this activity will likely be rejected by the listening servers unless it has been
-signed with a JSON-LD signature
-- Remove {anything}, the Remove {anything} is relayed verbatim to listening servers.
+- Update {anything}, the Update {anything} is relayed verbatim to listening servers
 Note that this activity will likely be rejected by the listening servers unless it has been
 signed with a JSON-LD signature
@@ -209,17 +77,46 @@ example, if the server is `https://relay.my.tld`, the correct URL would be
 - Webfinger
 - NodeInfo
-### Known issues
-Pleroma and Akkoma do not support validating JSON-LD signatures, meaning many activities such as Delete, Update, Add, and Remove will be rejected with a message similar to `WARN: Response from https://example.com/inbox, "Invalid HTTP Signature"`. This is normal and not an issue with the relay.
+### Configuration
+By default, all these values are set to development values. These are read from the environment, or
+from the `.env` file in the working directory.
+```env
+HOSTNAME=localhost:8080
+ADDR=127.0.0.1
+PORT=8080
+DEBUG=true
+RESTRICTED_MODE=false
+VALIDATE_SIGNATURES=false
+HTTPS=false
+DATABASE_URL=
+PRETTY_LOG=true
+PUBLISH_BLOCKS=false
+```
+To run this server in production, you'll likely want to set most of them
+```env
+HOSTNAME=relay.my.tld
+ADDR=0.0.0.0
+PORT=8080
+DEBUG=false
+RESTRICTED_MODE=false
+VALIDATE_SIGNATURES=true
+HTTPS=true
+DATABASE_URL=postgres://pg_user:pg_pass@pg_host:pg_port/pg_database
+PRETTY_LOG=false
+PUBLISH_BLOCKS=true
+```
 ### Contributing
-Feel free to open issues for anything you find an issue with. Please note that any contributed code will be licensed under the AGPLv3.
+Unless otherwise stated, all contributions to this project will be licensed under the CSL with
+the exceptions listed in the License section of this file.
 ### License
-Copyright © 2022 Riley Trautman
-AodeRelay is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
-AodeRelay is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. This file is part of AodeRelay.
-You should have received a copy of the GNU General Public License along with AodeRelay. If not, see [http://www.gnu.org/licenses/](http://www.gnu.org/licenses/).
+This work is licensed under the Cooperative Software License. This is not a Free Software
+License, but may be considered a "source-available License." For most hobbyists, self-employed
+developers, worker-owned companies, and cooperatives, this software can be used in most
+projects so long as this software is distributed under the terms of the CSL. For more
+information, see the provided LICENSE file. If none exists, the license can be found online
+[here](https://lynnesbian.space/csl/). If you are a free software project and wish to use this
+software under the terms of the GNU Affero General Public License, please contact me at
+[asonix@asonix.dog](mailto:asonix@asonix.dog) and we can sort that out. If you wish to use this
+project under any other license, especially in proprietary software, the answer is likely no.

docker/drone/Dockerfile Normal file

@@ -0,0 +1,11 @@
ARG REPO_ARCH
FROM asonix/rust-runner:latest-linux-$REPO_ARCH
COPY relay /usr/local/bin/relay
USER app
EXPOSE 8080
VOLUME /mnt
ENTRYPOINT ["/sbin/tini", "--"]
CMD ["/usr/local/bin/relay"]

docker/drone/manifest.tmpl Normal file

@@ -0,0 +1,25 @@
image: asonix/relay:{{#if build.tag}}{{trimPrefix "v" build.tag}}{{else}}latest{{/if}}
{{#if build.tags}}
tags:
{{#each build.tags}}
- {{this}}
{{/each}}
{{/if}}
manifests:
-
image: asonix/relay:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}linux-amd64
platform:
architecture: amd64
os: linux
-
image: asonix/relay:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}linux-arm64v8
platform:
architecture: arm64
os: linux
variant: v8
-
image: asonix/relay:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}linux-arm32v7
platform:
architecture: arm
os: linux
variant: v7


@@ -1,24 +0,0 @@
FROM alpine:3.19
ARG UID=991
ARG GID=991
ENV \
UID=${UID} \
GID=${GID}
USER root
RUN \
addgroup -g "${GID}" app && \
adduser -D -G app -u "${UID}" -g "" -h /opt/app app && \
apk add tini && \
chown -R app:app /mnt
COPY relay /usr/local/bin/relay
USER app
EXPOSE 6669
EXPOSE 8080
VOLUME /mnt
ENTRYPOINT ["/sbin/tini", "--"]
CMD ["/usr/local/bin/relay"]

docker/prod/Dockerfile Normal file

@@ -0,0 +1,41 @@
ARG REPO_ARCH=amd64
# cross-build environment
FROM asonix/rust-builder:$REPO_ARCH-latest AS builder
ARG TAG=main
ARG BINARY=relay
ARG PROJECT=relay
ARG GIT_REPOSITORY=https://git.asonix.dog/asonix/$PROJECT
ENV \
BINARY=${BINARY}
ADD \
--chown=build:build \
$GIT_REPOSITORY/archive/$TAG.tar.gz \
/opt/build/repo.tar.gz
RUN \
tar zxf repo.tar.gz
WORKDIR /opt/build/$PROJECT
RUN \
build
# production environment
FROM asonix/rust-runner:$REPO_ARCH-latest
ARG BINARY=relay
ENV \
BINARY=${BINARY}
COPY \
--from=builder \
/opt/build/binary \
/usr/bin/${BINARY}
ENTRYPOINT ["/sbin/tini", "--"]
CMD /usr/bin/${BINARY}

docker/prod/build-image.sh Executable file

@@ -0,0 +1,37 @@
#!/usr/bin/env bash
function require() {
if [ "$1" = "" ]; then
echo "input '$2' required"
print_help
exit 1
fi
}
function print_help() {
echo "deploy.sh"
echo ""
echo "Usage:"
echo " deploy.sh [repo] [tag] [arch]"
echo ""
echo "Args:"
echo " repo: The docker repository to publish the image"
echo " tag: The tag applied to the docker image"
echo " arch: The architecuture of the doker image"
}
REPO=$1
TAG=$2
ARCH=$3
require "$REPO" repo
require "$TAG" tag
require "$ARCH" arch
sudo docker build \
--pull \
--build-arg TAG=$TAG \
--build-arg REPO_ARCH=$ARCH \
-t $REPO:$ARCH-$TAG \
-f Dockerfile \
.

docker/prod/deploy.sh Executable file

@@ -0,0 +1,87 @@
#!/usr/bin/env bash
function require() {
if [ "$1" = "" ]; then
echo "input '$2' required"
print_help
exit 1
fi
}
function print_help() {
echo "deploy.sh"
echo ""
echo "Usage:"
echo " deploy.sh [tag] [branch] [push]"
echo ""
echo "Args:"
echo " tag: The git tag to be applied to the repository and docker build"
echo " branch: The git branch to use for tagging and publishing"
echo " push: Whether or not to push the image"
echo ""
echo "Examples:"
echo " ./deploy.sh v0.3.0-alpha.13 main true"
echo " ./deploy.sh v0.3.0-alpha.13-shell-out asonix/shell-out false"
}
function build_image() {
tag=$1
arch=$2
push=$3
./build-image.sh asonix/relay $tag $arch
sudo docker tag asonix/relay:$arch-$tag asonix/relay:$arch-latest
if [ "$push" == "true" ]; then
sudo docker push asonix/relay:$arch-$tag
sudo docker push asonix/relay:$arch-latest
fi
}
# Creating the new tag
new_tag="$1"
branch="$2"
push=$3
require "$new_tag" "tag"
require "$branch" "branch"
require "$push" "push"
if ! sudo docker run --rm -it arm64v8/alpine:3.11 /bin/sh -c 'echo "docker is configured correctly"'
then
echo "docker is not configured to run on qemu-emulated architectures, fixing will require sudo"
sudo docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
fi
set -xe
git checkout $branch
# Changing the docker-compose prod
sed -i "s/asonix\/relay:.*/asonix\/relay:$new_tag/" docker-compose.yml
git add ../prod/docker-compose.yml
# The commit
git commit -m"Version $new_tag"
git tag $new_tag
# Push
git push origin $new_tag
git push
# Build for arm64v8, arm32v7 and amd64
build_image $new_tag arm64v8 $push
build_image $new_tag arm32v7 $push
build_image $new_tag amd64 $push
# Build for other archs
# TODO
if [ "$push" == "true" ]; then
./manifest.sh relay $new_tag
./manifest.sh relay latest
# pushd ../../
# cargo publish
# popd
fi

docker/prod/docker-compose.yml

@@ -2,7 +2,7 @@ version: '3.3'
 services:
   relay:
-    image: asonix/relay:0.3.115
+    image: asonix/relay:v0.3.8
     ports:
       - "8079:8079"
     restart: always
@@ -14,7 +14,6 @@ services:
       - RESTRICTED_MODE=false
       - VALIDATE_SIGNATURES=true
       - HTTPS=true
-      - SLED_PATH=/mnt/sled/db-0.34
+      - DATABASE_URL=postgres://pg_user:pg_pass@pg_host:pg_port/pg_database
       - PRETTY_LOG=false
       - PUBLISH_BLOCKS=true
-      - API_TOKEN=somepasswordishtoken

docker/prod/manifest.sh Executable file

@@ -0,0 +1,43 @@
#!/usr/bin/env bash
function require() {
if [ "$1" = "" ]; then
echo "input '$2' required"
print_help
exit 1
fi
}
function print_help() {
echo "deploy.sh"
echo ""
echo "Usage:"
echo " manifest.sh [repo] [tag]"
echo ""
echo "Args:"
echo " repo: The docker repository to update"
echo " tag: The git tag to be applied to the image manifest"
}
REPO=$1
TAG=$2
require "$REPO" "repo"
require "$TAG" "tag"
set -xe
sudo docker manifest create asonix/$REPO:$TAG \
-a asonix/$REPO:arm64v8-$TAG \
-a asonix/$REPO:arm32v7-$TAG \
-a asonix/$REPO:amd64-$TAG
sudo docker manifest annotate asonix/$REPO:$TAG \
asonix/$REPO:arm64v8-$TAG --os linux --arch arm64 --variant v8
sudo docker manifest annotate asonix/$REPO:$TAG \
asonix/$REPO:arm32v7-$TAG --os linux --arch arm --variant v7
sudo docker manifest annotate asonix/$REPO:$TAG \
asonix/$REPO:amd64-$TAG --os linux --arch amd64
sudo docker manifest push asonix/$REPO:$TAG --purge

flake.lock generated

@@ -1,61 +0,0 @@
{
"nodes": {
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1710146030,
"narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1733550349,
"narHash": "sha256-NcGumB4Lr6KSDq+nIqXtNA8QwAQKDSZT7N9OTGWbTrs=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "e2605d0744c2417b09f8bf850dfca42fcf537d34",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-24.11",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

flake.nix

@@ -1,34 +0,0 @@
{
description = "relay";
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.11";
flake-utils.url = "github:numtide/flake-utils";
};
outputs = { self, nixpkgs, flake-utils }:
flake-utils.lib.eachDefaultSystem (system:
let
pkgs = import nixpkgs {
inherit system;
};
in
{
packages = rec {
relay = pkgs.callPackage ./relay.nix { };
default = relay;
};
apps = rec {
dev = flake-utils.lib.mkApp { drv = self.packages.${system}.pict-rs-proxy; };
default = dev;
};
devShell = with pkgs; mkShell {
nativeBuildInputs = [ cargo cargo-outdated cargo-zigbuild clippy gcc protobuf rust-analyzer rustc rustfmt ];
RUST_SRC_PATH = "${pkgs.rust.packages.stable.rustPlatform.rustLibSrc}";
};
});
}

relay.nix

@@ -1,23 +0,0 @@
{ lib
, nixosTests
, rustPlatform
}:
rustPlatform.buildRustPackage {
pname = "relay";
version = "0.3.116";
src = ./.;
cargoLock.lockFile = ./Cargo.lock;
RUSTFLAGS = "--cfg tokio_unstable";
nativeBuildInputs = [ ];
passthru.tests = { inherit (nixosTests) relay; };
meta = with lib; {
description = "An ActivityPub relay";
homepage = "https://git.asonix.dog/asonix/relay";
license = with licenses; [ agpl3Plus ];
};
}


@@ -41,7 +41,7 @@ header {
 }
 }
-article {
+section {
 background-color: #fff;
 color: #333;
 border: 1px solid #e5e5e5;
@@ -51,16 +51,8 @@ article {
 max-width: 700px;
 padding-bottom: 32px;
-section {
-border-bottom: 1px solid #e5e5e5;
-> h4:first-child,
-> p:first-child {
-margin-top: 0;
-}
-> p:last-child {
-margin-bottom: 0;
-}
+> p:first-child {
+margin-top: 0;
 }
 h3 {
@@ -75,13 +67,13 @@ article {
 li {
 padding-top: 36px;
+border-bottom: 1px solid #e5e5e5;
 }
 .padded {
 padding: 0 24px;
 }
-.local-explainer,
 .joining {
 padding: 24px;
 }
@@ -182,11 +174,9 @@ footer {
 li {
 padding: 0;
+border-bottom: none;
 }
 }
-article section {
-border-bottom: none;
-}
 }
 }
@@ -236,7 +226,7 @@ footer {
 padding: 24px;
 }
-article {
+section {
 border-left: none;
 border-right: none;
 border-radius: 0;


@@ -1,32 +0,0 @@
use activitystreams::iri_string::types::IriString;
use std::collections::{BTreeMap, BTreeSet};
use time::OffsetDateTime;
pub mod client;
pub mod routes;
#[derive(serde::Deserialize, serde::Serialize)]
pub(crate) struct Domains {
domains: Vec<String>,
}
#[derive(serde::Deserialize, serde::Serialize)]
pub(crate) struct AllowedDomains {
pub(crate) allowed_domains: Vec<String>,
}
#[derive(serde::Deserialize, serde::Serialize)]
pub(crate) struct BlockedDomains {
pub(crate) blocked_domains: Vec<String>,
}
#[derive(serde::Deserialize, serde::Serialize)]
pub(crate) struct ConnectedActors {
pub(crate) connected_actors: Vec<IriString>,
}
#[derive(serde::Deserialize, serde::Serialize)]
pub(crate) struct LastSeen {
pub(crate) last_seen: BTreeMap<OffsetDateTime, BTreeSet<String>>,
pub(crate) never: Vec<String>,
}


@@ -1,133 +0,0 @@
use crate::{
admin::{AllowedDomains, BlockedDomains, ConnectedActors, Domains, LastSeen},
collector::Snapshot,
config::{AdminUrlKind, Config},
error::{Error, ErrorKind},
extractors::XApiToken,
};
use reqwest_middleware::ClientWithMiddleware;
use serde::de::DeserializeOwned;
pub(crate) async fn allow(
client: &ClientWithMiddleware,
config: &Config,
domains: Vec<String>,
) -> Result<(), Error> {
post_domains(client, config, domains, AdminUrlKind::Allow).await
}
pub(crate) async fn disallow(
client: &ClientWithMiddleware,
config: &Config,
domains: Vec<String>,
) -> Result<(), Error> {
post_domains(client, config, domains, AdminUrlKind::Disallow).await
}
pub(crate) async fn block(
client: &ClientWithMiddleware,
config: &Config,
domains: Vec<String>,
) -> Result<(), Error> {
post_domains(client, config, domains, AdminUrlKind::Block).await
}
pub(crate) async fn unblock(
client: &ClientWithMiddleware,
config: &Config,
domains: Vec<String>,
) -> Result<(), Error> {
post_domains(client, config, domains, AdminUrlKind::Unblock).await
}
pub(crate) async fn allowed(
client: &ClientWithMiddleware,
config: &Config,
) -> Result<AllowedDomains, Error> {
get_results(client, config, AdminUrlKind::Allowed).await
}
pub(crate) async fn blocked(
client: &ClientWithMiddleware,
config: &Config,
) -> Result<BlockedDomains, Error> {
get_results(client, config, AdminUrlKind::Blocked).await
}
pub(crate) async fn connected(
client: &ClientWithMiddleware,
config: &Config,
) -> Result<ConnectedActors, Error> {
get_results(client, config, AdminUrlKind::Connected).await
}
pub(crate) async fn stats(
client: &ClientWithMiddleware,
config: &Config,
) -> Result<Snapshot, Error> {
get_results(client, config, AdminUrlKind::Stats).await
}
pub(crate) async fn last_seen(
client: &ClientWithMiddleware,
config: &Config,
) -> Result<LastSeen, Error> {
get_results(client, config, AdminUrlKind::LastSeen).await
}
async fn get_results<T: DeserializeOwned>(
client: &ClientWithMiddleware,
config: &Config,
url_kind: AdminUrlKind,
) -> Result<T, Error> {
let x_api_token = config.x_api_token().ok_or(ErrorKind::MissingApiToken)?;
let iri = config.generate_admin_url(url_kind);
let res = client
.get(iri.as_str())
.header(XApiToken::http1_name(), x_api_token.to_string())
.send()
.await
.map_err(|e| ErrorKind::SendRequest(iri.to_string(), e.to_string()))?;
if !res.status().is_success() {
return Err(ErrorKind::Status(
iri.to_string(),
crate::http1::status_to_http02(res.status()),
)
.into());
}
let t = res
.json()
.await
.map_err(|e| ErrorKind::ReceiveResponse(iri.to_string(), e.to_string()))?;
Ok(t)
}
async fn post_domains(
client: &ClientWithMiddleware,
config: &Config,
domains: Vec<String>,
url_kind: AdminUrlKind,
) -> Result<(), Error> {
let x_api_token = config.x_api_token().ok_or(ErrorKind::MissingApiToken)?;
let iri = config.generate_admin_url(url_kind);
let res = client
.post(iri.as_str())
.header(XApiToken::http1_name(), x_api_token.to_string())
.json(&Domains { domains })
.send()
.await
.map_err(|e| ErrorKind::SendRequest(iri.to_string(), e.to_string()))?;
if !res.status().is_success() {
tracing::warn!("Failed to allow domains");
}
Ok(())
}
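Stripped of the relay's `Config` and `Error` plumbing, each of the deleted client calls above is a JSON request with an api-token header. A rough standalone equivalent using plain reqwest; the endpoint path and header name are assumptions inferred from `AdminUrlKind::Allow` and `XApiToken`, not confirmed by this diff (requires reqwest's `json` feature and serde's `derive`):

```rust
#[derive(serde::Serialize)]
struct Domains {
    domains: Vec<String>,
}

// Rough sketch of what `post_domains` does for the Allow endpoint.
async fn allow_domains(
    base_url: &str,
    api_token: &str,
    domains: Vec<String>,
) -> Result<(), reqwest::Error> {
    let client = reqwest::Client::new();
    client
        .post(format!("{base_url}/api/v1/admin/allow")) // assumed path
        .header("X-Api-Token", api_token) // assumed header name
        .json(&Domains { domains })
        .send()
        .await?
        .error_for_status()?;
    Ok(())
}
```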


@@ -1,90 +0,0 @@
use crate::{
admin::{AllowedDomains, BlockedDomains, ConnectedActors, Domains, LastSeen},
collector::{MemoryCollector, Snapshot},
error::Error,
extractors::Admin,
};
use actix_web::{
web::{Data, Json},
HttpResponse,
};
use std::collections::{BTreeMap, BTreeSet};
use time::OffsetDateTime;
pub(crate) async fn allow(
admin: Admin,
Json(Domains { domains }): Json<Domains>,
) -> Result<HttpResponse, Error> {
admin.db_ref().add_allows(domains).await?;
Ok(HttpResponse::NoContent().finish())
}
pub(crate) async fn disallow(
admin: Admin,
Json(Domains { domains }): Json<Domains>,
) -> Result<HttpResponse, Error> {
admin.db_ref().remove_allows(domains).await?;
Ok(HttpResponse::NoContent().finish())
}
pub(crate) async fn block(
admin: Admin,
Json(Domains { domains }): Json<Domains>,
) -> Result<HttpResponse, Error> {
admin.db_ref().add_blocks(domains).await?;
Ok(HttpResponse::NoContent().finish())
}
pub(crate) async fn unblock(
admin: Admin,
Json(Domains { domains }): Json<Domains>,
) -> Result<HttpResponse, Error> {
admin.db_ref().remove_blocks(domains).await?;
Ok(HttpResponse::NoContent().finish())
}
pub(crate) async fn allowed(admin: Admin) -> Result<Json<AllowedDomains>, Error> {
let allowed_domains = admin.db_ref().allows().await?;
Ok(Json(AllowedDomains { allowed_domains }))
}
pub(crate) async fn blocked(admin: Admin) -> Result<Json<BlockedDomains>, Error> {
let blocked_domains = admin.db_ref().blocks().await?;
Ok(Json(BlockedDomains { blocked_domains }))
}
pub(crate) async fn connected(admin: Admin) -> Result<Json<ConnectedActors>, Error> {
let connected_actors = admin.db_ref().connected_ids().await?;
Ok(Json(ConnectedActors { connected_actors }))
}
pub(crate) async fn stats(
_admin: Admin,
collector: Data<MemoryCollector>,
) -> Result<Json<Snapshot>, Error> {
Ok(Json(collector.snapshot()))
}
pub(crate) async fn last_seen(admin: Admin) -> Result<Json<LastSeen>, Error> {
let nodes = admin.db_ref().last_seen().await?;
let mut last_seen: BTreeMap<OffsetDateTime, BTreeSet<String>> = BTreeMap::new();
let mut never = Vec::new();
for (domain, datetime) in nodes {
if let Some(datetime) = datetime {
last_seen.entry(datetime).or_default().insert(domain);
} else {
never.push(domain);
}
}
Ok(Json(LastSeen { last_seen, never }))
}
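The `last_seen` handler above inverts a list of `(domain, Option<timestamp>)` pairs into a timestamp-keyed map with the `entry`/`or_default` pattern. The same shape, using plain integer timestamps so the sketch runs without the `time` crate:

```rust
use std::collections::{BTreeMap, BTreeSet};

// Invert (domain -> optional last-seen time) into
// (time -> set of domains), collecting never-seen domains separately.
fn group_last_seen(
    nodes: Vec<(String, Option<u64>)>,
) -> (BTreeMap<u64, BTreeSet<String>>, Vec<String>) {
    let mut last_seen: BTreeMap<u64, BTreeSet<String>> = BTreeMap::new();
    let mut never = Vec::new();

    for (domain, time) in nodes {
        match time {
            Some(time) => {
                last_seen.entry(time).or_default().insert(domain);
            }
            None => never.push(domain),
        }
    }

    (last_seen, never)
}

fn main() {
    let (seen, never) = group_last_seen(vec![
        ("a.example".into(), Some(100)),
        ("b.example".into(), Some(100)),
        ("c.example".into(), None),
    ]);
    assert_eq!(seen[&100].len(), 2);
    assert_eq!(never, vec!["c.example".to_string()]);
}
```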


@@ -1,8 +1,8 @@
 use activitystreams::{
     activity::ActorAndObject,
     actor::{Actor, ApActor},
-    iri_string::types::IriString,
     unparsed::UnparsedMutExt,
+    iri_string::types::IriString,
 };
 use activitystreams_ext::{Ext1, UnparsedExtension};
@@ -34,13 +34,11 @@ pub struct PublicKey {
 #[serde(rename_all = "PascalCase")]
 pub enum ValidTypes {
     Accept,
-    Add,
     Announce,
     Create,
     Delete,
     Follow,
     Reject,
-    Remove,
     Undo,
     Update,
 }
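The `rename_all = "PascalCase"` attribute on `ValidTypes` is what lets an incoming activity's `type` string deserialize directly into the enum, so removing `Add` and `Remove` makes those activities fail to parse. A self-contained sketch with a reduced variant list:

```rust
// Reduced sketch of ValidTypes: serde matches the JSON string
// against the PascalCase variant names.
#[derive(Debug, PartialEq, serde::Deserialize)]
#[serde(rename_all = "PascalCase")]
enum ValidTypes {
    Accept,
    Announce,
    Follow,
    Undo,
}

fn main() {
    let parsed: ValidTypes = serde_json::from_str("\"Follow\"").unwrap();
    assert_eq!(parsed, ValidTypes::Follow);
    // "Add" is not a variant here, so it fails to deserialize:
    assert!(serde_json::from_str::<ValidTypes>("\"Add\"").is_err());
}
```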


@@ -1,42 +1,21 @@
-use clap::Parser;
-#[derive(Debug, Parser)]
+use structopt::StructOpt;
+#[derive(Debug, StructOpt)]
 #[structopt(name = "relay", about = "An activitypub relay")]
 pub(crate) struct Args {
-    #[arg(short, help = "A list of domains that should be blocked")]
+    #[structopt(short, help = "A list of domains that should be blocked")]
     blocks: Vec<String>,
-    #[arg(short, help = "A list of domains that should be allowed")]
+    #[structopt(short, help = "A list of domains that should be allowed")]
     allowed: Vec<String>,
-    #[arg(short, long, help = "Undo allowing or blocking domains")]
+    #[structopt(short, long, help = "Undo allowing or blocking domains")]
     undo: bool,
-    #[arg(short, long, help = "List allowed and blocked domains")]
-    list: bool,
-    #[arg(short, long, help = "Get statistics from the server")]
-    stats: bool,
-    #[arg(
-        short,
-        long,
-        help = "List domains by when they were last successfully contacted"
-    )]
-    contacted: bool,
 }
 impl Args {
-    pub(crate) fn any(&self) -> bool {
-        !self.blocks.is_empty()
-            || !self.allowed.is_empty()
-            || self.list
-            || self.stats
-            || self.contacted
-    }
     pub(crate) fn new() -> Self {
-        Self::parse()
+        Self::from_args()
     }
     pub(crate) fn blocks(&self) -> &[String] {
@@ -50,16 +29,4 @@ impl Args {
     pub(crate) fn undo(&self) -> bool {
         self.undo
     }
-    pub(crate) fn list(&self) -> bool {
-        self.list
-    }
-    pub(crate) fn stats(&self) -> bool {
-        self.stats
-    }
-    pub(crate) fn contacted(&self) -> bool {
-        self.contacted
-    }
 }
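The mechanical part of this migration: structopt's `StructOpt` derive and `Self::from_args()` become clap 4's `Parser` derive and `Self::parse()`, and `#[structopt(...)]` field attributes become `#[arg(...)]`. A compilable sketch of the clap-4 side, reduced to two of the fields above:

```rust
use clap::Parser;

// Reduced clap-4 equivalent of the Args struct in the diff above.
#[derive(Debug, Parser)]
#[command(name = "relay", about = "An activitypub relay")]
struct Args {
    /// A list of domains that should be blocked
    #[arg(short)]
    blocks: Vec<String>,

    /// Undo allowing or blocking domains
    #[arg(short, long)]
    undo: bool,
}

fn main() {
    let args = Args::parse(); // structopt spelled this Args::from_args()
    println!("{args:?}");
}
```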

src/build.rs

@@ -5,8 +5,7 @@ fn git_info() {
     if let Ok(output) = Command::new("git").args(["rev-parse", "HEAD"]).output() {
         if output.status.success() {
             let git_hash = String::from_utf8_lossy(&output.stdout);
-            println!("cargo:rustc-env=GIT_HASH={git_hash}");
-            println!("cargo:rustc-env=GIT_SHORT_HASH={}", &git_hash[..8])
+            println!("cargo:rustc-env=GIT_HASH={}", git_hash);
         }
     }
@@ -16,15 +15,15 @@ fn git_info() {
     {
         if output.status.success() {
             let git_branch = String::from_utf8_lossy(&output.stdout);
-            println!("cargo:rustc-env=GIT_BRANCH={git_branch}");
+            println!("cargo:rustc-env=GIT_BRANCH={}", git_branch);
         }
     }
 }
-fn version_info() -> color_eyre::Result<()> {
+fn version_info() -> Result<(), anyhow::Error> {
     let cargo_toml = Path::new(&std::env::var("CARGO_MANIFEST_DIR")?).join("Cargo.toml");
-    let mut file = File::open(cargo_toml)?;
+    let mut file = File::open(&cargo_toml)?;
     let mut cargo_data = String::new();
     file.read_to_string(&mut cargo_data)?;
@@ -32,17 +31,17 @@ fn version_info() -> color_eyre::Result<()> {
     let data: toml::Value = toml::from_str(&cargo_data)?;
     if let Some(version) = data["package"]["version"].as_str() {
-        println!("cargo:rustc-env=PKG_VERSION={version}");
+        println!("cargo:rustc-env=PKG_VERSION={}", version);
     }
     if let Some(name) = data["package"]["name"].as_str() {
-        println!("cargo:rustc-env=PKG_NAME={name}");
+        println!("cargo:rustc-env=PKG_NAME={}", name);
     }
     Ok(())
 }
-fn main() -> color_eyre::Result<()> {
+fn main() -> Result<(), anyhow::Error> {
     dotenv::dotenv().ok();
     git_info();
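Most of this build.rs diff is a single idiom: main captures identifiers directly in format strings (stable since Rust 1.58), while v0.3.20 passes them positionally. Both emit the same cargo directive:

```rust
fn main() {
    let git_hash = "0123456789abcdef";
    // main side: identifier captured in the format string
    println!("cargo:rustc-env=GIT_HASH={git_hash}");
    // v0.3.20 side: positional argument, identical output
    println!("cargo:rustc-env=GIT_HASH={}", git_hash);
}
```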

src/collector.rs

@@ -1,425 +0,0 @@
use metrics::{Key, Metadata, Recorder, SetRecorderError};
use metrics_util::{
registry::{AtomicStorage, GenerationalStorage, Recency, Registry},
MetricKindMask, Summary,
};
use quanta::Clock;
use std::{
collections::{BTreeMap, HashMap},
sync::{atomic::Ordering, Arc, RwLock},
time::Duration,
};
const SECONDS: u64 = 1;
const MINUTES: u64 = 60 * SECONDS;
const HOURS: u64 = 60 * MINUTES;
const DAYS: u64 = 24 * HOURS;
pub(crate) fn recordable(len: usize) -> u32 {
((len as u64) % u64::from(u32::MAX)) as u32
}
type DistributionMap = BTreeMap<Vec<(String, String)>, Summary>;
#[derive(Clone)]
pub struct MemoryCollector {
inner: Arc<Inner>,
}
struct Inner {
descriptions: RwLock<HashMap<String, metrics::SharedString>>,
distributions: RwLock<HashMap<String, DistributionMap>>,
recency: Recency<Key>,
registry: Registry<Key, GenerationalStorage<AtomicStorage>>,
}
#[derive(Debug, serde::Deserialize, serde::Serialize)]
struct Counter {
labels: BTreeMap<String, String>,
value: u64,
}
impl std::fmt::Display for Counter {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let labels = self
.labels
.iter()
.map(|(k, v)| format!("{k}: {v}"))
.collect::<Vec<_>>()
.join(", ");
write!(f, "{labels} - {}", self.value)
}
}
#[derive(Debug, serde::Deserialize, serde::Serialize)]
struct Gauge {
labels: BTreeMap<String, String>,
value: f64,
}
impl std::fmt::Display for Gauge {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let labels = self
.labels
.iter()
.map(|(k, v)| format!("{k}: {v}"))
.collect::<Vec<_>>()
.join(", ");
write!(f, "{labels} - {}", self.value)
}
}
#[derive(Debug, serde::Deserialize, serde::Serialize)]
struct Histogram {
labels: BTreeMap<String, String>,
value: Vec<(f64, Option<f64>)>,
}
impl std::fmt::Display for Histogram {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let labels = self
.labels
.iter()
.map(|(k, v)| format!("{k}: {v}"))
.collect::<Vec<_>>()
.join(", ");
let value = self
.value
.iter()
.map(|(k, v)| {
if let Some(v) = v {
format!("{k}: {v:.6}")
} else {
format!("{k}: None,")
}
})
.collect::<Vec<_>>()
.join(", ");
write!(f, "{labels} - {value}")
}
}
#[derive(Debug, serde::Deserialize, serde::Serialize)]
pub(crate) struct Snapshot {
counters: HashMap<String, Vec<Counter>>,
gauges: HashMap<String, Vec<Gauge>>,
histograms: HashMap<String, Vec<Histogram>>,
}
const PAIRS: [((&str, &str), &str); 2] = [
(
(
"background-jobs.worker.started",
"background-jobs.worker.finished",
),
"background-jobs.worker.running",
),
(
(
"background-jobs.job.started",
"background-jobs.job.finished",
),
"background-jobs.job.running",
),
];
#[derive(Default)]
struct MergeCounter {
start: Option<Counter>,
finish: Option<Counter>,
}
impl MergeCounter {
fn merge(self) -> Option<Counter> {
match (self.start, self.finish) {
(Some(start), Some(end)) => Some(Counter {
labels: start.labels,
value: start.value.saturating_sub(end.value),
}),
(Some(only), None) => Some(only),
(None, Some(only)) => Some(Counter {
labels: only.labels,
value: 0,
}),
(None, None) => None,
}
}
}
impl Snapshot {
pub(crate) fn present(self) {
if !self.counters.is_empty() {
println!("Counters");
let mut merging = HashMap::new();
for (key, counters) in self.counters {
if let Some(((start, _), name)) = PAIRS
.iter()
.find(|((start, finish), _)| *start == key || *finish == key)
{
let entry = merging.entry(name).or_insert_with(HashMap::new);
for counter in counters {
let merge_counter = entry
.entry(counter.labels.clone())
.or_insert_with(MergeCounter::default);
if key == *start {
merge_counter.start = Some(counter);
} else {
merge_counter.finish = Some(counter);
}
}
continue;
}
println!("\t{key}");
for counter in counters {
println!("\t\t{counter}");
}
}
for (key, counters) in merging {
println!("\t{key}");
for (_, counter) in counters {
if let Some(counter) = counter.merge() {
println!("\t\t{counter}");
}
}
}
}
if !self.gauges.is_empty() {
println!("Gauges");
for (key, gauges) in self.gauges {
println!("\t{key}");
for gauge in gauges {
println!("\t\t{gauge}");
}
}
}
if !self.histograms.is_empty() {
println!("Histograms");
for (key, histograms) in self.histograms {
println!("\t{key}");
for histogram in histograms {
println!("\t\t{histogram}");
}
}
}
}
}
fn key_to_parts(key: &Key) -> (String, Vec<(String, String)>) {
let labels = key
.labels()
.map(|label| (label.key().to_string(), label.value().to_string()))
.collect();
let name = key.name().to_string();
(name, labels)
}
impl Inner {
fn snapshot_counters(&self) -> HashMap<String, Vec<Counter>> {
let mut counters = HashMap::new();
for (key, counter) in self.registry.get_counter_handles() {
let gen = counter.get_generation();
if !self.recency.should_store_counter(&key, gen, &self.registry) {
continue;
}
let (name, labels) = key_to_parts(&key);
let value = counter.get_inner().load(Ordering::Acquire);
counters.entry(name).or_insert_with(Vec::new).push(Counter {
labels: labels.into_iter().collect(),
value,
});
}
counters
}
fn snapshot_gauges(&self) -> HashMap<String, Vec<Gauge>> {
let mut gauges = HashMap::new();
for (key, gauge) in self.registry.get_gauge_handles() {
let gen = gauge.get_generation();
if !self.recency.should_store_gauge(&key, gen, &self.registry) {
continue;
}
let (name, labels) = key_to_parts(&key);
let value = f64::from_bits(gauge.get_inner().load(Ordering::Acquire));
gauges.entry(name).or_insert_with(Vec::new).push(Gauge {
labels: labels.into_iter().collect(),
value,
})
}
gauges
}
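// Histograms work differently from counters and gauges: samples are drained
// into rolling Summary sketches keyed by (name, labels), stale entries are
// evicted, and fixed quantiles are read out of each sketch per snapshot.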
fn snapshot_histograms(&self) -> HashMap<String, Vec<Histogram>> {
for (key, histogram) in self.registry.get_histogram_handles() {
let gen = histogram.get_generation();
let (name, labels) = key_to_parts(&key);
if !self
.recency
.should_store_histogram(&key, gen, &self.registry)
{
let mut d = self.distributions.write().unwrap();
let delete_by_name = if let Some(by_name) = d.get_mut(&name) {
by_name.remove(&labels);
by_name.is_empty()
} else {
false
};
drop(d);
if delete_by_name {
self.descriptions.write().unwrap().remove(&name);
}
continue;
}
let mut d = self.distributions.write().unwrap();
let outer_entry = d.entry(name.clone()).or_default();
let entry = outer_entry
.entry(labels)
.or_insert_with(Summary::with_defaults);
histogram.get_inner().clear_with(|samples| {
for sample in samples {
entry.add(*sample);
}
});
let mut total_len = 0;
for dist_map in d.values() {
total_len += dist_map.len();
}
metrics::gauge!("relay.collector.distributions.size").set(recordable(total_len));
}
let d = self.distributions.read().unwrap().clone();
d.into_iter()
.map(|(key, value)| {
(
key,
value
.into_iter()
.map(|(labels, summary)| Histogram {
labels: labels.into_iter().collect(),
value: [0.001, 0.01, 0.05, 0.1, 0.5, 0.9, 0.99, 1.0]
.into_iter()
.map(|q| (q, summary.quantile(q)))
.collect(),
})
.collect(),
)
})
.collect()
}
fn snapshot(&self) -> Snapshot {
Snapshot {
counters: self.snapshot_counters(),
gauges: self.snapshot_gauges(),
histograms: self.snapshot_histograms(),
}
}
}
impl MemoryCollector {
pub(crate) fn new() -> Self {
MemoryCollector {
inner: Arc::new(Inner {
descriptions: Default::default(),
distributions: Default::default(),
recency: Recency::new(
Clock::new(),
MetricKindMask::ALL,
Some(Duration::from_secs(5 * DAYS)),
),
registry: Registry::new(GenerationalStorage::atomic()),
}),
}
}
pub(crate) fn snapshot(&self) -> Snapshot {
self.inner.snapshot()
}
fn add_description_if_missing(
&self,
key: &metrics::KeyName,
description: metrics::SharedString,
) {
let mut d = self.inner.descriptions.write().unwrap();
d.entry(key.as_str().to_owned()).or_insert(description);
metrics::gauge!("relay.collector.descriptions.size").set(recordable(d.len()));
}
pub(crate) fn install(&self) -> Result<(), SetRecorderError<Self>> {
metrics::set_global_recorder(self.clone())
}
}
impl Recorder for MemoryCollector {
fn describe_counter(
&self,
key: metrics::KeyName,
_: Option<metrics::Unit>,
description: metrics::SharedString,
) {
self.add_description_if_missing(&key, description)
}
fn describe_gauge(
&self,
key: metrics::KeyName,
_: Option<metrics::Unit>,
description: metrics::SharedString,
) {
self.add_description_if_missing(&key, description)
}
fn describe_histogram(
&self,
key: metrics::KeyName,
_: Option<metrics::Unit>,
description: metrics::SharedString,
) {
self.add_description_if_missing(&key, description)
}
fn register_counter(&self, key: &Key, _: &Metadata<'_>) -> metrics::Counter {
self.inner
.registry
.get_or_create_counter(key, |c| c.clone().into())
}
fn register_gauge(&self, key: &Key, _: &Metadata<'_>) -> metrics::Gauge {
self.inner
.registry
.get_or_create_gauge(key, |c| c.clone().into())
}
fn register_histogram(&self, key: &Key, _: &Metadata<'_>) -> metrics::Histogram {
self.inner
.registry
.get_or_create_histogram(key, |c| c.clone().into())
}
}
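A minimal sketch of how this collector is meant to be driven, assuming this crate's types and the metrics-macro style used above (the bare main and the sample counter are illustrative only):

fn main() {
    let collector = MemoryCollector::new();

    // Registers the collector as the global `metrics` recorder; this fails
    // (and is ignored here) if another recorder was installed first.
    let _ = collector.install();

    // Any instrumented code now feeds the registry.
    metrics::counter!("background-jobs.job.started").increment(1);

    // Prints the Counters/Gauges/Histograms sections, folding the
    // started/finished pairs into synthetic "running" counters.
    collector.snapshot().present();
}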

View file

@ -1,22 +1,20 @@
 use crate::{
+    data::{ActorCache, State},
     error::Error,
-    extractors::{AdminConfig, XApiToken},
+    middleware::MyVerify,
+    requests::Requests,
 };
 use activitystreams::{
     iri,
     iri_string::{
-        format::ToDedicatedString,
         resolve::FixedBaseResolver,
         types::{IriAbsoluteString, IriFragmentStr, IriRelativeStr, IriString},
     },
 };
 use config::Environment;
-use http_signature_normalization_actix::{digest::ring::Sha256, prelude::VerifyDigest};
-use rustls::sign::CertifiedKey;
-use std::{
-    net::{IpAddr, SocketAddr},
-    path::PathBuf,
-};
+use http_signature_normalization_actix::prelude::{VerifyDigest, VerifySignature};
+use sha2::{Digest, Sha256};
+use std::{net::IpAddr, path::PathBuf};
 use uuid::Uuid;

 #[derive(Clone, Debug, serde::Deserialize)]
@ -31,24 +29,7 @@ pub(crate) struct ParsedConfig {
     publish_blocks: bool,
     sled_path: PathBuf,
     source_repo: IriString,
-    repository_commit_base: String,
     opentelemetry_url: Option<IriString>,
-    telegram_token: Option<String>,
-    telegram_admin_handle: Option<String>,
-    api_token: Option<String>,
-    tls_key: Option<PathBuf>,
-    tls_cert: Option<PathBuf>,
-    footer_blurb: Option<String>,
-    local_domains: Option<String>,
-    local_blurb: Option<String>,
-    prometheus_addr: Option<IpAddr>,
-    prometheus_port: Option<u16>,
-    deliver_concurrency: u64,
-    client_timeout: u64,
-    proxy_url: Option<IriString>,
-    proxy_username: Option<String>,
-    proxy_password: Option<String>,
-    signature_threads: Option<usize>,
 }

 #[derive(Clone)]
@ -64,36 +45,6 @@ pub struct Config {
     sled_path: PathBuf,
     source_repo: IriString,
     opentelemetry_url: Option<IriString>,
-    telegram_token: Option<String>,
-    telegram_admin_handle: Option<String>,
-    api_token: Option<String>,
-    tls: Option<TlsConfig>,
-    footer_blurb: Option<String>,
-    local_domains: Vec<String>,
-    local_blurb: Option<String>,
-    prometheus_config: Option<PrometheusConfig>,
-    deliver_concurrency: u64,
-    client_timeout: u64,
-    proxy_config: Option<ProxyConfig>,
-    signature_threads: Option<usize>,
-}
-
-#[derive(Clone)]
-struct TlsConfig {
-    key: PathBuf,
-    cert: PathBuf,
-}
-
-#[derive(Clone, Debug)]
-struct PrometheusConfig {
-    addr: IpAddr,
-    port: u16,
-}
-
-#[derive(Clone, Debug)]
-struct ProxyConfig {
-    url: IriString,
-    auth: Option<(String, String)>,
 }

 #[derive(Debug)]
@ -110,19 +61,6 @@ pub enum UrlKind {
     Outbox,
 }

-#[derive(Debug)]
-pub enum AdminUrlKind {
-    Allow,
-    Disallow,
-    Block,
-    Unblock,
-    Allowed,
-    Blocked,
-    Connected,
-    Stats,
-    LastSeen,
-}
-
 impl std::fmt::Debug for Config {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         f.debug_struct("Config")
@ -140,19 +78,6 @@ impl std::fmt::Debug for Config {
                 "opentelemetry_url",
                 &self.opentelemetry_url.as_ref().map(|url| url.to_string()),
             )
-            .field("telegram_token", &"[redacted]")
-            .field("telegram_admin_handle", &self.telegram_admin_handle)
-            .field("api_token", &"[redacted]")
-            .field("tls_key", &"[redacted]")
-            .field("tls_cert", &"[redacted]")
-            .field("footer_blurb", &self.footer_blurb)
-            .field("local_domains", &self.local_domains)
-            .field("local_blurb", &self.local_blurb)
-            .field("prometheus_config", &self.prometheus_config)
-            .field("deliver_concurrency", &self.deliver_concurrency)
-            .field("client_timeout", &self.client_timeout)
-            .field("proxy_config", &self.proxy_config)
-            .field("signature_threads", &self.signature_threads)
             .finish()
     }
 }
@ -162,102 +87,22 @@ impl Config {
         let config = config::Config::builder()
             .set_default("hostname", "localhost:8080")?
             .set_default("addr", "127.0.0.1")?
-            .set_default("port", 8080u64)?
+            .set_default::<_, u64>("port", 8080)?
             .set_default("debug", true)?
             .set_default("restricted_mode", false)?
-            .set_default("validate_signatures", true)?
-            .set_default("https", true)?
+            .set_default("validate_signatures", false)?
+            .set_default("https", false)?
             .set_default("publish_blocks", false)?
             .set_default("sled_path", "./sled/db-0-34")?
            .set_default("source_repo", "https://git.asonix.dog/asonix/relay")?
-            .set_default("repository_commit_base", "/src/commit/")?
             .set_default("opentelemetry_url", None as Option<&str>)?
-            .set_default("telegram_token", None as Option<&str>)?
-            .set_default("telegram_admin_handle", None as Option<&str>)?
-            .set_default("api_token", None as Option<&str>)?
-            .set_default("tls_key", None as Option<&str>)?
-            .set_default("tls_cert", None as Option<&str>)?
-            .set_default("footer_blurb", None as Option<&str>)?
-            .set_default("local_domains", None as Option<&str>)?
-            .set_default("local_blurb", None as Option<&str>)?
-            .set_default("prometheus_addr", None as Option<&str>)?
-            .set_default("prometheus_port", None as Option<u16>)?
-            .set_default("deliver_concurrency", 8u64)?
-            .set_default("client_timeout", 10u64)?
-            .set_default("proxy_url", None as Option<&str>)?
-            .set_default("proxy_username", None as Option<&str>)?
-            .set_default("proxy_password", None as Option<&str>)?
-            .set_default("signature_threads", None as Option<u64>)?
             .add_source(Environment::default())
             .build()?;

         let config: ParsedConfig = config.try_deserialize()?;

         let scheme = if config.https { "https" } else { "http" };
-        let base_uri = iri!(format!("{scheme}://{}", config.hostname)).into_absolute();
-
-        let tls = match (config.tls_key, config.tls_cert) {
-            (Some(key), Some(cert)) => Some(TlsConfig { key, cert }),
-            (Some(_), None) => {
-                tracing::warn!("TLS_KEY is set but TLS_CERT isn't , not building TLS config");
-                None
-            }
-            (None, Some(_)) => {
-                tracing::warn!("TLS_CERT is set but TLS_KEY isn't , not building TLS config");
-                None
-            }
-            (None, None) => None,
-        };
-
-        let local_domains = config
-            .local_domains
-            .iter()
-            .flat_map(|s| s.split(','))
-            .map(|d| d.to_string())
-            .collect();
-
-        let prometheus_config = match (config.prometheus_addr, config.prometheus_port) {
-            (Some(addr), Some(port)) => Some(PrometheusConfig { addr, port }),
-            (Some(_), None) => {
-                tracing::warn!("PROMETHEUS_ADDR is set but PROMETHEUS_PORT is not set, not building Prometheus config");
-                None
-            }
-            (None, Some(_)) => {
-                tracing::warn!("PROMETHEUS_PORT is set but PROMETHEUS_ADDR is not set, not building Prometheus config");
-                None
-            }
-            (None, None) => None,
-        };
-
-        let proxy_config = match (config.proxy_username, config.proxy_password) {
-            (Some(username), Some(password)) => config.proxy_url.map(|url| ProxyConfig {
-                url,
-                auth: Some((username, password)),
-            }),
-            (Some(_), None) => {
-                tracing::warn!(
-                    "PROXY_USERNAME is set but PROXY_PASSWORD is not set, not setting Proxy Auth"
-                );
-                config.proxy_url.map(|url| ProxyConfig { url, auth: None })
-            }
-            (None, Some(_)) => {
-                tracing::warn!(
-                    "PROXY_PASSWORD is set but PROXY_USERNAME is not set, not setting Proxy Auth"
-                );
-                config.proxy_url.map(|url| ProxyConfig { url, auth: None })
-            }
-            (None, None) => config.proxy_url.map(|url| ProxyConfig { url, auth: None }),
-        };
-
-        let source_url = match Self::git_hash() {
-            Some(hash) => format!(
-                "{}{}{hash}",
-                config.source_repo, config.repository_commit_base
-            )
-            .parse()
-            .expect("constructed source URL is valid"),
-            None => config.source_repo.clone(),
-        };
+        let base_uri = iri!(format!("{}://{}", scheme, config.hostname)).into_absolute();

         Ok(Config {
             hostname: config.hostname,
@ -269,118 +114,11 @@ impl Config {
             publish_blocks: config.publish_blocks,
             base_uri,
             sled_path: config.sled_path,
-            source_repo: source_url,
+            source_repo: config.source_repo,
             opentelemetry_url: config.opentelemetry_url,
-            telegram_token: config.telegram_token,
-            telegram_admin_handle: config.telegram_admin_handle,
-            api_token: config.api_token,
-            tls,
-            footer_blurb: config.footer_blurb,
-            local_domains,
-            local_blurb: config.local_blurb,
-            prometheus_config,
-            deliver_concurrency: config.deliver_concurrency,
-            client_timeout: config.client_timeout,
-            proxy_config,
-            signature_threads: config.signature_threads,
         })
     }

-    pub(crate) fn signature_threads(&self) -> usize {
-        self.signature_threads
-            .unwrap_or_else(|| {
-                std::thread::available_parallelism()
-                    .map(usize::from)
-                    .map_err(|e| tracing::warn!("Failed to get parallelism, {e}"))
-                    .unwrap_or(1)
-            })
-            .max(1)
-    }
-
-    pub(crate) fn client_timeout(&self) -> u64 {
-        self.client_timeout
-    }
-
-    pub(crate) fn deliver_concurrency(&self) -> u64 {
-        self.deliver_concurrency
-    }
-
-    pub(crate) fn prometheus_bind_address(&self) -> Option<SocketAddr> {
-        let config = self.prometheus_config.as_ref()?;
-
-        Some((config.addr, config.port).into())
-    }
-
-    pub(crate) async fn open_keys(&self) -> Result<Option<CertifiedKey>, Error> {
-        let tls = if let Some(tls) = &self.tls {
-            tls
-        } else {
-            tracing::info!("No TLS config present");
-            return Ok(None);
-        };
-
-        let certs_bytes = tokio::fs::read(&tls.cert).await?;
-        let certs =
-            rustls_pemfile::certs(&mut certs_bytes.as_slice()).collect::<Result<Vec<_>, _>>()?;
-
-        if certs.is_empty() {
-            tracing::warn!("No certs read from certificate file");
-            return Ok(None);
-        }
-
-        let key_bytes = tokio::fs::read(&tls.key).await?;
-        let key = if let Some(key) = rustls_pemfile::private_key(&mut key_bytes.as_slice())? {
-            key
-        } else {
-            tracing::warn!("Failed to read private key");
-            return Ok(None);
-        };
-
-        let key = rustls::crypto::ring::sign::any_supported_type(&key)?;
-
-        Ok(Some(CertifiedKey::new(certs, key)))
-    }
-
-    pub(crate) fn footer_blurb(&self) -> Option<crate::templates::Html<String>> {
-        if let Some(blurb) = &self.footer_blurb {
-            if !blurb.is_empty() {
-                return Some(crate::templates::Html(
-                    ammonia::Builder::new()
-                        .add_tag_attributes("a", &["rel"])
-                        .add_tag_attributes("area", &["rel"])
-                        .add_tag_attributes("link", &["rel"])
-                        .link_rel(None)
-                        .clean(blurb)
-                        .to_string(),
-                ));
-            }
-        }
-
-        None
-    }
-
-    pub(crate) fn local_blurb(&self) -> Option<crate::templates::Html<String>> {
-        if let Some(blurb) = &self.local_blurb {
-            if !blurb.is_empty() {
-                return Some(crate::templates::Html(
-                    ammonia::Builder::new()
-                        .add_tag_attributes("a", &["rel"])
-                        .add_tag_attributes("area", &["rel"])
-                        .add_tag_attributes("link", &["rel"])
-                        .link_rel(None)
-                        .clean(blurb)
-                        .to_string(),
-                ));
-            }
-        }
-
-        None
-    }
-
-    pub(crate) fn local_domains(&self) -> &[String] {
-        &self.local_domains
-    }
-
     pub(crate) fn sled_path(&self) -> &PathBuf {
         &self.sled_path
     }
@ -397,21 +135,16 @@ impl Config {
         }
     }

-    pub(crate) fn x_api_token(&self) -> Option<XApiToken> {
-        self.api_token.clone().map(XApiToken::new)
-    }
-
-    pub(crate) fn admin_config(&self) -> Option<actix_web::web::Data<AdminConfig>> {
-        if let Some(api_token) = &self.api_token {
-            match AdminConfig::build(api_token) {
-                Ok(conf) => Some(actix_web::web::Data::new(conf)),
-                Err(e) => {
-                    tracing::error!("Error creating admin config: {e}");
-                    None
-                }
-            }
+    pub(crate) fn signature_middleware(
+        &self,
+        requests: Requests,
+        actors: ActorCache,
+        state: State,
+    ) -> VerifySignature<MyVerify> {
+        if self.validate_signatures {
+            VerifySignature::new(MyVerify(requests, actors, state), Default::default())
         } else {
-            None
+            VerifySignature::new(MyVerify(requests, actors, state), Default::default()).optional()
         }
     }
@ -445,7 +178,7 @@ impl Config {
     pub(crate) fn software_version() -> String {
         if let Some(git) = Self::git_version() {
-            return format!("v{}-{git}", Self::version());
+            return format!("v{}-{}", Self::version(), git);
         }

         format!("v{}", Self::version())
@ -453,9 +186,9 @@ impl Config {
     fn git_version() -> Option<String> {
         let branch = Self::git_branch()?;
-        let hash = Self::git_short_hash()?;
+        let hash = Self::git_hash()?;

-        Some(format!("{branch}-{hash}"))
+        Some(format!("{}-{}", branch, hash))
     }

     fn name() -> &'static str {
@ -474,10 +207,6 @@ impl Config {
         option_env!("GIT_HASH")
     }

-    fn git_short_hash() -> Option<&'static str> {
-        option_env!("GIT_SHORT_HASH")
-    }
-
     pub(crate) fn user_agent(&self) -> String {
         format!(
             "{} ({}/{}; +{})",
@ -488,12 +217,6 @@ impl Config {
         )
     }

-    pub(crate) fn proxy_config(&self) -> Option<(&IriString, Option<(&str, &str)>)> {
-        self.proxy_config.as_ref().map(|ProxyConfig { url, auth }| {
-            (url, auth.as_ref().map(|(u, p)| (u.as_str(), p.as_str())))
-        })
-    }
-
     pub(crate) fn source_code(&self) -> &IriString {
         &self.source_repo
     }
@ -502,82 +225,42 @@ impl Config {
         self.opentelemetry_url.as_ref()
     }

-    pub(crate) fn telegram_info(&self) -> Option<(&str, &str)> {
-        self.telegram_token.as_deref().and_then(|token| {
-            let handle = self.telegram_admin_handle.as_deref()?;
-            Some((token, handle))
-        })
-    }
-
     pub(crate) fn generate_url(&self, kind: UrlKind) -> IriString {
         self.do_generate_url(kind).expect("Generated valid IRI")
     }

-    #[tracing::instrument(fields(base_uri = tracing::field::debug(&self.base_uri), kind = tracing::field::debug(&kind)))]
     fn do_generate_url(&self, kind: UrlKind) -> Result<IriString, Error> {
         let iri = match kind {
             UrlKind::Activity => FixedBaseResolver::new(self.base_uri.as_ref())
-                .resolve(IriRelativeStr::new(&format!("activity/{}", Uuid::new_v4()))?.as_ref())
-                .try_to_dedicated_string()?,
+                .resolve(IriRelativeStr::new(&format!("activity/{}", Uuid::new_v4()))?.as_ref())?,
             UrlKind::Actor => FixedBaseResolver::new(self.base_uri.as_ref())
-                .resolve(IriRelativeStr::new("actor")?.as_ref())
-                .try_to_dedicated_string()?,
+                .resolve(IriRelativeStr::new("actor")?.as_ref())?,
             UrlKind::Followers => FixedBaseResolver::new(self.base_uri.as_ref())
-                .resolve(IriRelativeStr::new("followers")?.as_ref())
-                .try_to_dedicated_string()?,
+                .resolve(IriRelativeStr::new("followers")?.as_ref())?,
             UrlKind::Following => FixedBaseResolver::new(self.base_uri.as_ref())
-                .resolve(IriRelativeStr::new("following")?.as_ref())
-                .try_to_dedicated_string()?,
+                .resolve(IriRelativeStr::new("following")?.as_ref())?,
             UrlKind::Inbox => FixedBaseResolver::new(self.base_uri.as_ref())
-                .resolve(IriRelativeStr::new("inbox")?.as_ref())
-                .try_to_dedicated_string()?,
+                .resolve(IriRelativeStr::new("inbox")?.as_ref())?,
             UrlKind::Index => self.base_uri.clone().into(),
             UrlKind::MainKey => {
                 let actor = IriRelativeStr::new("actor")?;
                 let fragment = IriFragmentStr::new("main-key")?;
-                let mut resolved = FixedBaseResolver::new(self.base_uri.as_ref())
-                    .resolve(actor.as_ref())
-                    .try_to_dedicated_string()?;
+                let mut resolved =
+                    FixedBaseResolver::new(self.base_uri.as_ref()).resolve(actor.as_ref())?;

                 resolved.set_fragment(Some(fragment));
                 resolved
             }
             UrlKind::Media(uuid) => FixedBaseResolver::new(self.base_uri.as_ref())
-                .resolve(IriRelativeStr::new(&format!("media/{uuid}"))?.as_ref())
-                .try_to_dedicated_string()?,
+                .resolve(IriRelativeStr::new(&format!("media/{}", uuid))?.as_ref())?,
             UrlKind::NodeInfo => FixedBaseResolver::new(self.base_uri.as_ref())
-                .resolve(IriRelativeStr::new("nodeinfo/2.0.json")?.as_ref())
-                .try_to_dedicated_string()?,
+                .resolve(IriRelativeStr::new("nodeinfo/2.0.json")?.as_ref())?,
             UrlKind::Outbox => FixedBaseResolver::new(self.base_uri.as_ref())
-                .resolve(IriRelativeStr::new("outbox")?.as_ref())
-                .try_to_dedicated_string()?,
+                .resolve(IriRelativeStr::new("outbox")?.as_ref())?,
         };

         Ok(iri)
     }
-
-    pub(crate) fn generate_admin_url(&self, kind: AdminUrlKind) -> IriString {
-        self.do_generate_admin_url(kind)
-            .expect("Generated valid IRI")
-    }
-
-    fn do_generate_admin_url(&self, kind: AdminUrlKind) -> Result<IriString, Error> {
-        let path = match kind {
-            AdminUrlKind::Allow => "api/v1/admin/allow",
-            AdminUrlKind::Disallow => "api/v1/admin/disallow",
-            AdminUrlKind::Block => "api/v1/admin/block",
-            AdminUrlKind::Unblock => "api/v1/admin/unblock",
-            AdminUrlKind::Allowed => "api/v1/admin/allowed",
-            AdminUrlKind::Blocked => "api/v1/admin/blocked",
-            AdminUrlKind::Connected => "api/v1/admin/connected",
-            AdminUrlKind::Stats => "api/v1/admin/stats",
-            AdminUrlKind::LastSeen => "api/v1/admin/last_seen",
-        };
-
-        let iri = FixedBaseResolver::new(self.base_uri.as_ref())
-            .resolve(IriRelativeStr::new(path)?.as_ref())
-            .try_to_dedicated_string()?;
-
-        Ok(iri)
-    }
 }
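A pattern worth noting in the removed main-side code above: settings that only work in pairs (TLS key and cert, Prometheus address and port, proxy username and password) are validated together, and a half-configured pair is logged and dropped rather than treated as an error. A condensed sketch of that shape, with hypothetical names:

fn pair<A, B>(left: Option<A>, right: Option<B>, what: &str) -> Option<(A, B)> {
    match (left, right) {
        (Some(a), Some(b)) => Some((a, b)),
        (None, None) => None,
        // Exactly one half is set: warn and build no config at all.
        _ => {
            tracing::warn!("only one half of the {what} settings pair is set, ignoring it");
            None
        }
    }
}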

View file

@ -2,7 +2,7 @@ use crate::{
     apub::AcceptedActors,
     db::{Actor, Db},
     error::{Error, ErrorKind},
-    requests::{BreakerStrategy, Requests},
+    requests::Requests,
 };
 use activitystreams::{iri_string::types::IriString, prelude::*};
 use std::time::{Duration, SystemTime};
@ -37,7 +37,7 @@ impl ActorCache {
         ActorCache { db }
     }

-    #[tracing::instrument(level = "debug", name = "Get Actor", skip_all, fields(id = id.to_string().as_str()))]
+    #[tracing::instrument(name = "Get Actor", fields(id = id.to_string().as_str(), requests))]
     pub(crate) async fn get(
         &self,
         id: &IriString,
@ -54,26 +54,24 @@ impl ActorCache {
             .map(MaybeCached::Fetched)
     }

-    #[tracing::instrument(level = "debug", name = "Add Connection", skip(self))]
+    #[tracing::instrument(name = "Add Connection")]
     pub(crate) async fn add_connection(&self, actor: Actor) -> Result<(), Error> {
         self.db.add_connection(actor.id.clone()).await?;
         self.db.save_actor(actor).await
     }

-    #[tracing::instrument(level = "debug", name = "Remove Connection", skip(self))]
+    #[tracing::instrument(name = "Remove Connection")]
     pub(crate) async fn remove_connection(&self, actor: &Actor) -> Result<(), Error> {
         self.db.remove_connection(actor.id.clone()).await
     }

-    #[tracing::instrument(level = "debug", name = "Fetch remote actor", skip_all, fields(id = id.to_string().as_str()))]
+    #[tracing::instrument(name = "Fetch remote actor", fields(id = id.to_string().as_str(), requests))]
     pub(crate) async fn get_no_cache(
         &self,
         id: &IriString,
         requests: &Requests,
     ) -> Result<Actor, Error> {
-        let accepted_actor = requests
-            .fetch::<AcceptedActors>(id, BreakerStrategy::Require2XX)
-            .await?;
+        let accepted_actor = requests.fetch::<AcceptedActors>(id.as_str()).await?;

         let input_authority = id.authority_components().ok_or(ErrorKind::MissingDomain)?;
         let accepted_actor_id = accepted_actor
@ -99,6 +97,6 @@ impl ActorCache {
 fn get_inbox(actor: &AcceptedActors) -> Result<&IriString, Error> {
     Ok(actor
         .endpoints()?
-        .and_then(|e| e.shared_inbox.as_ref())
+        .and_then(|e| e.shared_inbox)
         .unwrap_or(actor.inbox()?))
 }

View file

@ -1,28 +0,0 @@
use activitystreams::iri_string::types::IriStr;
use std::{collections::HashMap, sync::Mutex};
use time::OffsetDateTime;
pub(crate) struct LastOnline {
domains: Mutex<HashMap<String, OffsetDateTime>>,
}
impl LastOnline {
pub(crate) fn mark_seen(&self, iri: &IriStr) {
if let Some(authority) = iri.authority_str() {
let mut guard = self.domains.lock().unwrap();
guard.insert(authority.to_string(), OffsetDateTime::now_utc());
metrics::gauge!("relay.last-online.size",)
.set(crate::collector::recordable(guard.len()));
}
}
pub(crate) fn take(&self) -> HashMap<String, OffsetDateTime> {
std::mem::take(&mut *self.domains.lock().unwrap())
}
pub(crate) fn empty() -> Self {
Self {
domains: Mutex::new(HashMap::default()),
}
}
}
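A small usage sketch for this tracker (the relay IRI is hypothetical, and expect stands in for real error handling):

use activitystreams::iri_string::types::IriStr;

fn example(last_online: &LastOnline) {
    let iri = IriStr::new("https://relay.example/actor").expect("valid IRI");
    last_online.mark_seen(iri);

    // Draining resets the map; the caller typically persists the result.
    let seen = last_online.take();
    assert!(seen.contains_key("relay.example"));
}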

View file

@ -1,7 +1,14 @@
-use crate::{db::Db, error::Error};
+use crate::{
+    db::{Db, MediaMeta},
+    error::Error,
+};
 use activitystreams::iri_string::types::IriString;
+use actix_web::web::Bytes;
+use std::time::{Duration, SystemTime};
 use uuid::Uuid;

+static MEDIA_DURATION: Duration = Duration::from_secs(60 * 60 * 24 * 2);
+
 #[derive(Clone, Debug)]
 pub struct MediaCache {
     db: Db,
@ -12,17 +19,43 @@ impl MediaCache {
         MediaCache { db }
     }

-    #[tracing::instrument(level = "debug", name = "Get media uuid", skip_all, fields(url = url.to_string().as_str()))]
+    #[tracing::instrument(name = "Get media uuid", fields(url = url.to_string().as_str()))]
     pub(crate) async fn get_uuid(&self, url: IriString) -> Result<Option<Uuid>, Error> {
         self.db.media_id(url).await
     }

-    #[tracing::instrument(level = "debug", name = "Get media url", skip(self))]
+    #[tracing::instrument(name = "Get media url")]
     pub(crate) async fn get_url(&self, uuid: Uuid) -> Result<Option<IriString>, Error> {
         self.db.media_url(uuid).await
     }

-    #[tracing::instrument(name = "Store media url", skip_all, fields(url = url.to_string().as_str()))]
+    #[tracing::instrument(name = "Is media outdated")]
+    pub(crate) async fn is_outdated(&self, uuid: Uuid) -> Result<bool, Error> {
+        if let Some(meta) = self.db.media_meta(uuid).await? {
+            if meta.saved_at + MEDIA_DURATION > SystemTime::now() {
+                return Ok(false);
+            }
+        }
+
+        Ok(true)
+    }
+
+    #[tracing::instrument(name = "Get media bytes")]
+    pub(crate) async fn get_bytes(&self, uuid: Uuid) -> Result<Option<(String, Bytes)>, Error> {
+        if let Some(meta) = self.db.media_meta(uuid).await? {
+            if meta.saved_at + MEDIA_DURATION > SystemTime::now() {
+                return self
+                    .db
+                    .media_bytes(uuid)
+                    .await
+                    .map(|opt| opt.map(|bytes| (meta.media_type, bytes)));
+            }
+        }
+
+        Ok(None)
+    }
+
+    #[tracing::instrument(name = "Store media url", fields(url = url.to_string().as_str()))]
     pub(crate) async fn store_url(&self, url: IriString) -> Result<Uuid, Error> {
         let uuid = Uuid::new_v4();

@ -30,4 +63,23 @@ impl MediaCache {

         Ok(uuid)
     }
+
+    #[tracing::instrument(name = "store media bytes", skip(bytes))]
+    pub(crate) async fn store_bytes(
+        &self,
+        uuid: Uuid,
+        media_type: String,
+        bytes: Bytes,
+    ) -> Result<(), Error> {
+        self.db
+            .save_bytes(
+                uuid,
+                MediaMeta {
+                    media_type,
+                    saved_at: SystemTime::now(),
+                },
+                bytes,
+            )
+            .await
+    }
 }
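On the v0.3.20 side of this diff, cached media stays valid for MEDIA_DURATION (two days) after it is stored, and both is_outdated and get_bytes gate on the same comparison. The check in isolation, with an assumed saved_at value:

use std::time::{Duration, SystemTime};

const MEDIA_DURATION: Duration = Duration::from_secs(60 * 60 * 24 * 2); // two days

// Fresh while `saved_at + MEDIA_DURATION` still lies in the future.
fn is_fresh(saved_at: SystemTime) -> bool {
    saved_at + MEDIA_DURATION > SystemTime::now()
}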

View file

@ -1,11 +1,9 @@
 mod actor;
-mod last_online;
 mod media;
 mod node;
 mod state;

 pub(crate) use actor::ActorCache;
-pub(crate) use last_online::LastOnline;
 pub(crate) use media::MediaCache;
 pub(crate) use node::{Node, NodeCache};
 pub(crate) use state::State;

View file

@ -34,7 +34,7 @@ impl NodeCache {
         NodeCache { db }
     }

-    #[tracing::instrument(level = "debug", name = "Get nodes", skip(self))]
+    #[tracing::instrument(name = "Get nodes")]
     pub(crate) async fn nodes(&self) -> Result<Vec<Node>, Error> {
         let infos = self.db.connected_info().await?;
         let instances = self.db.connected_instance().await?;
@ -57,7 +57,7 @@ impl NodeCache {
         Ok(vec)
     }

-    #[tracing::instrument(level = "debug", name = "Is NodeInfo Outdated", skip_all, fields(actor_id = actor_id.to_string().as_str()))]
+    #[tracing::instrument(name = "Is NodeInfo Outdated", fields(actor_id = actor_id.to_string().as_str()))]
     pub(crate) async fn is_nodeinfo_outdated(&self, actor_id: IriString) -> bool {
         self.db
             .info(actor_id)
@ -66,7 +66,7 @@ impl NodeCache {
             .unwrap_or(true)
     }

-    #[tracing::instrument(level = "debug", name = "Is Contact Outdated", skip_all, fields(actor_id = actor_id.to_string().as_str()))]
+    #[tracing::instrument(name = "Is Contact Outdated", fields(actor_id = actor_id.to_string().as_str()))]
     pub(crate) async fn is_contact_outdated(&self, actor_id: IriString) -> bool {
         self.db
             .contact(actor_id)
@ -75,7 +75,7 @@ impl NodeCache {
             .unwrap_or(true)
     }

-    #[tracing::instrument(level = "debug", name = "Is Instance Outdated", skip_all, fields(actor_id = actor_id.to_string().as_str()))]
+    #[tracing::instrument(name = "Is Instance Outdated", fields(actor_id = actor_id.to_string().as_str()))]
     pub(crate) async fn is_instance_outdated(&self, actor_id: IriString) -> bool {
         self.db
             .instance(actor_id)
@ -84,7 +84,7 @@ impl NodeCache {
             .unwrap_or(true)
     }

-    #[tracing::instrument(level = "debug", name = "Save node info", skip_all, fields(actor_id = actor_id.to_string().as_str(), software, version, reg))]
+    #[tracing::instrument(name = "Save node info", fields(actor_id = actor_id.to_string().as_str(), software, version, reg))]
     pub(crate) async fn set_info(
         &self,
         actor_id: IriString,
@ -106,9 +106,7 @@ impl NodeCache {
     }

     #[tracing::instrument(
-        level = "debug",
         name = "Save instance info",
-        skip_all,
         fields(
             actor_id = actor_id.to_string().as_str(),
             title,
@ -143,9 +141,7 @@ impl NodeCache {
     }

     #[tracing::instrument(
-        level = "debug",
         name = "Save contact info",
-        skip_all,
         fields(
             actor_id = actor_id.to_string().as_str(),
             username,
@ -182,7 +178,7 @@ impl Node {
         let authority = url.authority_str().ok_or(ErrorKind::MissingDomain)?;
         let scheme = url.scheme_str();

-        let base = iri!(format!("{scheme}://{authority}"));
+        let base = iri!(format!("{}://{}", scheme, authority));

         Ok(Node {
             base,
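Every change in this file is the same move: the main side pins these spans to level = "debug" and adds skip_all so arguments are not captured with their Debug impls, while v0.3.20 relies on the defaults (info-level spans, all arguments recorded). Schematically, with placeholder functions:

// main-side style: debug-level span, nothing captured implicitly,
// one explicit field recorded via its Display impl.
#[tracing::instrument(level = "debug", name = "Example", skip_all, fields(id = %id))]
fn example_new(id: u64, big_payload: &[u8]) {}

// v0.3.20-side style: default info level; `id` is captured automatically.
#[tracing::instrument(name = "Example")]
fn example_old(id: u64) {}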

View file

@ -1,28 +1,26 @@
 use crate::{
+    config::{Config, UrlKind},
     data::NodeCache,
     db::Db,
     error::Error,
     requests::{Breakers, Requests},
-    spawner::Spawner,
 };
 use activitystreams::iri_string::types::IriString;
 use actix_web::web;
+use async_rwlock::RwLock;
 use lru::LruCache;
 use rand::thread_rng;
-use reqwest_middleware::ClientWithMiddleware;
 use rsa::{RsaPrivateKey, RsaPublicKey};
-use std::sync::{Arc, RwLock};
-
-use super::LastOnline;
+use std::sync::Arc;
+use tracing::info;

 #[derive(Clone)]
 pub struct State {
-    pub(crate) requests: Requests,
     pub(crate) public_key: RsaPublicKey,
+    private_key: RsaPrivateKey,
     object_cache: Arc<RwLock<LruCache<IriString, IriString>>>,
-    pub(crate) node_cache: NodeCache,
+    node_cache: NodeCache,
     breakers: Breakers,
-    pub(crate) last_online: Arc<LastOnline>,
     pub(crate) db: Db,
 }
@ -37,10 +35,21 @@ impl std::fmt::Debug for State {
     }
 }

 impl State {
+    pub(crate) fn node_cache(&self) -> NodeCache {
+        self.node_cache.clone()
+    }
+
+    pub(crate) fn requests(&self, config: &Config) -> Requests {
+        Requests::new(
+            config.generate_url(UrlKind::MainKey).to_string(),
+            self.private_key.clone(),
+            config.user_agent(),
+            self.breakers.clone(),
+        )
+    }
+
     #[tracing::instrument(
-        level = "debug",
         name = "Get inboxes for other domains",
-        skip_all,
         fields(
             existing_inbox = existing_inbox.to_string().as_str(),
             authority
@ -68,32 +77,21 @@ impl State {
             .collect())
     }

-    pub(crate) fn is_cached(&self, object_id: &IriString) -> bool {
-        self.object_cache.read().unwrap().contains(object_id)
+    pub(crate) async fn is_cached(&self, object_id: &IriString) -> bool {
+        self.object_cache.read().await.contains(object_id)
     }

-    pub(crate) fn cache(&self, object_id: IriString, actor_id: IriString) {
-        let mut guard = self.object_cache.write().unwrap();
-        guard.put(object_id, actor_id);
-        metrics::gauge!("relay.object-cache.size").set(crate::collector::recordable(guard.len()));
+    pub(crate) async fn cache(&self, object_id: IriString, actor_id: IriString) {
+        self.object_cache.write().await.put(object_id, actor_id);
     }

-    pub(crate) fn is_connected(&self, iri: &IriString) -> bool {
-        self.breakers.should_try(iri)
-    }
-
-    #[tracing::instrument(level = "debug", name = "Building state", skip_all)]
-    pub(crate) async fn build(
-        db: Db,
-        key_id: String,
-        spawner: Spawner,
-        client: ClientWithMiddleware,
-    ) -> Result<Self, Error> {
+    #[tracing::instrument(name = "Building state")]
+    pub(crate) async fn build(db: Db) -> Result<Self, Error> {
         let private_key = if let Ok(Some(key)) = db.private_key().await {
-            tracing::debug!("Using existing key");
+            info!("Using existing key");
             key
         } else {
-            tracing::info!("Generating new keys");
+            info!("Generating new keys");
             let key = web::block(move || {
                 let mut rng = thread_rng();
                 RsaPrivateKey::new(&mut rng, 4096)
@ -107,28 +105,13 @@ impl State {

         let public_key = private_key.to_public_key();

-        let breakers = Breakers::default();
-        let last_online = Arc::new(LastOnline::empty());
-
-        let requests = Requests::new(
-            key_id,
-            private_key,
-            breakers.clone(),
-            last_online.clone(),
-            spawner,
-            client,
-        );
-
         let state = State {
-            requests,
             public_key,
-            object_cache: Arc::new(RwLock::new(LruCache::new(
-                (1024 * 8).try_into().expect("nonzero"),
-            ))),
+            private_key,
+            object_cache: Arc::new(RwLock::new(LruCache::new(1024 * 8))),
             node_cache: NodeCache::new(db.clone()),
-            breakers,
+            breakers: Breakers::default(),
             db,
-            last_online,
         };

         Ok(state)
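The object-cache change above is the main behavioral difference in this file: main keeps the LRU behind a blocking std::sync::RwLock, which is safe here because nothing awaits while the guard is held, where v0.3.20 awaits an async_rwlock::RwLock instead. A minimal sketch of the main-side shape (capacity expression copied from the diff, everything else illustrative):

use lru::LruCache;
use std::sync::RwLock;

fn demo() {
    // Newer `lru` releases require a nonzero capacity.
    let cache = RwLock::new(LruCache::<String, String>::new(
        (1024 * 8).try_into().expect("nonzero"),
    ));

    cache
        .write()
        .unwrap()
        .put("object-id".into(), "actor-id".into());
    assert!(cache.read().unwrap().contains("object-id"));
}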

367
src/db.rs
View file

@ -3,20 +3,13 @@ use crate::{
     error::{Error, ErrorKind},
 };
 use activitystreams::iri_string::types::IriString;
+use actix_web::web::Bytes;
 use rsa::{
-    pkcs8::{DecodePrivateKey, EncodePrivateKey},
+    pkcs8::{FromPrivateKey, ToPrivateKey},
     RsaPrivateKey,
 };
-use sled::{transaction::TransactionError, Batch, Transactional, Tree};
-use std::{
-    collections::{BTreeMap, HashMap},
-    sync::{
-        atomic::{AtomicU64, Ordering},
-        Arc,
-    },
-    time::SystemTime,
-};
-use time::OffsetDateTime;
+use sled::Tree;
+use std::{collections::HashMap, sync::Arc, time::SystemTime};
 use uuid::Uuid;

 #[derive(Clone, Debug)]
@ -25,8 +18,6 @@ pub(crate) struct Db {
 }

 struct Inner {
-    healthz: Tree,
-    healthz_counter: Arc<AtomicU64>,
     actor_id_actor: Tree,
     public_key_id_actor_id: Tree,
     connected_actor_ids: Tree,
@ -35,10 +26,11 @@ struct Inner {
     settings: Tree,
     media_url_media_id: Tree,
     media_id_media_url: Tree,
+    media_id_media_bytes: Tree,
+    media_id_media_meta: Tree,
     actor_id_info: Tree,
     actor_id_instance: Tree,
     actor_id_contact: Tree,
-    last_seen: Tree,
     restricted_mode: bool,
 }

@ -71,6 +63,12 @@ impl std::fmt::Debug for Actor {
     }
 }

+#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
+pub(crate) struct MediaMeta {
+    pub(crate) media_type: String,
+    pub(crate) saved_at: SystemTime,
+}
+
 #[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
 pub struct Info {
     pub(crate) software: String,
@ -144,14 +142,6 @@ impl Inner {
             .map(|s| String::from_utf8_lossy(&s).to_string())
     }

-    fn allowed(&self) -> impl DoubleEndedIterator<Item = String> {
-        self.allowed_domains
-            .iter()
-            .values()
-            .filter_map(|res| res.ok())
-            .map(|s| String::from_utf8_lossy(&s).to_string())
-    }
-
     fn connected(&self) -> impl DoubleEndedIterator<Item = IriString> {
         self.connected_actor_ids
             .iter()
@ -247,8 +237,6 @@ impl Db {
     fn build_inner(restricted_mode: bool, db: sled::Db) -> Result<Self, Error> {
         Ok(Db {
             inner: Arc::new(Inner {
-                healthz: db.open_tree("healthz")?,
-                healthz_counter: Arc::new(AtomicU64::new(0)),
                 actor_id_actor: db.open_tree("actor-id-actor")?,
                 public_key_id_actor_id: db.open_tree("public-key-id-actor-id")?,
                 connected_actor_ids: db.open_tree("connected-actor-ids")?,
@ -257,10 +245,11 @@ impl Db {
                 settings: db.open_tree("settings")?,
                 media_url_media_id: db.open_tree("media-url-media-id")?,
                 media_id_media_url: db.open_tree("media-id-media-url")?,
+                media_id_media_bytes: db.open_tree("media-id-media-bytes")?,
+                media_id_media_meta: db.open_tree("media-id-media-meta")?,
                 actor_id_info: db.open_tree("actor-id-info")?,
                 actor_id_instance: db.open_tree("actor-id-instance")?,
                 actor_id_contact: db.open_tree("actor-id-contact")?,
-                last_seen: db.open_tree("last-seen")?,
                 restricted_mode,
             }),
         })
@ -268,7 +257,7 @@ impl Db {

     async fn unblock<T>(
         &self,
-        f: impl FnOnce(&Inner) -> Result<T, Error> + Send + 'static,
+        f: impl Fn(&Inner) -> Result<T, Error> + Send + 'static,
     ) -> Result<T, Error>
     where
         T: Send + 'static,
@ -280,68 +269,6 @@ impl Db {
         Ok(t)
     }

-    pub(crate) async fn check_health(&self) -> Result<(), Error> {
-        let next = self.inner.healthz_counter.fetch_add(1, Ordering::Relaxed);
-
-        self.unblock(move |inner| {
-            let res = inner
-                .healthz
-                .insert("healthz", &next.to_be_bytes()[..])
-                .map_err(Error::from);
-
-            metrics::gauge!("relay.db.healthz.size")
-                .set(crate::collector::recordable(inner.healthz.len()));
-
-            res
-        })
-        .await?;
-
-        self.inner.healthz.flush_async().await?;
-
-        self.unblock(move |inner| inner.healthz.get("healthz").map_err(Error::from))
-            .await?;
-
-        Ok(())
-    }
-
-    pub(crate) async fn mark_last_seen(
-        &self,
-        nodes: HashMap<String, OffsetDateTime>,
-    ) -> Result<(), Error> {
-        let mut batch = Batch::default();
-
-        for (domain, datetime) in nodes {
-            let datetime_string = serde_json::to_vec(&datetime)?;
-
-            batch.insert(domain.as_bytes(), datetime_string);
-        }
-
-        self.unblock(move |inner| inner.last_seen.apply_batch(batch).map_err(Error::from))
-            .await
-    }
-
-    pub(crate) async fn last_seen(
-        &self,
-    ) -> Result<BTreeMap<String, Option<OffsetDateTime>>, Error> {
-        self.unblock(|inner| {
-            let mut map = BTreeMap::new();
-
-            for iri in inner.connected() {
-                let Some(authority_str) = iri.authority_str() else {
-                    continue;
-                };
-
-                if let Some(datetime) = inner.last_seen.get(authority_str)? {
-                    map.insert(
-                        authority_str.to_string(),
-                        Some(serde_json::from_slice(&datetime)?),
-                    );
-                } else {
-                    map.insert(authority_str.to_string(), None);
-                }
-            }
-
-            Ok(map)
-        })
-        .await
-    }
-
     pub(crate) async fn connected_ids(&self) -> Result<Vec<IriString>, Error> {
         self.unblock(|inner| Ok(inner.connected().collect())).await
     }
@ -354,9 +281,6 @@ impl Db {
                 .actor_id_info
                 .insert(actor_id.as_str().as_bytes(), vec)?;

-            metrics::gauge!("relay.db.actor-id-info.size")
-                .set(crate::collector::recordable(inner.actor_id_info.len()));
-
             Ok(())
         })
         .await
@ -364,12 +288,12 @@ impl Db {

     pub(crate) async fn info(&self, actor_id: IriString) -> Result<Option<Info>, Error> {
         self.unblock(move |inner| {
-            inner
-                .actor_id_info
-                .get(actor_id.as_str().as_bytes())?
-                .map(|ivec| serde_json::from_slice(&ivec))
-                .transpose()
-                .map_err(Error::from)
+            if let Some(ivec) = inner.actor_id_info.get(actor_id.as_str().as_bytes())? {
+                let info = serde_json::from_slice(&ivec)?;
+                Ok(Some(info))
+            } else {
+                Ok(None)
+            }
         })
         .await
     }
@ -391,9 +315,6 @@ impl Db {
                 .actor_id_instance
                 .insert(actor_id.as_str().as_bytes(), vec)?;

-            metrics::gauge!("relay.db.actor-id-instance.size")
-                .set(crate::collector::recordable(inner.actor_id_instance.len()));
-
             Ok(())
         })
         .await
@ -401,12 +322,12 @@ impl Db {

     pub(crate) async fn instance(&self, actor_id: IriString) -> Result<Option<Instance>, Error> {
         self.unblock(move |inner| {
-            inner
-                .actor_id_instance
-                .get(actor_id.as_str().as_bytes())?
-                .map(|ivec| serde_json::from_slice(&ivec))
-                .transpose()
-                .map_err(Error::from)
+            if let Some(ivec) = inner.actor_id_instance.get(actor_id.as_str().as_bytes())? {
+                let instance = serde_json::from_slice(&ivec)?;
+                Ok(Some(instance))
+            } else {
+                Ok(None)
+            }
         })
         .await
     }
@ -428,9 +349,6 @@ impl Db {
                 .actor_id_contact
                 .insert(actor_id.as_str().as_bytes(), vec)?;

-            metrics::gauge!("relay.db.actor-id-contact.size")
-                .set(crate::collector::recordable(inner.actor_id_contact.len()));
-
             Ok(())
         })
         .await
@ -438,12 +356,12 @@ impl Db {

     pub(crate) async fn contact(&self, actor_id: IriString) -> Result<Option<Contact>, Error> {
         self.unblock(move |inner| {
-            inner
-                .actor_id_contact
-                .get(actor_id.as_str().as_bytes())?
-                .map(|ivec| serde_json::from_slice(&ivec))
-                .transpose()
-                .map_err(Error::from)
+            if let Some(ivec) = inner.actor_id_contact.get(actor_id.as_str().as_bytes())? {
+                let contact = serde_json::from_slice(&ivec)?;
+                Ok(Some(contact))
+            } else {
+                Ok(None)
+            }
         })
         .await
     }
@ -461,11 +379,24 @@ impl Db {
             inner
                 .media_url_media_id
                 .insert(url.as_str().as_bytes(), id.as_bytes())?;
-
-            metrics::gauge!("relay.db.media-id-media-url.size")
-                .set(crate::collector::recordable(inner.media_id_media_url.len()));
-            metrics::gauge!("relay.db.media-url-media-id.size")
-                .set(crate::collector::recordable(inner.media_url_media_id.len()));
+            Ok(())
+        })
+        .await
+    }
+
+    pub(crate) async fn save_bytes(
+        &self,
+        id: Uuid,
+        meta: MediaMeta,
+        bytes: Bytes,
+    ) -> Result<(), Error> {
+        self.unblock(move |inner| {
+            let vec = serde_json::to_vec(&meta)?;
+
+            inner
+                .media_id_media_bytes
+                .insert(id.as_bytes(), bytes.as_ref())?;
+            inner.media_id_media_meta.insert(id.as_bytes(), vec)?;

             Ok(())
         })
@ -474,20 +405,45 @@ impl Db {

     pub(crate) async fn media_id(&self, url: IriString) -> Result<Option<Uuid>, Error> {
         self.unblock(move |inner| {
-            Ok(inner
-                .media_url_media_id
-                .get(url.as_str().as_bytes())?
-                .and_then(uuid_from_ivec))
+            if let Some(ivec) = inner.media_url_media_id.get(url.as_str().as_bytes())? {
+                Ok(uuid_from_ivec(ivec))
+            } else {
+                Ok(None)
+            }
         })
         .await
     }

     pub(crate) async fn media_url(&self, id: Uuid) -> Result<Option<IriString>, Error> {
         self.unblock(move |inner| {
-            Ok(inner
-                .media_id_media_url
-                .get(id.as_bytes())?
-                .and_then(url_from_ivec))
+            if let Some(ivec) = inner.media_id_media_url.get(id.as_bytes())? {
+                Ok(url_from_ivec(ivec))
+            } else {
+                Ok(None)
+            }
+        })
+        .await
+    }
+
+    pub(crate) async fn media_bytes(&self, id: Uuid) -> Result<Option<Bytes>, Error> {
+        self.unblock(move |inner| {
+            if let Some(ivec) = inner.media_id_media_bytes.get(id.as_bytes())? {
+                Ok(Some(Bytes::copy_from_slice(&ivec)))
+            } else {
+                Ok(None)
+            }
+        })
+        .await
+    }
+
+    pub(crate) async fn media_meta(&self, id: Uuid) -> Result<Option<MediaMeta>, Error> {
+        self.unblock(move |inner| {
+            if let Some(ivec) = inner.media_id_media_meta.get(id.as_bytes())? {
+                let meta = serde_json::from_slice(&ivec)?;
+                Ok(Some(meta))
+            } else {
+                Ok(None)
+            }
         })
         .await
     }
@ -496,10 +452,6 @@ impl Db {
         self.unblock(|inner| Ok(inner.blocks().collect())).await
     }

-    pub(crate) async fn allows(&self) -> Result<Vec<String>, Error> {
-        self.unblock(|inner| Ok(inner.allowed().collect())).await
-    }
-
     pub(crate) async fn inboxes(&self) -> Result<Vec<IriString>, Error> {
         self.unblock(|inner| Ok(inner.connected_actors().map(|actor| actor.inbox).collect()))
             .await
@ -508,7 +460,7 @@ impl Db {
     pub(crate) async fn is_connected(&self, base_id: IriString) -> Result<bool, Error> {
         let scheme = base_id.scheme_str();
         let authority = base_id.authority_str().ok_or(ErrorKind::MissingDomain)?;
-        let prefix = format!("{scheme}://{authority}");
+        let prefix = format!("{}://{}", scheme, authority);

         self.unblock(move |inner| {
             let connected = inner
@ -527,22 +479,26 @@ impl Db {
         public_key_id: IriString,
     ) -> Result<Option<IriString>, Error> {
         self.unblock(move |inner| {
-            Ok(inner
-                .public_key_id_actor_id
-                .get(public_key_id.as_str().as_bytes())?
-                .and_then(url_from_ivec))
+            if let Some(ivec) = inner
+                .public_key_id_actor_id
+                .get(public_key_id.as_str().as_bytes())?
+            {
+                Ok(url_from_ivec(ivec))
+            } else {
+                Ok(None)
+            }
         })
         .await
     }

     pub(crate) async fn actor(&self, actor_id: IriString) -> Result<Option<Actor>, Error> {
         self.unblock(move |inner| {
-            inner
-                .actor_id_actor
-                .get(actor_id.as_str().as_bytes())?
-                .map(|ivec| serde_json::from_slice(&ivec))
-                .transpose()
-                .map_err(Error::from)
+            if let Some(ivec) = inner.actor_id_actor.get(actor_id.as_str().as_bytes())? {
+                let actor = serde_json::from_slice(&ivec)?;
+                Ok(Some(actor))
+            } else {
+                Ok(None)
+            }
         })
         .await
     }
@ -558,46 +514,30 @@ impl Db {
             inner
                 .actor_id_actor
                 .insert(actor.id.as_str().as_bytes(), vec)?;

-            metrics::gauge!("relay.db.public-key-actor-id.size").set(crate::collector::recordable(
-                inner.public_key_id_actor_id.len(),
-            ));
-            metrics::gauge!("relay.db.actor-id-actor.size").set(crate::collector::recordable(
-                inner.public_key_id_actor_id.len(),
-            ));
-
             Ok(())
         })
         .await
     }

     pub(crate) async fn remove_connection(&self, actor_id: IriString) -> Result<(), Error> {
-        tracing::debug!("Removing Connection: {actor_id}");
+        tracing::debug!("Removing Connection: {}", actor_id);
         self.unblock(move |inner| {
             inner
                 .connected_actor_ids
                 .remove(actor_id.as_str().as_bytes())?;

-            metrics::gauge!("relay.db.connected-actor-ids.size").set(crate::collector::recordable(
-                inner.connected_actor_ids.len(),
-            ));
-
             Ok(())
         })
         .await
     }

     pub(crate) async fn add_connection(&self, actor_id: IriString) -> Result<(), Error> {
-        tracing::debug!("Adding Connection: {actor_id}");
+        tracing::debug!("Adding Connection: {}", actor_id);
         self.unblock(move |inner| {
             inner
                 .connected_actor_ids
                 .insert(actor_id.as_str().as_bytes(), actor_id.as_str().as_bytes())?;

-            metrics::gauge!("relay.db.connected-actor-ids.size").set(crate::collector::recordable(
-                inner.connected_actor_ids.len(),
-            ));
-
             Ok(())
         })
         .await
@ -605,64 +545,30 @@ impl Db {

     pub(crate) async fn add_blocks(&self, domains: Vec<String>) -> Result<(), Error> {
         self.unblock(move |inner| {
-            let connected_by_domain = inner.connected_by_domain(&domains).collect::<Vec<_>>();
-
-            let res = (
-                &inner.connected_actor_ids,
-                &inner.blocked_domains,
-                &inner.allowed_domains,
-            )
-                .transaction(|(connected, blocked, allowed)| {
-                    let mut connected_batch = Batch::default();
-                    let mut blocked_batch = Batch::default();
-                    let mut allowed_batch = Batch::default();
-
-                    for connected in &connected_by_domain {
-                        connected_batch.remove(connected.as_str().as_bytes());
-                    }
-
-                    for authority in &domains {
-                        blocked_batch
-                            .insert(domain_key(authority).as_bytes(), authority.as_bytes());
-                        allowed_batch.remove(domain_key(authority).as_bytes());
-                    }
-
-                    connected.apply_batch(&connected_batch)?;
-                    blocked.apply_batch(&blocked_batch)?;
-                    allowed.apply_batch(&allowed_batch)?;
-
-                    Ok(())
-                });
-
-            metrics::gauge!("relay.db.connected-actor-ids.size").set(crate::collector::recordable(
-                inner.connected_actor_ids.len(),
-            ));
-            metrics::gauge!("relay.db.blocked-domains.size")
-                .set(crate::collector::recordable(inner.blocked_domains.len()));
-            metrics::gauge!("relay.db.allowed-domains.size")
-                .set(crate::collector::recordable(inner.allowed_domains.len()));
-
-            match res {
-                Ok(()) => Ok(()),
-                Err(TransactionError::Abort(e) | TransactionError::Storage(e)) => Err(e.into()),
+            for connected in inner.connected_by_domain(&domains) {
+                inner
+                    .connected_actor_ids
+                    .remove(connected.as_str().as_bytes())?;
             }
+
+            for authority in &domains {
+                inner
+                    .blocked_domains
+                    .insert(domain_key(authority), authority.as_bytes())?;
+                inner.allowed_domains.remove(domain_key(authority))?;
+            }
+
+            Ok(())
         })
         .await
     }

     pub(crate) async fn remove_blocks(&self, domains: Vec<String>) -> Result<(), Error> {
         self.unblock(move |inner| {
-            let mut blocked_batch = Batch::default();
-
             for authority in &domains {
-                blocked_batch.remove(domain_key(authority).as_bytes());
+                inner.blocked_domains.remove(domain_key(authority))?;
             }

-            inner.blocked_domains.apply_batch(blocked_batch)?;
-
-            metrics::gauge!("relay.db.blocked-domains.size")
-                .set(crate::collector::recordable(inner.blocked_domains.len()));
-
             Ok(())
         })
         .await
@ -670,17 +576,12 @@ impl Db {

     pub(crate) async fn add_allows(&self, domains: Vec<String>) -> Result<(), Error> {
         self.unblock(move |inner| {
-            let mut allowed_batch = Batch::default();
-
             for authority in &domains {
-                allowed_batch.insert(domain_key(authority).as_bytes(), authority.as_bytes());
+                inner
+                    .allowed_domains
+                    .insert(domain_key(authority), authority.as_bytes())?;
             }

-            inner.allowed_domains.apply_batch(allowed_batch)?;
-
-            metrics::gauge!("relay.db.allowed-domains.size")
-                .set(crate::collector::recordable(inner.allowed_domains.len()));
-
             Ok(())
         })
         .await
@ -689,32 +590,17 @@ impl Db {
     pub(crate) async fn remove_allows(&self, domains: Vec<String>) -> Result<(), Error> {
         self.unblock(move |inner| {
             if inner.restricted_mode {
-                let connected_by_domain = inner.connected_by_domain(&domains).collect::<Vec<_>>();
-
-                let mut connected_batch = Batch::default();
-
-                for connected in &connected_by_domain {
-                    connected_batch.remove(connected.as_str().as_bytes());
+                for connected in inner.connected_by_domain(&domains) {
+                    inner
+                        .connected_actor_ids
+                        .remove(connected.as_str().as_bytes())?;
                 }
-
-                inner.connected_actor_ids.apply_batch(connected_batch)?;
-
-                metrics::gauge!("relay.db.connected-actor-ids.size").set(
-                    crate::collector::recordable(inner.connected_actor_ids.len()),
-                );
             }

-            let mut allowed_batch = Batch::default();
-
             for authority in &domains {
-                allowed_batch.remove(domain_key(authority).as_bytes());
+                inner.allowed_domains.remove(domain_key(authority))?;
             }

-            inner.allowed_domains.apply_batch(allowed_batch)?;
-
-            metrics::gauge!("relay.db.allowed-domains.size")
-                .set(crate::collector::recordable(inner.allowed_domains.len()));
-
             Ok(())
         })
         .await
@ -749,16 +635,12 @@ impl Db {
         &self,
         private_key: &RsaPrivateKey,
     ) -> Result<(), Error> {
-        let pem_pkcs8 = private_key.to_pkcs8_pem(rsa::pkcs8::LineEnding::default())?;
+        let pem_pkcs8 = private_key.to_pkcs8_pem()?;

         self.unblock(move |inner| {
             inner
                 .settings
                 .insert("private-key".as_bytes(), pem_pkcs8.as_bytes())?;

-            metrics::gauge!("relay.db.settings.size")
-                .set(crate::collector::recordable(inner.settings.len()));
-
             Ok(())
         })
         .await
@ -844,11 +726,6 @@ mod tests {
         {
             let db =
                 Db::build_inner(true, sled::Config::new().temporary(true).open().unwrap()).unwrap();
-            tokio::runtime::Builder::new_current_thread()
-                .enable_all()
-                .build()
-                .unwrap()
-                .block_on((f)(db));
+            actix_rt::System::new().block_on((f)(db));
         }
     }
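The block-list handling is the largest behavioral change in this file: v0.3.20 mutates each tree key by key, while main stages Batch operations and commits them through a multi-tree sled transaction so the whole update lands atomically. A reduced sketch of the main-side pattern (trees and key are hypothetical):

use sled::{transaction::TransactionError, Batch, Transactional};

fn block_domain(
    connected: &sled::Tree,
    blocked: &sled::Tree,
    authority: &str,
) -> Result<(), sled::Error> {
    let res: Result<(), TransactionError<sled::Error>> =
        (connected, blocked).transaction(|(connected, blocked)| {
            let mut connected_batch = Batch::default();
            connected_batch.remove(authority.as_bytes());

            let mut blocked_batch = Batch::default();
            blocked_batch.insert(authority.as_bytes(), authority.as_bytes());

            // Both batches commit or neither does.
            connected.apply_batch(&connected_batch)?;
            blocked.apply_batch(&blocked_batch)?;
            Ok(())
        });

    match res {
        Ok(()) => Ok(()),
        Err(TransactionError::Abort(e) | TransactionError::Storage(e)) => Err(e),
    }
}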

View file

@ -1,85 +1,36 @@
 use activitystreams::checked::CheckError;
+use actix_rt::task::JoinError;
 use actix_web::{
     error::{BlockingError, ResponseError},
     http::StatusCode,
     HttpResponse,
 };
-use background_jobs::BoxError;
-use color_eyre::eyre::Error as Report;
-use http_signature_normalization_reqwest::SignError;
-use std::{convert::Infallible, io, sync::Arc};
-use tokio::task::JoinError;
+use http_signature_normalization_actix::PrepareSignError;
+use std::{convert::Infallible, fmt::Debug, io};
+use tracing::error;
+use tracing_error::SpanTrace;

-#[derive(Clone)]
-struct ArcKind {
-    kind: Arc<ErrorKind>,
-}
-
-impl std::fmt::Debug for ArcKind {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        self.kind.fmt(f)
-    }
-}
-
-impl std::fmt::Display for ArcKind {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        self.kind.fmt(f)
-    }
-}
-
-impl std::error::Error for ArcKind {
-    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
-        self.kind.source()
-    }
-}
-
 pub(crate) struct Error {
-    kind: ArcKind,
-    display: Box<str>,
-    debug: Box<str>,
+    context: SpanTrace,
+    kind: ErrorKind,
 }

-impl Error {
-    fn kind(&self) -> &ErrorKind {
-        &self.kind.kind
-    }
-
-    pub(crate) fn is_breaker(&self) -> bool {
-        matches!(self.kind(), ErrorKind::Breaker)
-    }
-
-    pub(crate) fn is_not_found(&self) -> bool {
-        matches!(self.kind(), ErrorKind::Status(_, StatusCode::NOT_FOUND))
-    }
-
-    pub(crate) fn is_bad_request(&self) -> bool {
-        matches!(self.kind(), ErrorKind::Status(_, StatusCode::BAD_REQUEST))
-    }
-
-    pub(crate) fn is_gone(&self) -> bool {
-        matches!(self.kind(), ErrorKind::Status(_, StatusCode::GONE))
-    }
-
-    pub(crate) fn is_malformed_json(&self) -> bool {
-        matches!(self.kind(), ErrorKind::Json(_))
-    }
-}
-
 impl std::fmt::Debug for Error {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        f.write_str(&self.debug)
+        writeln!(f, "{:?}", self.kind)
     }
 }

 impl std::fmt::Display for Error {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        f.write_str(&self.display)
+        writeln!(f, "{}", self.kind)?;
+        std::fmt::Display::fmt(&self.context, f)
     }
 }

 impl std::error::Error for Error {
     fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
-        self.kind().source()
+        self.kind.source()
     }
 }
@ -88,82 +39,47 @@ where
     ErrorKind: From<T>,
 {
     fn from(error: T) -> Self {
-        let kind = ArcKind {
-            kind: Arc::new(ErrorKind::from(error)),
-        };
-        let report = Report::new(kind.clone());
-        let display = format!("{report}");
-        let debug = format!("{report:?}");
-
         Error {
-            kind,
-            display: Box::from(display),
-            debug: Box::from(debug),
+            context: SpanTrace::capture(),
+            kind: error.into(),
         }
     }
 }

 #[derive(Debug, thiserror::Error)]
 pub(crate) enum ErrorKind {
-    #[error("Error in extractor")]
-    Extractor(#[from] crate::extractors::ErrorKind),
-
-    #[error("Error queueing job")]
-    Queue(#[from] BoxError),
+    #[error("Error queueing job, {0}")]
+    Queue(anyhow::Error),

-    #[error("Error in configuration")]
+    #[error("Error in configuration, {0}")]
     Config(#[from] config::ConfigError),

-    #[error("Couldn't parse key")]
+    #[error("Couldn't parse key, {0}")]
     Pkcs8(#[from] rsa::pkcs8::Error),
#[error("Couldn't encode public key")] #[error("Couldn't parse IRI, {0}")]
Spki(#[from] rsa::pkcs8::spki::Error),
#[error("Couldn't sign request")]
SignRequest,
#[error("Response body from server exceeded limits")]
BodyTooLarge,
#[error("Couldn't make request")]
Reqwest(#[from] reqwest::Error),
#[error("Couldn't make request")]
ReqwestMiddleware(#[from] reqwest_middleware::Error),
#[error("Couldn't parse IRI")]
ParseIri(#[from] activitystreams::iri_string::validate::Error), ParseIri(#[from] activitystreams::iri_string::validate::Error),
#[error("Couldn't normalize IRI")] #[error("Couldn't normalize IRI, {0}")]
NormalizeIri(#[from] std::collections::TryReserveError), NormalizeIri(
#[from]
activitystreams::iri_string::task::Error<activitystreams::iri_string::normalize::Error>,
),
#[error("Couldn't perform IO")] #[error("Couldn't perform IO, {0}")]
Io(#[from] io::Error), Io(#[from] io::Error),
#[error("Couldn't sign string, {0}")] #[error("Couldn't sign string, {0}")]
Rsa(rsa::errors::Error), Rsa(rsa::errors::Error),
#[error("Couldn't use db")] #[error("Couldn't use db, {0}")]
Sled(#[from] sled::Error), Sled(#[from] sled::Error),
#[error("Couldn't do the json thing")] #[error("Couldn't do the json thing, {0}")]
Json(#[from] serde_json::Error), Json(#[from] serde_json::Error),
#[error("Couldn't sign request")] #[error("Couldn't build signing string, {0}")]
Sign(#[from] SignError), PrepareSign(#[from] PrepareSignError),
#[error("Couldn't sign digest")]
Signature(#[from] rsa::signature::Error),
#[error("Couldn't prepare TLS private key")]
PrepareKey(#[from] rustls::Error),
#[error("Couldn't verify signature")]
VerifySignature,
#[error("Failed to encode key der")]
DerEncode,
#[error("Couldn't parse the signature header")] #[error("Couldn't parse the signature header")]
HeaderValidation(#[from] actix_web::http::header::InvalidHeaderValue), HeaderValidation(#[from] actix_web::http::header::InvalidHeaderValue),
@ -184,17 +100,20 @@ pub(crate) enum ErrorKind {
BadActor(String, String), BadActor(String, String),
#[error("Signature verification is required, but no signature was given")] #[error("Signature verification is required, but no signature was given")]
NoSignature(Option<String>), NoSignature(String),
#[error("Wrong ActivityPub kind, {0}")] #[error("Wrong ActivityPub kind, {0}")]
Kind(String), Kind(String),
#[error("Too many CPUs")] #[error("Too many CPUs, {0}")]
CpuCount(#[from] std::num::TryFromIntError), CpuCount(#[from] std::num::TryFromIntError),
#[error("Host mismatch")] #[error("{0}")]
HostMismatch(#[from] CheckError), HostMismatch(#[from] CheckError),
#[error("Invalid or missing content type")]
ContentType,
#[error("Couldn't flush buffer")] #[error("Couldn't flush buffer")]
FlushBuffer, FlushBuffer,
@ -239,14 +158,11 @@ pub(crate) enum ErrorKind {
#[error("Failed to extract fields from {0}")] #[error("Failed to extract fields from {0}")]
Extract(&'static str), Extract(&'static str),
#[error("No API Token supplied")]
MissingApiToken,
} }
impl ResponseError for Error { impl ResponseError for Error {
fn status_code(&self) -> StatusCode { fn status_code(&self) -> StatusCode {
match self.kind() { match self.kind {
ErrorKind::NotAllowed(_) | ErrorKind::WrongActor(_) | ErrorKind::BadActor(_, _) => { ErrorKind::NotAllowed(_) | ErrorKind::WrongActor(_) | ErrorKind::BadActor(_, _) => {
StatusCode::FORBIDDEN StatusCode::FORBIDDEN
} }
@ -255,8 +171,7 @@ impl ResponseError for Error {
ErrorKind::Kind(_) ErrorKind::Kind(_)
| ErrorKind::MissingKind | ErrorKind::MissingKind
| ErrorKind::MissingId | ErrorKind::MissingId
| ErrorKind::ObjectCount | ErrorKind::ObjectCount => StatusCode::BAD_REQUEST,
| ErrorKind::NoSignature(_) => StatusCode::BAD_REQUEST,
_ => StatusCode::INTERNAL_SERVER_ERROR, _ => StatusCode::INTERNAL_SERVER_ERROR,
} }
} }
@ -266,7 +181,7 @@ impl ResponseError for Error {
.insert_header(("Content-Type", "application/activity+json")) .insert_header(("Content-Type", "application/activity+json"))
.body( .body(
serde_json::to_string(&serde_json::json!({ serde_json::to_string(&serde_json::json!({
"error": self.kind().to_string(), "error": self.kind.to_string(),
})) }))
.unwrap_or_else(|_| "{}".to_string()), .unwrap_or_else(|_| "{}".to_string()),
) )
@ -296,15 +211,3 @@ impl From<rsa::errors::Error> for ErrorKind {
ErrorKind::Rsa(e) ErrorKind::Rsa(e)
} }
} }
impl From<http_signature_normalization_actix::Canceled> for ErrorKind {
fn from(_: http_signature_normalization_actix::Canceled) -> Self {
Self::Canceled
}
}
impl From<http_signature_normalization_reqwest::Canceled> for ErrorKind {
fn from(_: http_signature_normalization_reqwest::Canceled) -> Self {
Self::Canceled
}
}

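Aside: the main-side `Error` above is built so that formatting happens exactly once. The kind goes into an `Arc` (keeping the error cheap to clone), a `color_eyre` `Report` is constructed from a clone of it, and the rendered `Display`/`Debug` strings are stored as `Box<str>`. A condensed, self-contained sketch of that construction; the `Kind` type here is a placeholder, and the manual `source` plumbing from the real code is omitted:

```rust
use std::sync::Arc;

#[derive(Debug, thiserror::Error)]
#[error("something went wrong")]
struct Kind;

// Arc-wrapped kind that still implements std::error::Error,
// so it can seed an eyre Report.
#[derive(Clone, Debug, thiserror::Error)]
#[error("{kind}")]
struct ArcKind {
    kind: Arc<Kind>,
}

struct Error {
    kind: ArcKind,
    display: Box<str>, // rendered once, at construction
    debug: Box<str>,
}

impl From<Kind> for Error {
    fn from(kind: Kind) -> Self {
        let kind = ArcKind { kind: Arc::new(kind) };
        let report = color_eyre::eyre::Report::new(kind.clone());
        Error {
            display: format!("{report}").into(),
            debug: format!("{report:?}").into(),
            kind,
        }
    }
}

fn main() {
    let err = Error::from(Kind);
    // Both strings were captured when the error was created.
    println!("{}", err.display);
    println!("{}", err.debug);
}
```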

@ -1,202 +0,0 @@
use actix_web::{
dev::Payload,
error::ParseError,
http::header::{from_one_raw_str, Header, HeaderName, HeaderValue, TryIntoHeaderValue},
web::Data,
FromRequest, HttpMessage, HttpRequest,
};
use bcrypt::{BcryptError, DEFAULT_COST};
use http_signature_normalization_actix::{prelude::InvalidHeaderValue, Canceled, Spawn};
use std::{convert::Infallible, str::FromStr, time::Instant};
use crate::{db::Db, error::Error, future::LocalBoxFuture, spawner::Spawner};
#[derive(Clone)]
pub(crate) struct AdminConfig {
hashed_api_token: String,
}
impl AdminConfig {
pub(crate) fn build(api_token: &str) -> Result<Self, Error> {
Ok(AdminConfig {
hashed_api_token: bcrypt::hash(api_token, DEFAULT_COST).map_err(Error::bcrypt_hash)?,
})
}
fn verify(&self, token: XApiToken) -> Result<bool, Error> {
bcrypt::verify(token.0, &self.hashed_api_token).map_err(Error::bcrypt_verify)
}
}
pub(crate) struct Admin {
db: Data<Db>,
}
type PrepareTuple = (Data<Db>, Data<AdminConfig>, Data<Spawner>, XApiToken);
impl Admin {
fn prepare_verify(req: &HttpRequest) -> Result<PrepareTuple, Error> {
let hashed_api_token = req
.app_data::<Data<AdminConfig>>()
.ok_or_else(Error::missing_config)?
.clone();
let x_api_token = XApiToken::parse(req).map_err(Error::parse_header)?;
let db = req
.app_data::<Data<Db>>()
.ok_or_else(Error::missing_db)?
.clone();
let spawner = req
.app_data::<Data<Spawner>>()
.ok_or_else(Error::missing_spawner)?
.clone();
Ok((db, hashed_api_token, spawner, x_api_token))
}
#[tracing::instrument(level = "debug", skip_all)]
async fn verify(
hashed_api_token: Data<AdminConfig>,
spawner: Data<Spawner>,
x_api_token: XApiToken,
) -> Result<(), Error> {
let span = tracing::Span::current();
if spawner
.spawn_blocking(move || span.in_scope(|| hashed_api_token.verify(x_api_token)))
.await
.map_err(Error::canceled)??
{
return Ok(());
}
Err(Error::invalid())
}
pub(crate) fn db_ref(&self) -> &Db {
&self.db
}
}
impl Error {
fn invalid() -> Self {
Error::from(ErrorKind::Invalid)
}
fn missing_config() -> Self {
Error::from(ErrorKind::MissingConfig)
}
fn missing_db() -> Self {
Error::from(ErrorKind::MissingDb)
}
fn missing_spawner() -> Self {
Error::from(ErrorKind::MissingSpawner)
}
fn bcrypt_verify(e: BcryptError) -> Self {
Error::from(ErrorKind::BCryptVerify(e))
}
fn bcrypt_hash(e: BcryptError) -> Self {
Error::from(ErrorKind::BCryptHash(e))
}
fn parse_header(e: ParseError) -> Self {
Error::from(ErrorKind::ParseHeader(e))
}
fn canceled(_: Canceled) -> Self {
Error::from(ErrorKind::Canceled)
}
}
#[derive(Debug, thiserror::Error)]
pub(crate) enum ErrorKind {
#[error("Invalid API Token")]
Invalid,
#[error("Missing Config")]
MissingConfig,
#[error("Missing Db")]
MissingDb,
#[error("Missing Spawner")]
MissingSpawner,
#[error("Panic in verify")]
Canceled,
#[error("Verifying")]
BCryptVerify(#[source] BcryptError),
#[error("Hashing")]
BCryptHash(#[source] BcryptError),
#[error("Parse Header")]
ParseHeader(#[source] ParseError),
}
impl FromRequest for Admin {
type Error = Error;
type Future = LocalBoxFuture<'static, Result<Self, Self::Error>>;
fn from_request(req: &HttpRequest, _: &mut Payload) -> Self::Future {
let now = Instant::now();
let res = Self::prepare_verify(req);
Box::pin(async move {
let (db, c, s, t) = res?;
Self::verify(c, s, t).await?;
metrics::histogram!("relay.admin.verify")
.record(now.elapsed().as_micros() as f64 / 1_000_000_f64);
Ok(Admin { db })
})
}
}
pub(crate) struct XApiToken(String);
impl XApiToken {
pub(crate) fn new(token: String) -> Self {
Self(token)
}
pub(crate) const fn http1_name() -> reqwest::header::HeaderName {
reqwest::header::HeaderName::from_static("x-api-token")
}
}
impl Header for XApiToken {
fn name() -> HeaderName {
HeaderName::from_static("x-api-token")
}
fn parse<M: HttpMessage>(msg: &M) -> Result<Self, ParseError> {
from_one_raw_str(msg.headers().get(Self::name()))
}
}
impl TryIntoHeaderValue for XApiToken {
type Error = InvalidHeaderValue;
fn try_into_value(self) -> Result<HeaderValue, Self::Error> {
HeaderValue::from_str(&self.0)
}
}
impl FromStr for XApiToken {
type Err = Infallible;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(XApiToken(s.to_string()))
}
}
impl std::fmt::Display for XApiToken {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.0.fmt(f)
}
}

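Aside: the core of the deleted extractor is plain bcrypt. `AdminConfig::build` hashes the configured token once at startup, and each request's `x-api-token` header is verified against that hash (on a separate thread, since bcrypt verification is deliberately slow). The underlying calls, as a runnable sketch using the `bcrypt` crate; the token value is illustrative:

```rust
use bcrypt::{hash, verify, DEFAULT_COST};

fn main() -> Result<(), bcrypt::BcryptError> {
    // Done once at startup, as in AdminConfig::build.
    let hashed_api_token = hash("my-api-token", DEFAULT_COST)?;

    // Done per request against the x-api-token header value.
    assert!(verify("my-api-token", &hashed_api_token)?);
    assert!(!verify("wrong-token", &hashed_api_token)?);
    Ok(())
}
```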

@ -1,4 +0,0 @@
use std::{future::Future, pin::Pin};
pub(crate) type LocalBoxFuture<'a, T> = Pin<Box<dyn Future<Output = T> + 'a>>;
pub(crate) type BoxFuture<'a, T> = Pin<Box<dyn Future<Output = T> + Send + 'a>>;

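Aside: these two aliases exist so trait impls (like the `Job` impls later in this diff) can write `BoxFuture<'static, Result<(), Error>>` instead of spelling out the pinned-box type each time. A tiny illustration of the `Send` variant; the `tokio` runtime here is only for the demo:

```rust
use std::{future::Future, pin::Pin};

type BoxFuture<'a, T> = Pin<Box<dyn Future<Output = T> + Send + 'a>>;

// Box::pin erases the concrete async-block type behind the alias,
// which is what lets a trait name it as an associated type.
fn delayed_answer() -> BoxFuture<'static, u32> {
    Box::pin(async { 42 })
}

#[tokio::main]
async fn main() {
    assert_eq!(delayed_answer().await, 42);
}
```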

@ -1,18 +0,0 @@
pub(crate) fn name_to_http02(
name: &reqwest::header::HeaderName,
) -> actix_web::http::header::HeaderName {
actix_web::http::header::HeaderName::from_bytes(name.as_ref())
.expect("headername conversions always work")
}
pub(crate) fn value_to_http02(
value: &reqwest::header::HeaderValue,
) -> actix_web::http::header::HeaderValue {
actix_web::http::header::HeaderValue::from_bytes(value.as_bytes())
.expect("headervalue conversions always work")
}
pub(crate) fn status_to_http02(status: reqwest::StatusCode) -> actix_web::http::StatusCode {
actix_web::http::StatusCode::from_u16(status.as_u16())
.expect("statuscode conversions always work")
}

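Aside: on main these helpers bridge reqwest's http-1.x types to the http-0.2 types actix-web still uses, byte-for-byte, so they only fail on values that were already invalid. A hypothetical use, copying a fetched status line and headers onto an actix response (the function shape and names are illustrative, assuming the helpers above are in scope):

```rust
// Hypothetical: mirror a reqwest status and header map onto an actix response.
fn to_actix_response(
    status: reqwest::StatusCode,
    headers: &reqwest::header::HeaderMap,
) -> actix_web::HttpResponse {
    let mut builder = actix_web::HttpResponse::build(status_to_http02(status));
    for (name, value) in headers {
        // append_header keeps repeated headers (e.g. Set-Cookie) intact.
        builder.append_header((name_to_http02(name), value_to_http02(value)));
    }
    builder.finish()
}
```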

@ -1,201 +0,0 @@
pub mod apub;
mod contact;
mod deliver;
mod deliver_many;
mod instance;
mod nodeinfo;
mod process_listeners;
mod record_last_online;
pub(crate) use self::{
contact::QueryContact, deliver::Deliver, deliver_many::DeliverMany, instance::QueryInstance,
nodeinfo::QueryNodeinfo,
};
use crate::{
config::Config,
data::{ActorCache, MediaCache, State},
error::{Error, ErrorKind},
jobs::{process_listeners::Listeners, record_last_online::RecordLastOnline},
};
use background_jobs::{
memory_storage::{Storage, TokioTimer},
metrics::MetricsStorage,
tokio::{QueueHandle, WorkerConfig},
Job,
};
use std::time::Duration;
fn debug_object(activity: &serde_json::Value) -> &serde_json::Value {
let mut object = &activity["object"]["type"];
if object.is_null() {
object = &activity["object"]["id"];
}
if object.is_null() {
object = &activity["object"];
}
object
}
pub(crate) fn build_storage() -> MetricsStorage<Storage<TokioTimer>> {
MetricsStorage::wrap(Storage::new(TokioTimer))
}
pub(crate) fn create_workers(
storage: MetricsStorage<Storage<TokioTimer>>,
state: State,
actors: ActorCache,
media: MediaCache,
config: Config,
) -> std::io::Result<JobServer> {
let deliver_concurrency = config.deliver_concurrency();
let queue_handle = WorkerConfig::new(storage, move |queue_handle| {
JobState::new(
state.clone(),
actors.clone(),
JobServer::new(queue_handle),
media.clone(),
config.clone(),
)
})
.register::<Deliver>()
.register::<DeliverMany>()
.register::<QueryNodeinfo>()
.register::<QueryInstance>()
.register::<Listeners>()
.register::<QueryContact>()
.register::<RecordLastOnline>()
.register::<apub::Announce>()
.register::<apub::Follow>()
.register::<apub::Forward>()
.register::<apub::Reject>()
.register::<apub::Undo>()
.set_worker_count("maintenance", 2)
.set_worker_count("apub", 2)
.set_worker_count("deliver", deliver_concurrency)
.start()?;
queue_handle.every(Duration::from_secs(60 * 5), Listeners)?;
queue_handle.every(Duration::from_secs(60 * 10), RecordLastOnline)?;
Ok(JobServer::new(queue_handle))
}
#[derive(Clone, Debug)]
pub(crate) struct JobState {
state: State,
actors: ActorCache,
config: Config,
media: MediaCache,
job_server: JobServer,
}
#[derive(Clone)]
pub(crate) struct JobServer {
remote: QueueHandle,
}
impl std::fmt::Debug for JobServer {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("JobServer")
.field("queue_handle", &"QueueHandle")
.finish()
}
}
impl JobState {
fn new(
state: State,
actors: ActorCache,
job_server: JobServer,
media: MediaCache,
config: Config,
) -> Self {
JobState {
state,
actors,
config,
media,
job_server,
}
}
}
impl JobServer {
fn new(remote_handle: QueueHandle) -> Self {
JobServer {
remote: remote_handle,
}
}
pub(crate) async fn queue<J>(&self, job: J) -> Result<(), Error>
where
J: Job,
{
self.remote
.queue(job)
.await
.map_err(ErrorKind::Queue)
.map_err(Into::into)
}
}
struct Boolish {
inner: bool,
}
impl std::ops::Deref for Boolish {
type Target = bool;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl<'de> serde::Deserialize<'de> for Boolish {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
#[derive(serde::Deserialize)]
#[serde(untagged)]
enum BoolThing {
Bool(bool),
String(String),
}
let thing: BoolThing = serde::Deserialize::deserialize(deserializer)?;
match thing {
BoolThing::Bool(inner) => Ok(Boolish { inner }),
BoolThing::String(s) if s.to_lowercase() == "false" => Ok(Boolish { inner: false }),
BoolThing::String(_) => Ok(Boolish { inner: true }),
}
}
}
#[cfg(test)]
mod tests {
use super::Boolish;
#[test]
fn boolish_works() {
const CASES: &[(&str, bool)] = &[
("false", false),
("\"false\"", false),
("\"FALSE\"", false),
("true", true),
("\"true\"", true),
("\"anything else\"", true),
];
for (case, output) in CASES {
let b: Boolish = serde_json::from_str(case).unwrap();
assert_eq!(*b, *output);
}
}
}

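Aside: `debug_object` above keeps job logs terse by preferring the inner object's `type`, then its `id`, then the raw object. Its fall-through behaviour against two sample activities, assuming the function from the listing above is in scope:

```rust
fn main() {
    // Nested object with a type: the type wins.
    let activity = serde_json::json!({
        "type": "Announce",
        "object": { "type": "Note", "id": "https://example.com/note/1" }
    });
    assert_eq!(debug_object(&activity), &serde_json::json!("Note"));

    // Bare IRI object: both lookups miss, so the object itself is returned.
    let bare = serde_json::json!({ "object": "https://example.com/note/1" });
    assert_eq!(
        debug_object(&bare),
        &serde_json::json!("https://example.com/note/1")
    );
}
```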

@ -2,14 +2,14 @@ use crate::{
config::{Config, UrlKind}, config::{Config, UrlKind},
db::Actor, db::Actor,
error::Error, error::Error,
future::BoxFuture,
jobs::{ jobs::{
apub::{get_inboxes, prepare_activity}, apub::{get_inboxes, prepare_activity},
DeliverMany, JobState, DeliverMany, JobState,
}, },
}; };
use activitystreams::{activity::Announce as AsAnnounce, iri_string::types::IriString}; use activitystreams::{activity::Announce as AsAnnounce, iri_string::types::IriString};
use background_jobs::Job; use background_jobs::ActixJob;
use std::{future::Future, pin::Pin};
#[derive(Clone, serde::Deserialize, serde::Serialize)] #[derive(Clone, serde::Deserialize, serde::Serialize)]
pub(crate) struct Announce { pub(crate) struct Announce {
@ -21,7 +21,7 @@ impl std::fmt::Debug for Announce {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Announce") f.debug_struct("Announce")
.field("object_id", &self.object_id.to_string()) .field("object_id", &self.object_id.to_string())
.field("actor_id", &self.actor.id) .field("actor", &self.actor)
.finish() .finish()
} }
} }
@ -31,7 +31,7 @@ impl Announce {
Announce { object_id, actor } Announce { object_id, actor }
} }
#[tracing::instrument(name = "Announce", skip(state))] #[tracing::instrument(name = "Announce")]
async fn perform(self, state: JobState) -> Result<(), Error> { async fn perform(self, state: JobState) -> Result<(), Error> {
let activity_id = state.config.generate_url(UrlKind::Activity); let activity_id = state.config.generate_url(UrlKind::Activity);
@ -39,10 +39,9 @@ impl Announce {
let inboxes = get_inboxes(&state.state, &self.actor, &self.object_id).await?; let inboxes = get_inboxes(&state.state, &self.actor, &self.object_id).await?;
state state
.job_server .job_server
.queue(DeliverMany::new(inboxes, announce)?) .queue(DeliverMany::new(inboxes, announce)?).await?;
.await?;
state.state.cache(self.object_id, activity_id); state.state.cache(self.object_id, activity_id).await;
Ok(()) Ok(())
} }
} }
@ -62,15 +61,13 @@ fn generate_announce(
) )
} }
impl Job for Announce { impl ActixJob for Announce {
type State = JobState; type State = JobState;
type Error = Error; type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
const NAME: &'static str = "relay::jobs::apub::Announce"; const NAME: &'static str = "relay::jobs::apub::Announce";
const QUEUE: &'static str = "apub";
fn run(self, state: Self::State) -> Self::Future { fn run(self, state: Self::State) -> Self::Future {
Box::pin(self.perform(state)) Box::pin(async move { self.perform(state).await.map_err(Into::into) })
} }
} }

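Aside: this `Announce` hunk shows the migration every apub job in this comparison goes through: v0.3.20's `ActixJob` (non-`Send` future, `anyhow::Error`, one implicit queue) becomes `background_jobs::Job` with a typed `Error`, a `Send` future, and an explicit `QUEUE`. The main-side shape, reduced to a stand-in job; the job name is illustrative, while `JobState`, `Error`, and `BoxFuture` are the crate's own types from this diff:

```rust
use background_jobs::Job;

#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
struct Ping;

impl Job for Ping {
    type State = JobState;
    // Typed error instead of anyhow::Error.
    type Error = Error;
    // Send future, so jobs can run on a multi-threaded runtime.
    type Future = BoxFuture<'static, Result<(), Self::Error>>;

    const NAME: &'static str = "relay::jobs::Ping";
    // Jobs now declare which worker pool they run on.
    const QUEUE: &'static str = "maintenance";

    fn run(self, _state: Self::State) -> Self::Future {
        Box::pin(async move { Ok(()) })
    }
}
```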

@ -3,37 +3,28 @@ use crate::{
config::{Config, UrlKind}, config::{Config, UrlKind},
db::Actor, db::Actor,
error::{Error, ErrorKind}, error::{Error, ErrorKind},
future::BoxFuture,
jobs::{apub::prepare_activity, Deliver, JobState, QueryInstance, QueryNodeinfo}, jobs::{apub::prepare_activity, Deliver, JobState, QueryInstance, QueryNodeinfo},
}; };
use activitystreams::{ use activitystreams::{
activity::{Accept as AsAccept, Follow as AsFollow}, activity::{Accept as AsAccept, Follow as AsFollow},
iri_string::types::IriString,
prelude::*, prelude::*,
iri_string::types::IriString,
}; };
use background_jobs::Job; use background_jobs::ActixJob;
use std::{future::Future, pin::Pin};
#[derive(Clone, serde::Deserialize, serde::Serialize)] #[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub(crate) struct Follow { pub(crate) struct Follow {
input: AcceptedActivities, input: AcceptedActivities,
actor: Actor, actor: Actor,
} }
impl std::fmt::Debug for Follow {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Follow")
.field("input", &self.input.id_unchecked())
.field("actor", &self.actor.id)
.finish()
}
}
impl Follow { impl Follow {
pub fn new(input: AcceptedActivities, actor: Actor) -> Self { pub fn new(input: AcceptedActivities, actor: Actor) -> Self {
Follow { input, actor } Follow { input, actor }
} }
#[tracing::instrument(name = "Follow", skip(state))] #[tracing::instrument(name = "Follow")]
async fn perform(self, state: JobState) -> Result<(), Error> { async fn perform(self, state: JobState) -> Result<(), Error> {
let my_id = state.config.generate_url(UrlKind::Actor); let my_id = state.config.generate_url(UrlKind::Actor);
@ -44,8 +35,7 @@ impl Follow {
let follow = generate_follow(&state.config, &self.actor.id, &my_id)?; let follow = generate_follow(&state.config, &self.actor.id, &my_id)?;
state state
.job_server .job_server
.queue(Deliver::new(self.actor.inbox.clone(), follow)?) .queue(Deliver::new(self.actor.inbox.clone(), follow)?).await?;
.await?;
} }
state.actors.add_connection(self.actor.clone()).await?; state.actors.add_connection(self.actor.clone()).await?;
@ -59,29 +49,20 @@ impl Follow {
state state
.job_server .job_server
.queue(Deliver::new(self.actor.inbox, accept)?) .queue(Deliver::new(self.actor.inbox, accept)?).await?;
.await?;
state state
.job_server .job_server
.queue(QueryInstance::new(self.actor.id.clone())) .queue(QueryInstance::new(self.actor.id.clone())).await?;
.await?;
state state.job_server.queue(QueryNodeinfo::new(self.actor.id)).await?;
.job_server
.queue(QueryNodeinfo::new(self.actor.id))
.await?;
Ok(()) Ok(())
} }
} }
// Generate a type that says "I want to follow you" // Generate a type that says "I want to follow you"
fn generate_follow( fn generate_follow(config: &Config, actor_id: &IriString, my_id: &IriString) -> Result<AsFollow, Error> {
config: &Config,
actor_id: &IriString,
my_id: &IriString,
) -> Result<AsFollow, Error> {
let follow = AsFollow::new(my_id.clone(), actor_id.clone()); let follow = AsFollow::new(my_id.clone(), actor_id.clone());
prepare_activity( prepare_activity(
@ -111,15 +92,13 @@ fn generate_accept_follow(
) )
} }
impl Job for Follow { impl ActixJob for Follow {
type State = JobState; type State = JobState;
type Error = Error; type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
const NAME: &'static str = "relay::jobs::apub::Follow"; const NAME: &'static str = "relay::jobs::apub::Follow";
const QUEUE: &'static str = "apub";
fn run(self, state: Self::State) -> Self::Future { fn run(self, state: Self::State) -> Self::Future {
Box::pin(self.perform(state)) Box::pin(async move { self.perform(state).await.map_err(Into::into) })
} }
} }


@ -2,33 +2,24 @@ use crate::{
apub::AcceptedActivities, apub::AcceptedActivities,
db::Actor, db::Actor,
error::{Error, ErrorKind}, error::{Error, ErrorKind},
future::BoxFuture,
jobs::{apub::get_inboxes, DeliverMany, JobState}, jobs::{apub::get_inboxes, DeliverMany, JobState},
}; };
use activitystreams::prelude::*; use activitystreams::prelude::*;
use background_jobs::Job; use background_jobs::ActixJob;
use std::{future::Future, pin::Pin};
#[derive(Clone, serde::Deserialize, serde::Serialize)] #[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub(crate) struct Forward { pub(crate) struct Forward {
input: AcceptedActivities, input: AcceptedActivities,
actor: Actor, actor: Actor,
} }
impl std::fmt::Debug for Forward {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Forward")
.field("input", &self.input.id_unchecked())
.field("actor", &self.actor.id)
.finish()
}
}
impl Forward { impl Forward {
pub fn new(input: AcceptedActivities, actor: Actor) -> Self { pub fn new(input: AcceptedActivities, actor: Actor) -> Self {
Forward { input, actor } Forward { input, actor }
} }
#[tracing::instrument(name = "Forward", skip(state))] #[tracing::instrument(name = "Forward")]
async fn perform(self, state: JobState) -> Result<(), Error> { async fn perform(self, state: JobState) -> Result<(), Error> {
let object_id = self let object_id = self
.input .input
@ -47,15 +38,13 @@ impl Forward {
} }
} }
impl Job for Forward { impl ActixJob for Forward {
type State = JobState; type State = JobState;
type Error = Error; type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
const NAME: &'static str = "relay::jobs::apub::Forward"; const NAME: &'static str = "relay::jobs::apub::Forward";
const QUEUE: &'static str = "apub";
fn run(self, state: Self::State) -> Self::Future { fn run(self, state: Self::State) -> Self::Future {
Box::pin(self.perform(state)) Box::pin(async move { self.perform(state).await.map_err(Into::into) })
} }
} }


@ -36,13 +36,13 @@ async fn get_inboxes(
state.inboxes_without(&actor.inbox, &authority).await state.inboxes_without(&actor.inbox, &authority).await
} }
fn prepare_activity<T, U, V>( fn prepare_activity<T, U, V, Kind>(
mut t: T, mut t: T,
id: impl TryInto<IriString, Error = U>, id: impl TryInto<IriString, Error = U>,
to: impl TryInto<IriString, Error = V>, to: impl TryInto<IriString, Error = V>,
) -> Result<T, Error> ) -> Result<T, Error>
where where
T: ObjectExt + BaseExt, T: ObjectExt<Kind> + BaseExt<Kind>,
Error: From<U> + From<V>, Error: From<U> + From<V>,
{ {
t.set_id(id.try_into()?) t.set_id(id.try_into()?)


@ -2,46 +2,35 @@ use crate::{
config::UrlKind, config::UrlKind,
db::Actor, db::Actor,
error::Error, error::Error,
future::BoxFuture,
jobs::{apub::generate_undo_follow, Deliver, JobState}, jobs::{apub::generate_undo_follow, Deliver, JobState},
}; };
use background_jobs::Job; use background_jobs::ActixJob;
use std::{future::Future, pin::Pin};
#[derive(Clone, serde::Deserialize, serde::Serialize)] #[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub(crate) struct Reject(pub(crate) Actor); pub(crate) struct Reject(pub(crate) Actor);
impl std::fmt::Debug for Reject {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Reject").field("actor", &self.0.id).finish()
}
}
impl Reject { impl Reject {
#[tracing::instrument(name = "Reject", skip(state))] #[tracing::instrument(name = "Reject")]
async fn perform(self, state: JobState) -> Result<(), Error> { async fn perform(self, state: JobState) -> Result<(), Error> {
state.actors.remove_connection(&self.0).await?; state.actors.remove_connection(&self.0).await?;
let my_id = state.config.generate_url(UrlKind::Actor); let my_id = state.config.generate_url(UrlKind::Actor);
let undo = generate_undo_follow(&state.config, &self.0.id, &my_id)?; let undo = generate_undo_follow(&state.config, &self.0.id, &my_id)?;
state state.job_server.queue(Deliver::new(self.0.inbox, undo)?).await?;
.job_server
.queue(Deliver::new(self.0.inbox, undo)?)
.await?;
Ok(()) Ok(())
} }
} }
impl Job for Reject { impl ActixJob for Reject {
type State = JobState; type State = JobState;
type Error = Error; type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
const NAME: &'static str = "relay::jobs::apub::Reject"; const NAME: &'static str = "relay::jobs::apub::Reject";
const QUEUE: &'static str = "apub";
fn run(self, state: Self::State) -> Self::Future { fn run(self, state: Self::State) -> Self::Future {
Box::pin(self.perform(state)) Box::pin(async move { self.perform(state).await.map_err(Into::into) })
} }
} }


@ -3,33 +3,23 @@ use crate::{
config::UrlKind, config::UrlKind,
db::Actor, db::Actor,
error::Error, error::Error,
future::BoxFuture,
jobs::{apub::generate_undo_follow, Deliver, JobState}, jobs::{apub::generate_undo_follow, Deliver, JobState},
}; };
use activitystreams::prelude::BaseExt; use background_jobs::ActixJob;
use background_jobs::Job; use std::{future::Future, pin::Pin};
#[derive(Clone, serde::Deserialize, serde::Serialize)] #[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub(crate) struct Undo { pub(crate) struct Undo {
input: AcceptedActivities, input: AcceptedActivities,
actor: Actor, actor: Actor,
} }
impl std::fmt::Debug for Undo {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Undo")
.field("input", &self.input.id_unchecked())
.field("actor", &self.actor.id)
.finish()
}
}
impl Undo { impl Undo {
pub(crate) fn new(input: AcceptedActivities, actor: Actor) -> Self { pub(crate) fn new(input: AcceptedActivities, actor: Actor) -> Self {
Undo { input, actor } Undo { input, actor }
} }
#[tracing::instrument(name = "Undo", skip(state))] #[tracing::instrument(name = "Undo")]
async fn perform(self, state: JobState) -> Result<(), Error> { async fn perform(self, state: JobState) -> Result<(), Error> {
let was_following = state.state.db.is_connected(self.actor.id.clone()).await?; let was_following = state.state.db.is_connected(self.actor.id.clone()).await?;
@ -40,23 +30,20 @@ impl Undo {
let undo = generate_undo_follow(&state.config, &self.actor.id, &my_id)?; let undo = generate_undo_follow(&state.config, &self.actor.id, &my_id)?;
state state
.job_server .job_server
.queue(Deliver::new(self.actor.inbox, undo)?) .queue(Deliver::new(self.actor.inbox, undo)?).await?;
.await?;
} }
Ok(()) Ok(())
} }
} }
impl Job for Undo { impl ActixJob for Undo {
type State = JobState; type State = JobState;
type Error = Error; type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
const NAME: &'static str = "relay::jobs::apub::Undo"; const NAME: &'static str = "relay::jobs::apub::Undo";
const QUEUE: &'static str = "apub";
fn run(self, state: Self::State) -> Self::Future { fn run(self, state: Self::State) -> Self::Future {
Box::pin(self.perform(state)) Box::pin(async move { self.perform(state).await.map_err(Into::into) })
} }
} }

44
src/jobs/cache_media.rs Normal file

@ -0,0 +1,44 @@
use crate::{error::Error, jobs::JobState};
use background_jobs::ActixJob;
use std::{future::Future, pin::Pin};
use uuid::Uuid;
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub(crate) struct CacheMedia {
uuid: Uuid,
}
impl CacheMedia {
pub(crate) fn new(uuid: Uuid) -> Self {
CacheMedia { uuid }
}
#[tracing::instrument(name = "Cache media")]
async fn perform(self, state: JobState) -> Result<(), Error> {
if !state.media.is_outdated(self.uuid).await? {
return Ok(());
}
if let Some(url) = state.media.get_url(self.uuid).await? {
let (content_type, bytes) = state.requests.fetch_bytes(url.as_str()).await?;
state
.media
.store_bytes(self.uuid, content_type, bytes)
.await?;
}
Ok(())
}
}
impl ActixJob for CacheMedia {
type State = JobState;
type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
const NAME: &'static str = "relay::jobs::CacheMedia";
fn run(self, state: Self::State) -> Self::Future {
Box::pin(async move { self.perform(state).await.map_err(Into::into) })
}
}


@ -1,12 +1,11 @@
use crate::{ use crate::{
apub::AcceptedActors, apub::AcceptedActors,
error::{Error, ErrorKind}, error::{Error, ErrorKind},
future::BoxFuture,
jobs::JobState, jobs::JobState,
requests::BreakerStrategy,
}; };
use activitystreams::{iri_string::types::IriString, object::Image, prelude::*}; use activitystreams::{object::Image, prelude::*, iri_string::types::IriString};
use background_jobs::Job; use background_jobs::ActixJob;
use std::{future::Future, pin::Pin};
#[derive(Clone, serde::Deserialize, serde::Serialize)] #[derive(Clone, serde::Deserialize, serde::Serialize)]
pub(crate) struct QueryContact { pub(crate) struct QueryContact {
@ -33,7 +32,6 @@ impl QueryContact {
async fn perform(self, state: JobState) -> Result<(), Error> { async fn perform(self, state: JobState) -> Result<(), Error> {
let contact_outdated = state let contact_outdated = state
.state
.node_cache .node_cache
.is_contact_outdated(self.actor_id.clone()) .is_contact_outdated(self.actor_id.clone())
.await; .await;
@ -42,25 +40,15 @@ impl QueryContact {
return Ok(()); return Ok(());
} }
let contact = match state let contact = state
.state
.requests .requests
.fetch::<AcceptedActors>(&self.contact_id, BreakerStrategy::Allow404AndBelow) .fetch::<AcceptedActors>(self.contact_id.as_str())
.await .await?;
{
Ok(contact) => contact,
Err(e) if e.is_breaker() => {
tracing::debug!("Not retrying due to failed breaker");
return Ok(());
}
Err(e) => return Err(e),
};
let (username, display_name, url, avatar) = let (username, display_name, url, avatar) =
to_contact(contact).ok_or(ErrorKind::Extract("contact"))?; to_contact(contact).ok_or(ErrorKind::Extract("contact"))?;
state state
.state
.node_cache .node_cache
.set_contact(self.actor_id, username, display_name, url, avatar) .set_contact(self.actor_id, username, display_name, url, avatar)
.await?; .await?;
@ -85,16 +73,14 @@ fn to_contact(contact: AcceptedActors) -> Option<(String, String, IriString, Iri
Some((username, display_name, url, avatar)) Some((username, display_name, url, avatar))
} }
impl Job for QueryContact { impl ActixJob for QueryContact {
type State = JobState; type State = JobState;
type Error = Error; type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
const NAME: &'static str = "relay::jobs::QueryContact"; const NAME: &'static str = "relay::jobs::QueryContact";
const QUEUE: &'static str = "maintenance";
fn run(self, state: Self::State) -> Self::Future { fn run(self, state: Self::State) -> Self::Future {
Box::pin(self.perform(state)) Box::pin(async move { self.perform(state).await.map_err(Into::into) })
} }
} }
@ -102,7 +88,7 @@ impl Job for QueryContact {
mod tests { mod tests {
use super::to_contact; use super::to_contact;
const HYNET_ADMIN: &str = r#"{"@context":["https://www.w3.org/ns/activitystreams","https://soc.hyena.network/schemas/litepub-0.1.jsonld",{"@language":"und"}],"alsoKnownAs":[],"attachment":[{"name":"Website","type":"PropertyValue","value":"https://hyena.network/"},{"name":"Services","type":"PropertyValue","value":"Pleroma, Invidious, SearX, XMPP"},{"name":"CW","type":"PropertyValue","value":"all long posts"}],"capabilities":{"acceptsChatMessages":true},"discoverable":true,"endpoints":{"oauthAuthorizationEndpoint":"https://soc.hyena.network/oauth/authorize","oauthRegistrationEndpoint":"https://soc.hyena.network/api/v1/apps","oauthTokenEndpoint":"https://soc.hyena.network/oauth/token","sharedInbox":"https://soc.hyena.network/inbox","uploadMedia":"https://soc.hyena.network/api/ap/upload_media"},"followers":"https://soc.hyena.network/users/HyNET/followers","following":"https://soc.hyena.network/users/HyNET/following","icon":{"type":"Image","url":"https://soc.hyena.network/media/ab149b1e0196ffdbecc6830c7f6f1a14dd8d8408ec7db0f1e8ad9d40e600ea73.gif"},"id":"https://soc.hyena.network/users/HyNET","image":{"type":"Image","url":"https://soc.hyena.network/media/12ba78d3015e13aa65ac4e106e574dd7bf959614585f10ce85de40e0148da677.png"},"inbox":"https://soc.hyena.network/users/HyNET/inbox","manuallyApprovesFollowers":false,"name":"HyNET Announcement System :glider:","outbox":"https://soc.hyena.network/users/HyNET/outbox","preferredUsername":"HyNET","publicKey":{"id":"https://soc.hyena.network/users/HyNET#main-key","owner":"https://soc.hyena.network/users/HyNET","publicKeyPem":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyF74womumWRhR7RW4Q6a\n2+Av/Ue8QHiKwjQARJEakbKnKgkI5FRFVVOfMiYVJp/juNt4GLgK15panBqJa9Yt\nWACiHQjBd2yVI5tIHiae0uBj5SdUVuduoycVLG0lpJsg12p8m/vL1oaeLqehTqa6\nsYplQh1GCLet0cUdn/66Cj2pAPD3V7Bz3VnG+oyXIsGQbBB8RHnWhFH8b0qQOyur\nJRAB8aye6QAL2sQbfISM2lycWzNeIHkqsUb7FdqdhQ+Ze0rETRGDkOO2Qvpg0hQm\n6owMsHnHA/DzyOHLy6Yf+I3OUlBC/P1SSAKwORsifFDXL322AEqoDi5ZpwzG9m5z\nAQIDAQAB\n-----END PUBLIC KEY-----\n\n"},"summary":"Ran by <span class=\"h-card\"><a class=\"u-url mention\" data-user=\"9s8j4AHGt3ED0P0b6e\" href=\"https://soc.hyena.network/users/mel\" rel=\"ugc\">@<span>mel</span></a></span> :adm1::adm2: <br/>For direct help with the service, send <span class=\"h-card\"><a class=\"u-url mention\" data-user=\"9s8j4AHGt3ED0P0b6e\" href=\"https://soc.hyena.network/users/mel\" rel=\"ugc\">@<span>mel</span></a></span> a message.","tag":[{"icon":{"type":"Image","url":"https://soc.hyena.network/emoji/Signs/adm1.png"},"id":"https://soc.hyena.network/emoji/Signs/adm1.png","name":":adm1:","type":"Emoji","updated":"1970-01-01T00:00:00Z"},{"icon":{"type":"Image","url":"https://soc.hyena.network/emoji/Signs/adm2.png"},"id":"https://soc.hyena.network/emoji/Signs/adm2.png","name":":adm2:","type":"Emoji","updated":"1970-01-01T00:00:00Z"},{"icon":{"type":"Image","url":"https://soc.hyena.network/emoji/misc/glider.png"},"id":"https://soc.hyena.network/emoji/misc/glider.png","name":":glider:","type":"Emoji","updated":"1970-01-01T00:00:00Z"}],"type":"Service","url":"https://soc.hyena.network/users/HyNET"}"#; const HYNET_ADMIN: &'static str = r#"{"@context":["https://www.w3.org/ns/activitystreams","https://soc.hyena.network/schemas/litepub-0.1.jsonld",{"@language":"und"}],"alsoKnownAs":[],"attachment":[{"name":"Website","type":"PropertyValue","value":"https://hyena.network/"},{"name":"Services","type":"PropertyValue","value":"Pleroma, Invidious, SearX, 
XMPP"},{"name":"CW","type":"PropertyValue","value":"all long posts"}],"capabilities":{"acceptsChatMessages":true},"discoverable":true,"endpoints":{"oauthAuthorizationEndpoint":"https://soc.hyena.network/oauth/authorize","oauthRegistrationEndpoint":"https://soc.hyena.network/api/v1/apps","oauthTokenEndpoint":"https://soc.hyena.network/oauth/token","sharedInbox":"https://soc.hyena.network/inbox","uploadMedia":"https://soc.hyena.network/api/ap/upload_media"},"followers":"https://soc.hyena.network/users/HyNET/followers","following":"https://soc.hyena.network/users/HyNET/following","icon":{"type":"Image","url":"https://soc.hyena.network/media/ab149b1e0196ffdbecc6830c7f6f1a14dd8d8408ec7db0f1e8ad9d40e600ea73.gif"},"id":"https://soc.hyena.network/users/HyNET","image":{"type":"Image","url":"https://soc.hyena.network/media/12ba78d3015e13aa65ac4e106e574dd7bf959614585f10ce85de40e0148da677.png"},"inbox":"https://soc.hyena.network/users/HyNET/inbox","manuallyApprovesFollowers":false,"name":"HyNET Announcement System :glider:","outbox":"https://soc.hyena.network/users/HyNET/outbox","preferredUsername":"HyNET","publicKey":{"id":"https://soc.hyena.network/users/HyNET#main-key","owner":"https://soc.hyena.network/users/HyNET","publicKeyPem":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyF74womumWRhR7RW4Q6a\n2+Av/Ue8QHiKwjQARJEakbKnKgkI5FRFVVOfMiYVJp/juNt4GLgK15panBqJa9Yt\nWACiHQjBd2yVI5tIHiae0uBj5SdUVuduoycVLG0lpJsg12p8m/vL1oaeLqehTqa6\nsYplQh1GCLet0cUdn/66Cj2pAPD3V7Bz3VnG+oyXIsGQbBB8RHnWhFH8b0qQOyur\nJRAB8aye6QAL2sQbfISM2lycWzNeIHkqsUb7FdqdhQ+Ze0rETRGDkOO2Qvpg0hQm\n6owMsHnHA/DzyOHLy6Yf+I3OUlBC/P1SSAKwORsifFDXL322AEqoDi5ZpwzG9m5z\nAQIDAQAB\n-----END PUBLIC KEY-----\n\n"},"summary":"Ran by <span class=\"h-card\"><a class=\"u-url mention\" data-user=\"9s8j4AHGt3ED0P0b6e\" href=\"https://soc.hyena.network/users/mel\" rel=\"ugc\">@<span>mel</span></a></span> :adm1::adm2: <br/>For direct help with the service, send <span class=\"h-card\"><a class=\"u-url mention\" data-user=\"9s8j4AHGt3ED0P0b6e\" href=\"https://soc.hyena.network/users/mel\" rel=\"ugc\">@<span>mel</span></a></span> a message.","tag":[{"icon":{"type":"Image","url":"https://soc.hyena.network/emoji/Signs/adm1.png"},"id":"https://soc.hyena.network/emoji/Signs/adm1.png","name":":adm1:","type":"Emoji","updated":"1970-01-01T00:00:00Z"},{"icon":{"type":"Image","url":"https://soc.hyena.network/emoji/Signs/adm2.png"},"id":"https://soc.hyena.network/emoji/Signs/adm2.png","name":":adm2:","type":"Emoji","updated":"1970-01-01T00:00:00Z"},{"icon":{"type":"Image","url":"https://soc.hyena.network/emoji/misc/glider.png"},"id":"https://soc.hyena.network/emoji/misc/glider.png","name":":glider:","type":"Emoji","updated":"1970-01-01T00:00:00Z"}],"type":"Service","url":"https://soc.hyena.network/users/HyNET"}"#;
#[test] #[test]
fn parse_hynet() { fn parse_hynet() {


@ -1,11 +1,7 @@
use crate::{ use crate::{error::Error, jobs::JobState};
error::Error,
future::BoxFuture,
jobs::{debug_object, JobState},
requests::BreakerStrategy,
};
use activitystreams::iri_string::types::IriString; use activitystreams::iri_string::types::IriString;
use background_jobs::{Backoff, Job}; use background_jobs::{ActixJob, Backoff};
use std::{future::Future, pin::Pin};
#[derive(Clone, serde::Deserialize, serde::Serialize)] #[derive(Clone, serde::Deserialize, serde::Serialize)]
pub(crate) struct Deliver { pub(crate) struct Deliver {
@ -17,8 +13,7 @@ impl std::fmt::Debug for Deliver {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Deliver") f.debug_struct("Deliver")
.field("to", &self.to.to_string()) .field("to", &self.to.to_string())
.field("activity", &self.data["type"]) .field("data", &self.data)
.field("object", debug_object(&self.data))
.finish() .finish()
} }
} }
@ -34,38 +29,21 @@ impl Deliver {
}) })
} }
#[tracing::instrument(name = "Deliver", skip(state))] #[tracing::instrument(name = "Deliver")]
async fn perform(self, state: JobState) -> Result<(), Error> { async fn permform(self, state: JobState) -> Result<(), Error> {
if let Err(e) = state state.requests.deliver(self.to, &self.data).await?;
.state
.requests
.deliver(&self.to, &self.data, BreakerStrategy::Allow401AndBelow)
.await
{
if e.is_breaker() {
tracing::debug!("Not trying due to failed breaker");
return Ok(());
}
if e.is_bad_request() {
tracing::debug!("Server didn't understand the activity");
return Ok(());
}
return Err(e);
}
Ok(()) Ok(())
} }
} }
impl Job for Deliver { impl ActixJob for Deliver {
type State = JobState; type State = JobState;
type Error = Error; type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
const NAME: &'static str = "relay::jobs::Deliver"; const NAME: &'static str = "relay::jobs::Deliver";
const QUEUE: &'static str = "deliver";
const BACKOFF: Backoff = Backoff::Exponential(8); const BACKOFF: Backoff = Backoff::Exponential(8);
fn run(self, state: Self::State) -> Self::Future { fn run(self, state: Self::State) -> Self::Future {
Box::pin(self.perform(state)) Box::pin(async move { self.permform(state).await.map_err(Into::into) })
} }
} }

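Aside: the rewritten `Deliver::perform` above encodes a retry policy: errors that can never succeed on retry are swallowed so the job completes, while everything else propagates so `Backoff::Exponential(8)` reschedules it. The decision, sketched around a hypothetical `deliver_attempt()` standing in for the actual signed request, with `Error` being the crate's error type:

```rust
async fn deliver_with_policy() -> Result<(), Error> {
    if let Err(e) = deliver_attempt().await {
        if e.is_breaker() {
            // Circuit breaker is open for this host; retrying now is pointless.
            return Ok(());
        }
        if e.is_bad_request() {
            // The remote rejected the payload; a retry would send the same bytes.
            return Ok(());
        }
        // Transient failure (timeout, 5xx, ...): let the queue retry with backoff.
        return Err(e);
    }
    Ok(())
}
```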

@ -1,10 +1,10 @@
use crate::{ use crate::{
error::Error, error::Error,
future::BoxFuture, jobs::{Deliver, JobState},
jobs::{debug_object, Deliver, JobState},
}; };
use activitystreams::iri_string::types::IriString; use activitystreams::iri_string::types::IriString;
use background_jobs::Job; use background_jobs::ActixJob;
use futures_util::future::LocalBoxFuture;
#[derive(Clone, serde::Deserialize, serde::Serialize)] #[derive(Clone, serde::Deserialize, serde::Serialize)]
pub(crate) struct DeliverMany { pub(crate) struct DeliverMany {
@ -14,9 +14,17 @@ pub(crate) struct DeliverMany {
impl std::fmt::Debug for DeliverMany { impl std::fmt::Debug for DeliverMany {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let to = format!(
"[{}]",
self.to
.iter()
.map(|u| u.to_string())
.collect::<Vec<_>>()
.join(", ")
);
f.debug_struct("DeliverMany") f.debug_struct("DeliverMany")
.field("activity", &self.data["type"]) .field("to", &to)
.field("object", debug_object(&self.data)) .field("data", &self.data)
.finish() .finish()
} }
} }
@ -32,7 +40,7 @@ impl DeliverMany {
}) })
} }
#[tracing::instrument(name = "Deliver many", skip(state))] #[tracing::instrument(name = "Deliver many")]
async fn perform(self, state: JobState) -> Result<(), Error> { async fn perform(self, state: JobState) -> Result<(), Error> {
for inbox in self.to { for inbox in self.to {
state state
@ -45,15 +53,13 @@ impl DeliverMany {
} }
} }
impl Job for DeliverMany { impl ActixJob for DeliverMany {
type State = JobState; type State = JobState;
type Error = Error; type Future = LocalBoxFuture<'static, Result<(), anyhow::Error>>;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
const NAME: &'static str = "relay::jobs::DeliverMany"; const NAME: &'static str = "relay::jobs::DeliverMany";
const QUEUE: &'static str = "deliver";
fn run(self, state: Self::State) -> Self::Future { fn run(self, state: Self::State) -> Self::Future {
Box::pin(self.perform(state)) Box::pin(async move { self.perform(state).await.map_err(Into::into) })
} }
} }

File diff suppressed because one or more lines are too long

123
src/jobs/mod.rs Normal file

@ -0,0 +1,123 @@
pub mod apub;
mod cache_media;
mod contact;
mod deliver;
mod deliver_many;
mod instance;
mod nodeinfo;
mod process_listeners;
pub(crate) use self::{
cache_media::CacheMedia, contact::QueryContact, deliver::Deliver, deliver_many::DeliverMany,
instance::QueryInstance, nodeinfo::QueryNodeinfo,
};
use crate::{
config::Config,
data::{ActorCache, MediaCache, NodeCache, State},
error::{Error, ErrorKind},
jobs::process_listeners::Listeners,
requests::Requests,
};
use background_jobs::{memory_storage::Storage, Job, Manager, QueueHandle, WorkerConfig};
use std::time::Duration;
pub(crate) fn create_workers(
state: State,
actors: ActorCache,
media: MediaCache,
config: Config,
) -> (Manager, JobServer) {
let shared = WorkerConfig::new_managed(Storage::new(), move |queue_handle| {
JobState::new(
state.clone(),
actors.clone(),
JobServer::new(queue_handle),
media.clone(),
config.clone(),
)
})
.register::<Deliver>()
.register::<DeliverMany>()
.register::<QueryNodeinfo>()
.register::<QueryInstance>()
.register::<Listeners>()
.register::<CacheMedia>()
.register::<QueryContact>()
.register::<apub::Announce>()
.register::<apub::Follow>()
.register::<apub::Forward>()
.register::<apub::Reject>()
.register::<apub::Undo>()
.set_worker_count("default", 16)
.start();
shared.every(Duration::from_secs(60 * 5), Listeners);
let job_server = JobServer::new(shared.queue_handle().clone());
(shared, job_server)
}
#[derive(Clone, Debug)]
pub(crate) struct JobState {
requests: Requests,
state: State,
actors: ActorCache,
config: Config,
media: MediaCache,
node_cache: NodeCache,
job_server: JobServer,
}
#[derive(Clone)]
pub(crate) struct JobServer {
remote: QueueHandle,
}
impl std::fmt::Debug for JobServer {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("JobServer")
.field("queue_handle", &"QueueHandle")
.finish()
}
}
impl JobState {
fn new(
state: State,
actors: ActorCache,
job_server: JobServer,
media: MediaCache,
config: Config,
) -> Self {
JobState {
requests: state.requests(&config),
node_cache: state.node_cache(),
actors,
config,
media,
state,
job_server,
}
}
}
impl JobServer {
fn new(remote_handle: QueueHandle) -> Self {
JobServer {
remote: remote_handle,
}
}
pub(crate) async fn queue<J>(&self, job: J) -> Result<(), Error>
where
J: Job,
{
self.remote
.queue(job)
.await
.map_err(ErrorKind::Queue)
.map_err(Into::into)
}
}

File diff suppressed because one or more lines are too long


@ -1,21 +1,20 @@
use crate::{ use crate::{
error::Error, error::Error,
future::BoxFuture,
jobs::{instance::QueryInstance, nodeinfo::QueryNodeinfo, JobState}, jobs::{instance::QueryInstance, nodeinfo::QueryNodeinfo, JobState},
}; };
use background_jobs::Job; use background_jobs::ActixJob;
use std::{future::Future, pin::Pin};
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)] #[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub(crate) struct Listeners; pub(crate) struct Listeners;
impl Listeners { impl Listeners {
#[tracing::instrument(name = "Spawn query instances", skip(state))] #[tracing::instrument(name = "Spawn query instances")]
async fn perform(self, state: JobState) -> Result<(), Error> { async fn perform(self, state: JobState) -> Result<(), Error> {
for actor_id in state.state.db.connected_ids().await? { for actor_id in state.state.db.connected_ids().await? {
state state
.job_server .job_server
.queue(QueryInstance::new(actor_id.clone())) .queue(QueryInstance::new(actor_id.clone())).await?;
.await?;
state.job_server.queue(QueryNodeinfo::new(actor_id)).await?; state.job_server.queue(QueryNodeinfo::new(actor_id)).await?;
} }
@ -23,15 +22,13 @@ impl Listeners {
} }
} }
impl Job for Listeners { impl ActixJob for Listeners {
type State = JobState; type State = JobState;
type Error = Error; type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
const NAME: &'static str = "relay::jobs::Listeners"; const NAME: &'static str = "relay::jobs::Listeners";
const QUEUE: &'static str = "maintenance";
fn run(self, state: Self::State) -> Self::Future { fn run(self, state: Self::State) -> Self::Future {
Box::pin(self.perform(state)) Box::pin(async move { self.perform(state).await.map_err(Into::into) })
} }
} }


@ -1,28 +0,0 @@
use crate::{error::Error, future::BoxFuture, jobs::JobState};
use background_jobs::{Backoff, Job};
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub(crate) struct RecordLastOnline;
impl RecordLastOnline {
#[tracing::instrument(skip(state))]
async fn perform(self, state: JobState) -> Result<(), Error> {
let nodes = state.state.last_online.take();
state.state.db.mark_last_seen(nodes).await
}
}
impl Job for RecordLastOnline {
type State = JobState;
type Error = Error;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
const NAME: &'static str = "relay::jobs::RecordLastOnline";
const QUEUE: &'static str = "maintenance";
const BACKOFF: Backoff = Backoff::Linear(1);
fn run(self, state: Self::State) -> Self::Future {
Box::pin(self.perform(state))
}
}


@ -1,48 +1,27 @@
// need this for ructe // need this for ructe
#![allow(clippy::needless_borrow)] #![allow(clippy::needless_borrow)]
use std::time::Duration;
use activitystreams::iri_string::types::IriString; use activitystreams::iri_string::types::IriString;
use actix_web::{middleware::Compress, web, App, HttpServer}; use actix_web::{web, App, HttpServer};
use collector::MemoryCollector;
#[cfg(feature = "console")] #[cfg(feature = "console")]
use console_subscriber::ConsoleLayer; use console_subscriber::ConsoleLayer;
use error::Error; use opentelemetry::{sdk::Resource, KeyValue};
use http_signature_normalization_actix::middleware::VerifySignature;
use metrics_exporter_prometheus::PrometheusBuilder;
use metrics_util::layers::FanoutBuilder;
use opentelemetry::{trace::TracerProvider, KeyValue};
use opentelemetry_otlp::WithExportConfig; use opentelemetry_otlp::WithExportConfig;
use opentelemetry_sdk::Resource;
use reqwest_middleware::ClientWithMiddleware;
use rustls::ServerConfig;
use tokio::task::JoinHandle;
use tracing_actix_web::TracingLogger; use tracing_actix_web::TracingLogger;
use tracing_error::ErrorLayer; use tracing_error::ErrorLayer;
use tracing_log::LogTracer; use tracing_log::LogTracer;
use tracing_subscriber::{filter::Targets, layer::SubscriberExt, Layer}; use tracing_subscriber::{filter::Targets, fmt::format::FmtSpan, layer::SubscriberExt, Layer};
mod admin;
mod apub; mod apub;
mod args; mod args;
mod collector;
mod config; mod config;
mod data; mod data;
mod db; mod db;
mod error; mod error;
mod extractors;
mod future;
mod http1;
mod jobs; mod jobs;
mod middleware; mod middleware;
mod requests; mod requests;
mod routes; mod routes;
mod spawner;
mod stream;
mod telegram;
use crate::config::UrlKind;
use self::{ use self::{
args::Args, args::Args,
@ -50,23 +29,23 @@ use self::{
data::{ActorCache, MediaCache, State}, data::{ActorCache, MediaCache, State},
db::Db, db::Db,
jobs::create_workers, jobs::create_workers,
middleware::{DebugPayload, MyVerify, RelayResolver, Timings}, middleware::{DebugPayload, RelayResolver},
routes::{actor, healthz, inbox, index, nodeinfo, nodeinfo_meta, statics}, routes::{actor, inbox, index, nodeinfo, nodeinfo_meta, statics},
spawner::Spawner,
}; };
fn init_subscriber( fn init_subscriber(
software_name: &'static str, software_name: &'static str,
opentelemetry_url: Option<&IriString>, opentelemetry_url: Option<&IriString>,
) -> color_eyre::Result<()> { ) -> Result<(), anyhow::Error> {
LogTracer::init()?; LogTracer::init()?;
color_eyre::install()?;
let targets: Targets = std::env::var("RUST_LOG") let targets: Targets = std::env::var("RUST_LOG")
.unwrap_or_else(|_| "info".into()) .unwrap_or_else(|_| "info".into())
.parse()?; .parse()?;
let format_layer = tracing_subscriber::fmt::layer().with_filter(targets.clone()); let format_layer = tracing_subscriber::fmt::layer()
.with_span_events(FmtSpan::NEW | FmtSpan::CLOSE)
.with_filter(targets.clone());
#[cfg(feature = "console")] #[cfg(feature = "console")]
let console_layer = ConsoleLayer::builder() let console_layer = ConsoleLayer::builder()
@ -83,21 +62,21 @@ fn init_subscriber(
let subscriber = subscriber.with(console_layer); let subscriber = subscriber.with(console_layer);
if let Some(url) = opentelemetry_url { if let Some(url) = opentelemetry_url {
let exporter = opentelemetry_otlp::SpanExporter::builder() let tracer =
.with_tonic() opentelemetry_otlp::new_pipeline()
.with_endpoint(url.as_str()) .tracing()
.build()?; .with_trace_config(opentelemetry::sdk::trace::config().with_resource(
Resource::new(vec![KeyValue::new("service.name", software_name)]),
let tracer_provider = opentelemetry_sdk::trace::TracerProvider::builder() ))
.with_resource(Resource::new(vec![KeyValue::new( .with_exporter(
"service.name", opentelemetry_otlp::new_exporter()
software_name, .tonic()
)])) .with_endpoint(url.as_str()),
.with_batch_exporter(exporter, opentelemetry_sdk::runtime::Tokio) )
.build(); .install_batch(opentelemetry::runtime::Tokio)?;
let otel_layer = tracing_opentelemetry::layer() let otel_layer = tracing_opentelemetry::layer()
.with_tracer(tracer_provider.tracer(software_name)) .with_tracer(tracer)
.with_filter(targets); .with_filter(targets);
let subscriber = subscriber.with(otel_layer); let subscriber = subscriber.with(otel_layer);
@ -109,266 +88,56 @@ fn init_subscriber(
Ok(()) Ok(())
} }
fn build_client( #[actix_rt::main]
user_agent: &str, async fn main() -> Result<(), anyhow::Error> {
timeout_seconds: u64,
proxy: Option<(&IriString, Option<(&str, &str)>)>,
) -> Result<ClientWithMiddleware, Error> {
let builder = reqwest::Client::builder().user_agent(user_agent.to_string());
let builder = if let Some((url, auth)) = proxy {
let proxy = reqwest::Proxy::all(url.as_str())?;
let proxy = if let Some((username, password)) = auth {
proxy.basic_auth(username, password)
} else {
proxy
};
builder.proxy(proxy)
} else {
builder
};
let client = builder
.timeout(Duration::from_secs(timeout_seconds))
.build()?;
let client_with_middleware = reqwest_middleware::ClientBuilder::new(client)
.with(reqwest_tracing::TracingMiddleware::default())
.build();
Ok(client_with_middleware)
}
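Aside: a hypothetical invocation of `build_client`, showing the shape of the optional proxy argument; the proxy URL, credentials, and user agent are illustrative:

```rust
use activitystreams::iri_string::types::IriString;
use reqwest_middleware::ClientWithMiddleware;

// Hypothetical: route outbound requests through an authenticated proxy.
fn proxied_client() -> color_eyre::Result<ClientWithMiddleware> {
    let proxy_url: IriString = "http://proxy.internal:3128".parse()?;
    let client = build_client(
        "relay/0.3 (+https://relay.example)", // user agent
        10,                                   // request timeout in seconds
        Some((&proxy_url, Some(("user", "pass")))),
    )?;
    Ok(client)
}
```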
#[tokio::main]
async fn main() -> color_eyre::Result<()> {
dotenv::dotenv().ok(); dotenv::dotenv().ok();
let config = Config::build()?; let config = Config::build()?;
init_subscriber(Config::software_name(), config.opentelemetry_url())?; init_subscriber(Config::software_name(), config.opentelemetry_url())?;
let args = Args::new();
if args.any() {
client_main(config, args).await??;
return Ok(());
}
let collector = MemoryCollector::new();
if let Some(bind_addr) = config.prometheus_bind_address() {
let (recorder, exporter) = PrometheusBuilder::new()
.with_http_listener(bind_addr)
.build()?;
tokio::spawn(exporter);
let recorder = FanoutBuilder::default()
.add_recorder(recorder)
.add_recorder(collector.clone())
.build();
metrics::set_global_recorder(recorder).map_err(|e| color_eyre::eyre::eyre!("{e}"))?;
} else {
collector.install()?;
}
tracing::info!("Opening DB");
let db = Db::build(&config)?; let db = Db::build(&config)?;
tracing::info!("Building caches"); let args = Args::new();
let actors = ActorCache::new(db.clone());
let media = MediaCache::new(db.clone());
server_main(db, actors, media, collector, config).await?;
tracing::info!("Application exit");
Ok(())
}
fn client_main(config: Config, args: Args) -> JoinHandle<color_eyre::Result<()>> {
tokio::spawn(do_client_main(config, args))
}
-async fn do_client_main(config: Config, args: Args) -> color_eyre::Result<()> {
-    let client = build_client(
-        &config.user_agent(),
-        config.client_timeout(),
-        config.proxy_config(),
-    )?;

     if !args.blocks().is_empty() || !args.allowed().is_empty() {
         if args.undo() {
-            admin::client::unblock(&client, &config, args.blocks().to_vec()).await?;
-            admin::client::disallow(&client, &config, args.allowed().to_vec()).await?;
+            db.remove_blocks(args.blocks().to_vec()).await?;
+            db.remove_allows(args.allowed().to_vec()).await?;
         } else {
-            admin::client::block(&client, &config, args.blocks().to_vec()).await?;
-            admin::client::allow(&client, &config, args.allowed().to_vec()).await?;
+            db.add_blocks(args.blocks().to_vec()).await?;
+            db.add_allows(args.allowed().to_vec()).await?;
         }
-        println!("Updated lists");
+        return Ok(());
     }

-    if args.contacted() {
-        let last_seen = admin::client::last_seen(&client, &config).await?;
-
-        let mut report = String::from("Contacted:");
-
-        if !last_seen.never.is_empty() {
-            report += "\nNever seen:\n";
-        }
-
-        for domain in last_seen.never {
-            report += "\t";
-            report += &domain;
-            report += "\n";
-        }
-
-        if !last_seen.last_seen.is_empty() {
-            report += "\nSeen:\n";
-        }
-
-        for (datetime, domains) in last_seen.last_seen {
-            for domain in domains {
-                report += "\t";
-                report += &datetime.to_string();
-                report += " - ";
-                report += &domain;
-                report += "\n";
-            }
-        }
-
-        report += "\n";
-        println!("{report}");
-    }
-
-    if args.list() {
-        let (blocked, allowed, connected) = tokio::try_join!(
-            admin::client::blocked(&client, &config),
-            admin::client::allowed(&client, &config),
-            admin::client::connected(&client, &config)
-        )?;
-
-        let mut report = String::from("Report:\n");
-
-        if !allowed.allowed_domains.is_empty() {
-            report += "\nAllowed\n\t";
-            report += &allowed.allowed_domains.join("\n\t");
-        }
-
-        if !blocked.blocked_domains.is_empty() {
-            report += "\n\nBlocked\n\t";
-            report += &blocked.blocked_domains.join("\n\t");
-        }
-
-        if !connected.connected_actors.is_empty() {
-            report += "\n\nConnected\n\t";
-            report += &connected.connected_actors.join("\n\t");
-        }
-
-        report += "\n";
-
-        println!("{report}");
-    }
-
-    if args.stats() {
-        let stats = admin::client::stats(&client, &config).await?;
-        stats.present();
-    }
-
-    Ok(())
-}
+    let media = MediaCache::new(db.clone());
+    let state = State::build(db.clone()).await?;
+    let actors = ActorCache::new(db.clone());
+
+    let (manager, job_server) =
+        create_workers(state.clone(), actors.clone(), media.clone(), config.clone());
-const VERIFY_RATIO: usize = 7;
-
-async fn server_main(
-    db: Db,
-    actors: ActorCache,
-    media: MediaCache,
-    collector: MemoryCollector,
-    config: Config,
-) -> color_eyre::Result<()> {
-    let client = build_client(
-        &config.user_agent(),
-        config.client_timeout(),
-        config.proxy_config(),
-    )?;
-
-    tracing::info!("Creating state");
-
-    let (signature_threads, verify_threads) = match config.signature_threads() {
-        0 | 1 => (1, 1),
-        n if n <= VERIFY_RATIO => (n, 1),
-        n => {
-            let verify_threads = (n / VERIFY_RATIO).max(1);
-            let signature_threads = n.saturating_sub(verify_threads).max(VERIFY_RATIO);
-            (signature_threads, verify_threads)
-        }
-    };
-
-    let verify_spawner = Spawner::build("verify-cpu", verify_threads.try_into()?)?;
-    let sign_spawner = Spawner::build("sign-cpu", signature_threads.try_into()?)?;
-
-    let key_id = config.generate_url(UrlKind::MainKey).to_string();
-    let state = State::build(db.clone(), key_id, sign_spawner.clone(), client).await?;
-
-    if let Some((token, admin_handle)) = config.telegram_info() {
-        tracing::info!("Creating telegram handler");
-        telegram::start(admin_handle.to_owned(), db.clone(), token);
-    }
-
-    let cert_resolver = config
-        .open_keys()
-        .await?
-        .map(rustls_channel_resolver::channel::<32>);
-
     let bind_address = config.bind_address();
-    let sign_spawner2 = sign_spawner.clone();
-    let verify_spawner2 = verify_spawner.clone();
-    let config2 = config.clone();
-    let job_store = jobs::build_storage();
-    let server = HttpServer::new(move || {
-        let job_server = create_workers(
-            job_store.clone(),
-            state.clone(),
-            actors.clone(),
-            media.clone(),
-            config.clone(),
-        )
-        .expect("Failed to create job server");
-
-        let app = App::new()
+    HttpServer::new(move || {
+        App::new()
+            .wrap(TracingLogger::default())
             .app_data(web::Data::new(db.clone()))
             .app_data(web::Data::new(state.clone()))
-            .app_data(web::Data::new(
-                state.requests.clone().spawner(verify_spawner.clone()),
-            ))
+            .app_data(web::Data::new(state.requests(&config)))
             .app_data(web::Data::new(actors.clone()))
             .app_data(web::Data::new(config.clone()))
-            .app_data(web::Data::new(job_server))
+            .app_data(web::Data::new(job_server.clone()))
             .app_data(web::Data::new(media.clone()))
-            .app_data(web::Data::new(collector.clone()))
-            .app_data(web::Data::new(verify_spawner.clone()));
-
-        let app = if let Some(data) = config.admin_config() {
-            app.app_data(data)
-        } else {
-            app
-        };
-
-        app.wrap(Compress::default())
-            .wrap(TracingLogger::default())
-            .wrap(Timings)
-            .route("/healthz", web::get().to(healthz))
             .service(web::resource("/").route(web::get().to(index)))
             .service(web::resource("/media/{path}").route(web::get().to(routes::media)))
             .service(
                 web::resource("/inbox")
-                    .wrap(config.digest_middleware().spawner(verify_spawner.clone()))
-                    .wrap(VerifySignature::new(
-                        MyVerify(
-                            state.requests.clone().spawner(verify_spawner.clone()),
-                            actors.clone(),
-                            state.clone(),
-                            verify_spawner.clone(),
-                        ),
-                        http_signature_normalization_actix::Config::new(),
+                    .wrap(config.digest_middleware())
+                    .wrap(config.signature_middleware(
+                        state.requests(&config),
+                        actors.clone(),
+                        state.clone(),
                     ))
                     .wrap(DebugPayload(config.debug()))
                     .route(web::post().to(inbox)),
@@ -381,58 +150,12 @@ async fn server_main(
                     .service(web::resource("/nodeinfo").route(web::get().to(nodeinfo_meta))),
             )
             .service(web::resource("/static/{filename}").route(web::get().to(statics)))
-            .service(
-                web::scope("/api/v1").service(
-                    web::scope("/admin")
-                        .route("/allow", web::post().to(admin::routes::allow))
-                        .route("/disallow", web::post().to(admin::routes::disallow))
-                        .route("/block", web::post().to(admin::routes::block))
-                        .route("/unblock", web::post().to(admin::routes::unblock))
-                        .route("/allowed", web::get().to(admin::routes::allowed))
-                        .route("/blocked", web::get().to(admin::routes::blocked))
-                        .route("/connected", web::get().to(admin::routes::connected))
-                        .route("/stats", web::get().to(admin::routes::stats))
-                        .route("/last_seen", web::get().to(admin::routes::last_seen)),
-                ),
-            )
-    });
-
-    if let Some((cert_tx, cert_rx)) = cert_resolver {
-        let handle = tokio::spawn(async move {
-            let mut interval = tokio::time::interval(Duration::from_secs(30));
-            interval.tick().await;
-
-            loop {
-                interval.tick().await;
-
-                match config2.open_keys().await {
-                    Ok(Some(key)) => cert_tx.update(key),
-                    Ok(None) => tracing::warn!("Missing TLS keys"),
-                    Err(e) => tracing::error!("Failed to read TLS keys {e}"),
-                }
-            }
-        });
-
-        tracing::info!("Binding to {}:{} with TLS", bind_address.0, bind_address.1);
-        let server_config = ServerConfig::builder()
-            .with_no_client_auth()
-            .with_cert_resolver(cert_rx);
-        server
-            .bind_rustls_0_23(bind_address, server_config)?
-            .run()
-            .await?;
-
-        handle.abort();
-        let _ = handle.await;
-    } else {
-        tracing::info!("Binding to {}:{}", bind_address.0, bind_address.1);
-        server.bind(bind_address)?.run().await?;
-    }
-
-    sign_spawner2.close().await;
-    verify_spawner2.close().await;
-
-    tracing::info!("Server closed");
+    })
+    .bind(bind_address)?
+    .run()
+    .await?;
+
+    drop(manager);

     Ok(())
 }
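
A side note on the hunk above: the main branch's `server_main` splits the configured thread count between signing and verifying CPU pools at a 7:1 ratio. A standalone sketch of just that arithmetic (the function name and test values are ours, for illustration only):

```rust
// Minimal sketch of the signature/verify thread split from `server_main` above.
const VERIFY_RATIO: usize = 7;

fn split_threads(n: usize) -> (usize, usize) {
    match n {
        0 | 1 => (1, 1),
        n if n <= VERIFY_RATIO => (n, 1),
        n => {
            let verify = (n / VERIFY_RATIO).max(1);
            let sign = n.saturating_sub(verify).max(VERIFY_RATIO);
            (sign, verify)
        }
    }
}

fn main() {
    assert_eq!(split_threads(16), (14, 2)); // 16 threads -> 14 signing, 2 verifying
    assert_eq!(split_threads(4), (4, 1));   // small counts keep a single verifier
}
```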


@@ -1,9 +1,7 @@
 mod payload;
-mod timings;
 mod verifier;
 mod webfinger;

 pub(crate) use payload::DebugPayload;
-pub(crate) use timings::Timings;
 pub(crate) use verifier::MyVerify;
 pub(crate) use webfinger::RelayResolver;


@@ -4,11 +4,15 @@ use actix_web::{
     web::BytesMut,
     HttpMessage,
 };
+use futures_util::{
+    future::{LocalBoxFuture, TryFutureExt},
+    stream::{once, TryStreamExt},
+};
 use std::{
     future::{ready, Ready},
     task::{Context, Poll},
 };
-use streem::IntoStreamer;
+use tracing::info;

 #[derive(Clone, Debug)]
 pub(crate) struct DebugPayload(pub bool);
@@ -42,7 +46,7 @@ where
 {
     type Response = S::Response;
     type Error = S::Error;
-    type Future = S::Future;
+    type Future = LocalBoxFuture<'static, Result<S::Response, S::Error>>;

     fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
         self.1.poll_ready(cx)
@@ -50,28 +54,28 @@ where
     fn call(&self, mut req: ServiceRequest) -> Self::Future {
         if self.0 && req.method() == Method::POST {
-            let mut pl = req.take_payload().into_streamer();
+            let pl = req.take_payload();
             req.set_payload(Payload::Stream {
-                payload: Box::pin(streem::try_from_fn(|yielder| async move {
-                    let mut buf = BytesMut::new();
-
-                    while let Some(bytes) = pl.try_next().await? {
-                        buf.extend(bytes);
-                    }
-
-                    let bytes = buf.freeze();
-                    tracing::info!("{}", String::from_utf8_lossy(&bytes));
-
-                    yielder.yield_ok(bytes).await;
-
-                    Ok(())
-                })),
+                payload: Box::pin(once(
+                    pl.try_fold(BytesMut::new(), |mut acc, bytes| async {
+                        acc.extend(bytes);
+                        Ok(acc)
+                    })
+                    .map_ok(|bytes| {
+                        let bytes = bytes.freeze();
+                        info!("{}", String::from_utf8_lossy(&bytes));
+                        bytes
+                    }),
+                )),
             });

-            self.1.call(req)
+            let fut = self.1.call(req);
+            Box::pin(async move { fut.await })
         } else {
-            self.1.call(req)
+            let fut = self.1.call(req);
+            Box::pin(async move { fut.await })
         }
     }
 }
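
Both versions of `DebugPayload` buffer a POST body so it can be logged, then re-inject it as a fresh single-chunk stream. A minimal sketch of the `try_fold` buffering used on the v0.3.20 side, lifted out of the middleware (assumes `bytes`, `futures-util`, and `tokio` as dependencies; the error type is simplified to `std::io::Error`):

```rust
use bytes::{Bytes, BytesMut};
use futures_util::stream::{self, TryStreamExt};

// Drain a fallible byte stream into one contiguous buffer.
async fn buffer_payload<S>(payload: S) -> Result<Bytes, std::io::Error>
where
    S: futures_util::Stream<Item = Result<Bytes, std::io::Error>>,
{
    let buf = payload
        .try_fold(BytesMut::new(), |mut acc, bytes| async move {
            acc.extend_from_slice(&bytes);
            Ok(acc)
        })
        .await?;
    Ok(buf.freeze())
}

#[tokio::main]
async fn main() -> Result<(), std::io::Error> {
    let body = stream::iter([
        Ok::<_, std::io::Error>(Bytes::from("hello ")),
        Ok(Bytes::from("world")),
    ]);
    let bytes = buffer_payload(body).await?;
    println!("{}", String::from_utf8_lossy(&bytes)); // log the whole body
    Ok(())
}
```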


@@ -1,143 +0,0 @@
use actix_web::{
body::MessageBody,
dev::{Service, ServiceRequest, ServiceResponse, Transform},
http::StatusCode,
};
use std::{
future::{ready, Future, Ready},
time::Instant,
};
pub(crate) struct Timings;
pub(crate) struct TimingsMiddleware<S>(S);
struct LogOnDrop {
begin: Instant,
path: String,
method: String,
arm: bool,
}
pin_project_lite::pin_project! {
pub(crate) struct TimingsFuture<F> {
#[pin]
future: F,
log_on_drop: Option<LogOnDrop>,
}
}
pin_project_lite::pin_project! {
pub(crate) struct TimingsBody<B> {
#[pin]
body: B,
log_on_drop: LogOnDrop,
}
}
impl Drop for LogOnDrop {
fn drop(&mut self) {
if self.arm {
let duration = self.begin.elapsed();
metrics::histogram!("relay.request.complete", "path" => self.path.clone(), "method" => self.method.clone()).record(duration);
}
}
}
impl<S, B> Transform<S, ServiceRequest> for Timings
where
S: Service<ServiceRequest, Response = ServiceResponse<B>, Error = actix_web::Error>,
S::Future: 'static,
{
type Response = ServiceResponse<TimingsBody<B>>;
type Error = S::Error;
type InitError = ();
type Transform = TimingsMiddleware<S>;
type Future = Ready<Result<Self::Transform, Self::InitError>>;
fn new_transform(&self, service: S) -> Self::Future {
ready(Ok(TimingsMiddleware(service)))
}
}
impl<S, B> Service<ServiceRequest> for TimingsMiddleware<S>
where
S: Service<ServiceRequest, Response = ServiceResponse<B>, Error = actix_web::Error>,
S::Future: 'static,
{
type Response = ServiceResponse<TimingsBody<B>>;
type Error = S::Error;
type Future = TimingsFuture<S::Future>;
fn poll_ready(
&self,
ctx: &mut core::task::Context<'_>,
) -> std::task::Poll<Result<(), Self::Error>> {
self.0.poll_ready(ctx)
}
fn call(&self, req: ServiceRequest) -> Self::Future {
let log_on_drop = LogOnDrop {
begin: Instant::now(),
path: format!("{:?}", req.match_pattern()),
method: req.method().to_string(),
arm: false,
};
let future = self.0.call(req);
TimingsFuture {
future,
log_on_drop: Some(log_on_drop),
}
}
}
impl<F, B> Future for TimingsFuture<F>
where
F: Future<Output = Result<ServiceResponse<B>, actix_web::Error>>,
{
type Output = Result<ServiceResponse<TimingsBody<B>>, actix_web::Error>;
fn poll(
self: std::pin::Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
) -> std::task::Poll<Self::Output> {
let this = self.project();
let res = std::task::ready!(this.future.poll(cx));
let mut log_on_drop = this
.log_on_drop
.take()
.expect("TimingsFuture polled after completion");
let status = match &res {
Ok(res) => res.status(),
Err(e) => e.as_response_error().status_code(),
};
log_on_drop.arm =
status != StatusCode::NOT_FOUND && status != StatusCode::METHOD_NOT_ALLOWED;
let res = res.map(|r| r.map_body(|_, body| TimingsBody { body, log_on_drop }));
std::task::Poll::Ready(res)
}
}
impl<B: MessageBody> MessageBody for TimingsBody<B> {
type Error = B::Error;
fn size(&self) -> actix_web::body::BodySize {
self.body.size()
}
fn poll_next(
self: std::pin::Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
) -> std::task::Poll<Option<Result<actix_web::web::Bytes, Self::Error>>> {
self.project().body.poll_next(cx)
}
}
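
The deleted `Timings` middleware above hinges on a `Drop` guard: the request timer is recorded when the response *body* is dropped, so streaming time and cancelled responses are counted too. A std-only illustration of that pattern (the `metrics` histogram is replaced by a `println!`; names are ours):

```rust
use std::time::Instant;

struct LogOnDrop {
    begin: Instant,
    label: &'static str,
    arm: bool, // disarmed for 404/405 in the original middleware
}

impl Drop for LogOnDrop {
    fn drop(&mut self) {
        if self.arm {
            // Fires whenever the owner is dropped, even on early cancellation.
            println!("{} took {:?}", self.label, self.begin.elapsed());
        }
    }
}

fn main() {
    let guard = LogOnDrop { begin: Instant::now(), label: "demo", arm: true };
    // ... hand `guard` to whatever owns the response body ...
    drop(guard); // timing is recorded here
}
```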


@@ -2,20 +2,20 @@
 use crate::{
     apub::AcceptedActors,
     data::{ActorCache, State},
     error::{Error, ErrorKind},
-    requests::{BreakerStrategy, Requests},
-    spawner::Spawner,
+    requests::Requests,
 };
 use activitystreams::{base::BaseExt, iri, iri_string::types::IriString};
-use base64::{engine::general_purpose::STANDARD, Engine};
-use http_signature_normalization_actix::{prelude::*, verify::DeprecatedAlgorithm, Spawn};
-use rsa::{pkcs1::EncodeRsaPublicKey, pkcs8::DecodePublicKey, RsaPublicKey};
+use actix_web::web;
+use http_signature_normalization_actix::{prelude::*, verify::DeprecatedAlgorithm};
+use rsa::{hash::Hash, padding::PaddingScheme, pkcs8::FromPublicKey, PublicKey, RsaPublicKey};
+use sha2::{Digest, Sha256};
 use std::{future::Future, pin::Pin};

 #[derive(Clone, Debug)]
-pub(crate) struct MyVerify(pub Requests, pub ActorCache, pub State, pub Spawner);
+pub(crate) struct MyVerify(pub Requests, pub ActorCache, pub State);

 impl MyVerify {
-    #[tracing::instrument("Verify request", skip(self, signature, signing_string))]
+    #[tracing::instrument("Verify signature")]
     async fn verify(
         &self,
         algorithm: Option<Algorithm>,
@@ -25,9 +25,6 @@ impl MyVerify {
     ) -> Result<bool, Error> {
         let public_key_id = iri!(key_id);

-        // receiving an activity from a domain indicates it is probably online
-        self.0.reset_breaker(&public_key_id);
-
         let actor_id = if let Some(mut actor_id) = self
             .2
             .db
@@ -52,13 +49,7 @@ impl MyVerify {
                 None => (),
             };

-            let res = do_verify(
-                &self.3,
-                &actor.public_key,
-                signature.clone(),
-                signing_string.clone(),
-            )
-            .await;
+            let res = do_verify(&actor.public_key, signature.clone(), signing_string.clone()).await;

             if let Err(e) = res {
                 if !was_cached {
@@ -70,21 +61,11 @@ impl MyVerify {
             actor_id
         } else {
-            match self
-                .0
-                .fetch::<PublicKeyResponse>(&public_key_id, BreakerStrategy::Require2XX)
-                .await
-            {
-                Ok(res) => res.actor_id().ok_or(ErrorKind::MissingId),
-                Err(e) => {
-                    if e.is_gone() {
-                        tracing::warn!("Actor gone: {public_key_id}");
-                        return Ok(false);
-                    } else {
-                        return Err(e);
-                    }
-                }
-            }?
+            self.0
+                .fetch::<PublicKeyResponse>(public_key_id.as_str())
+                .await?
+                .actor_id()
+                .ok_or(ErrorKind::MissingId)?
         };

         // Previously we verified the sig from an actor's local cache
@@ -92,7 +73,7 @@ impl MyVerify {
         // Now we make sure we fetch an updated actor
         let actor = self.1.get_no_cache(&actor_id, &self.0).await?;

-        do_verify(&self.3, &actor.public_key, signature, signing_string).await?;
+        do_verify(&actor.public_key, signature, signing_string).await?;

         Ok(true)
     }
@@ -121,36 +102,28 @@ impl PublicKeyResponse {
     }
 }

+#[tracing::instrument("Verify signature")]
 async fn do_verify(
-    spawner: &Spawner,
     public_key: &str,
     signature: String,
     signing_string: String,
 ) -> Result<(), Error> {
-    let public_key = RsaPublicKey::from_public_key_pem(public_key.trim())?;
-    let public_key_der = public_key
-        .to_pkcs1_der()
-        .map_err(|_| ErrorKind::DerEncode)?;
-    let public_key = ring::signature::UnparsedPublicKey::new(
-        &ring::signature::RSA_PKCS1_2048_8192_SHA256,
-        public_key_der,
-    );
-
-    let span = tracing::Span::current();
-    spawner
-        .spawn_blocking(move || {
-            span.in_scope(|| {
-                let decoded = STANDARD.decode(signature)?;
-
-                public_key
-                    .verify(signing_string.as_bytes(), decoded.as_slice())
-                    .map_err(|_| ErrorKind::VerifySignature)?;
-
-                Ok(()) as Result<(), Error>
-            })
-        })
-        .await??;
+    let public_key = RsaPublicKey::from_public_key_pem(public_key)?;
+
+    web::block(move || {
+        let decoded = base64::decode(signature)?;
+        let hashed = Sha256::digest(signing_string.as_bytes());
+
+        public_key.verify(
+            PaddingScheme::PKCS1v15Sign {
+                hash: Some(Hash::SHA2_256),
+            },
+            &hashed,
+            &decoded,
+        )?;
+
+        Ok(()) as Result<(), Error>
+    })
+    .await??;

     Ok(())
 }
@@ -174,36 +147,3 @@ impl SignatureVerify for MyVerify {
         })
     }
 }
#[cfg(test)]
mod tests {
use crate::apub::AcceptedActors;
use rsa::{pkcs8::DecodePublicKey, RsaPublicKey};
const ASONIX_DOG_ACTOR: &str = r#"{"@context":["https://www.w3.org/ns/activitystreams","https://w3id.org/security/v1",{"manuallyApprovesFollowers":"as:manuallyApprovesFollowers","toot":"http://joinmastodon.org/ns#","featured":{"@id":"toot:featured","@type":"@id"},"featuredTags":{"@id":"toot:featuredTags","@type":"@id"},"alsoKnownAs":{"@id":"as:alsoKnownAs","@type":"@id"},"movedTo":{"@id":"as:movedTo","@type":"@id"},"schema":"http://schema.org#","PropertyValue":"schema:PropertyValue","value":"schema:value","discoverable":"toot:discoverable","Device":"toot:Device","Ed25519Signature":"toot:Ed25519Signature","Ed25519Key":"toot:Ed25519Key","Curve25519Key":"toot:Curve25519Key","EncryptedMessage":"toot:EncryptedMessage","publicKeyBase64":"toot:publicKeyBase64","deviceId":"toot:deviceId","claim":{"@type":"@id","@id":"toot:claim"},"fingerprintKey":{"@type":"@id","@id":"toot:fingerprintKey"},"identityKey":{"@type":"@id","@id":"toot:identityKey"},"devices":{"@type":"@id","@id":"toot:devices"},"messageFranking":"toot:messageFranking","messageType":"toot:messageType","cipherText":"toot:cipherText","suspended":"toot:suspended"}],"id":"https://masto.asonix.dog/actor","type":"Application","inbox":"https://masto.asonix.dog/actor/inbox","outbox":"https://masto.asonix.dog/actor/outbox","preferredUsername":"masto.asonix.dog","url":"https://masto.asonix.dog/about/more?instance_actor=true","manuallyApprovesFollowers":true,"publicKey":{"id":"https://masto.asonix.dog/actor#main-key","owner":"https://masto.asonix.dog/actor","publicKeyPem":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx8zXS0QNg9YGUBsxAOBH\nJaxIn7i6t+Z4UOpSFDVa2kP0NvQgIJsq3wzRqvaiuncRWpkyFk1fTakiRGD32xnY\nt+juuAaIBlU8eswKyANFqhcLAvFHmT3rA1848M4/YM19djvlL/PR9T53tPNHU+el\nS9MlsG3o6Zsj8YaUJtCI8RgEuJoROLHUb/V9a3oMQ7CfuIoSvF3VEz3/dRT09RW6\n0wQX7yhka9WlKuayWLWmTcB9lAIX6neBk+qKc8VSEsO7mHkzB8mRgVcS2uYZl1eA\nD8/jTT+SlpeFNDZk0Oh35GNFoOxh9qjRw3NGxu7jJCVBehDODzasOv4xDxKAhONa\njQIDAQAB\n-----END PUBLIC KEY-----\n"},"endpoints":{"sharedInbox":"https://masto.asonix.dog/inbox"}}"#;
const KARJALAZET_RELAY: &str = r#"{"@context":["https://www.w3.org/ns/activitystreams","https://pleroma.karjalazet.se/schemas/litepub-0.1.jsonld",{"@language":"und"}],"alsoKnownAs":[],"attachment":[],"capabilities":{},"discoverable":false,"endpoints":{"oauthAuthorizationEndpoint":"https://pleroma.karjalazet.se/oauth/authorize","oauthRegistrationEndpoint":"https://pleroma.karjalazet.se/api/v1/apps","oauthTokenEndpoint":"https://pleroma.karjalazet.se/oauth/token","sharedInbox":"https://pleroma.karjalazet.se/inbox","uploadMedia":"https://pleroma.karjalazet.se/api/ap/upload_media"},"featured":"https://pleroma.karjalazet.se/relay/collections/featured","followers":"https://pleroma.karjalazet.se/relay/followers","following":"https://pleroma.karjalazet.se/relay/following","id":"https://pleroma.karjalazet.se/relay","inbox":"https://pleroma.karjalazet.se/relay/inbox","manuallyApprovesFollowers":false,"name":null,"outbox":"https://pleroma.karjalazet.se/relay/outbox","preferredUsername":"relay","publicKey":{"id":"https://pleroma.karjalazet.se/relay#main-key","owner":"https://pleroma.karjalazet.se/relay","publicKeyPem":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAucoyCht6QpEzUPdQWP/J\nJYxObSH3MCcXBnG4d0OX78QshloeAHhl78EZ5c8I0ePmIjDg2NFK3/pG0EvSrHe2\nIZHnHaN5emgCb2ifNya5W572yfQXo1tUQy+ZXtbTUA7BWbr4LuCvd+HUavMwbx72\neraSZTiQj//ObwpbXFoZO5I/+e5avGmVnfmr/y2cG95hqFDtI3438RgZyBjY5kJM\nY1MLWoY9itGSfYmBtxRj3umlC2bPuBB+hHUJi6TvP7NO6zuUZ66m4ETyuBDi8iP6\ngnUp3Q4+1/I3nDUmhjt7OXckUcX3r5M4UHD3VVUFG0aZw6WWMEAxlyFf/07fCkhR\nBwIDAQAB\n-----END PUBLIC KEY-----\n\n"},"summary":"","tag":[],"type":"Person","url":"https://pleroma.karjalazet.se/relay"}"#;
const ASONIX_DOG_KEY: &str = "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx8zXS0QNg9YGUBsxAOBH\nJaxIn7i6t+Z4UOpSFDVa2kP0NvQgIJsq3wzRqvaiuncRWpkyFk1fTakiRGD32xnY\nt+juuAaIBlU8eswKyANFqhcLAvFHmT3rA1848M4/YM19djvlL/PR9T53tPNHU+el\nS9MlsG3o6Zsj8YaUJtCI8RgEuJoROLHUb/V9a3oMQ7CfuIoSvF3VEz3/dRT09RW6\n0wQX7yhka9WlKuayWLWmTcB9lAIX6neBk+qKc8VSEsO7mHkzB8mRgVcS2uYZl1eA\nD8/jTT+SlpeFNDZk0Oh35GNFoOxh9qjRw3NGxu7jJCVBehDODzasOv4xDxKAhONa\njQIDAQAB\n-----END PUBLIC KEY-----\n";
const KARJALAZET_KEY: &str = "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAucoyCht6QpEzUPdQWP/J\nJYxObSH3MCcXBnG4d0OX78QshloeAHhl78EZ5c8I0ePmIjDg2NFK3/pG0EvSrHe2\nIZHnHaN5emgCb2ifNya5W572yfQXo1tUQy+ZXtbTUA7BWbr4LuCvd+HUavMwbx72\neraSZTiQj//ObwpbXFoZO5I/+e5avGmVnfmr/y2cG95hqFDtI3438RgZyBjY5kJM\nY1MLWoY9itGSfYmBtxRj3umlC2bPuBB+hHUJi6TvP7NO6zuUZ66m4ETyuBDi8iP6\ngnUp3Q4+1/I3nDUmhjt7OXckUcX3r5M4UHD3VVUFG0aZw6WWMEAxlyFf/07fCkhR\nBwIDAQAB\n-----END PUBLIC KEY-----\n\n";
#[test]
fn handles_masto_keys() {
println!("{ASONIX_DOG_KEY}");
let _ = RsaPublicKey::from_public_key_pem(ASONIX_DOG_KEY.trim()).unwrap();
}
#[test]
fn handles_pleromo_keys() {
println!("{KARJALAZET_KEY}");
let _ = RsaPublicKey::from_public_key_pem(KARJALAZET_KEY.trim()).unwrap();
}
#[test]
fn handles_pleromo_relay_format() {
let _: AcceptedActors = serde_json::from_str(KARJALAZET_RELAY).unwrap();
}
#[test]
fn handles_masto_relay_format() {
let _: AcceptedActors = serde_json::from_str(ASONIX_DOG_ACTOR).unwrap();
}
}
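
For reference, the v0.3.20 verify path condenses to the sketch below, using the same pre-0.9 `rsa` crate API that appears in the hunk above (`PaddingScheme::PKCS1v15Sign`, `pkcs8::FromPublicKey`) plus `base64` 0.13 and `sha2`. Illustrative only, not either branch's source:

```rust
use rsa::{hash::Hash, padding::PaddingScheme, pkcs8::FromPublicKey, PublicKey, RsaPublicKey};
use sha2::{Digest, Sha256};

fn verify_pem(
    pem: &str,
    signature_b64: &str,
    signing_string: &str,
) -> Result<(), Box<dyn std::error::Error>> {
    // `.trim()` is the main-branch fix for Pleroma keys with trailing
    // newlines, exercised by the tests above; v0.3.20 itself did not trim.
    let public_key = RsaPublicKey::from_public_key_pem(pem.trim())?;
    let decoded = base64::decode(signature_b64)?;
    let hashed = Sha256::digest(signing_string.as_bytes());
    public_key.verify(
        PaddingScheme::PKCS1v15Sign { hash: Some(Hash::SHA2_256) },
        &hashed,
        &decoded,
    )?;
    Ok(())
}
```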


@@ -1,10 +1,10 @@
 use crate::{
     config::{Config, UrlKind},
     data::State,
-    future::LocalBoxFuture,
 };
 use actix_web::web::Data;
 use actix_webfinger::{Resolver, Webfinger};
+use futures_util::future::LocalBoxFuture;
 use rsa_magic_public_key::AsMagicPublicKey;

 pub(crate) struct RelayResolver;


@@ -1,43 +1,28 @@
-use crate::{
-    data::LastOnline,
-    error::{Error, ErrorKind},
-    spawner::Spawner,
-    stream::{aggregate, limit_stream},
-};
+use crate::error::{Error, ErrorKind};
 use activitystreams::iri_string::types::IriString;
-use actix_web::http::header::Date;
-use base64::{engine::general_purpose::STANDARD, Engine};
+use actix_web::{http::header::Date, web::Bytes};
+use awc::Client;
 use dashmap::DashMap;
-use http_signature_normalization_reqwest::{digest::ring::Sha256, prelude::*};
-use reqwest_middleware::ClientWithMiddleware;
-use ring::{
-    rand::SystemRandom,
-    signature::{RsaKeyPair, RSA_PKCS1_SHA256},
-};
-use rsa::{pkcs1::EncodeRsaPrivateKey, RsaPrivateKey};
+use http_signature_normalization_actix::prelude::*;
+use rsa::{hash::Hash, padding::PaddingScheme, RsaPrivateKey};
+use sha2::{Digest, Sha256};
 use std::{
-    sync::Arc,
+    cell::RefCell,
+    rc::Rc,
+    sync::{
+        atomic::{AtomicUsize, Ordering},
+        Arc,
+    },
     time::{Duration, SystemTime},
 };
+use tracing::{debug, info, warn};
+use tracing_awc::Tracing;

 const ONE_SECOND: u64 = 1;
 const ONE_MINUTE: u64 = 60 * ONE_SECOND;
 const ONE_HOUR: u64 = 60 * ONE_MINUTE;
 const ONE_DAY: u64 = 24 * ONE_HOUR;

-// 20 KB
-const JSON_SIZE_LIMIT: usize = 20 * 1024;
-
-#[derive(Debug)]
-pub(crate) enum BreakerStrategy {
-    // Requires a successful response
-    Require2XX,
-    // Allows HTTP 2xx-401
-    Allow401AndBelow,
-    // Allows HTTP 2xx-404
-    Allow404AndBelow,
-}
-
 #[derive(Clone)]
 pub(crate) struct Breakers {
     inner: Arc<DashMap<String, Breaker>>,
@@ -50,7 +35,7 @@ impl std::fmt::Debug for Breakers {
 }

 impl Breakers {
-    pub(crate) fn should_try(&self, url: &IriString) -> bool {
+    fn should_try(&self, url: &IriString) -> bool {
         if let Some(authority) = url.authority_str() {
             if let Some(breaker) = self.inner.get(authority) {
                 breaker.should_try()
@@ -67,9 +52,6 @@ impl Breakers {
         let should_write = {
             if let Some(mut breaker) = self.inner.get_mut(authority) {
                 breaker.fail();
-                if !breaker.should_try() {
-                    tracing::warn!("Failed breaker for {authority}");
-                }
                 false
             } else {
                 true
@@ -118,12 +100,17 @@ struct Breaker {
 }

 impl Breaker {
-    const FAILURE_WAIT: Duration = Duration::from_secs(ONE_DAY);
-    const FAILURE_THRESHOLD: usize = 10;
+    const fn failure_threshold() -> usize {
+        10
+    }
+
+    fn failure_wait() -> Duration {
+        Duration::from_secs(ONE_DAY)
+    }

     fn should_try(&self) -> bool {
-        self.failures < Self::FAILURE_THRESHOLD
-            || self.last_attempt + Self::FAILURE_WAIT < SystemTime::now()
+        self.failures < Self::failure_threshold()
+            || self.last_attempt + Self::failure_wait() < SystemTime::now()
     }

     fn fail(&mut self) {
@@ -152,19 +139,22 @@ impl Default for Breaker {
 #[derive(Clone)]
 pub(crate) struct Requests {
-    client: ClientWithMiddleware,
+    client: Rc<RefCell<Client>>,
+    consecutive_errors: Rc<AtomicUsize>,
+    error_limit: usize,
     key_id: String,
-    private_key: Arc<RsaKeyPair>,
-    rng: SystemRandom,
-    config: Config<Spawner>,
+    user_agent: String,
+    private_key: RsaPrivateKey,
+    config: Config,
     breakers: Breakers,
-    last_online: Arc<LastOnline>,
 }

 impl std::fmt::Debug for Requests {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         f.debug_struct("Requests")
+            .field("error_limit", &self.error_limit)
             .field("key_id", &self.key_id)
+            .field("user_agent", &self.user_agent)
             .field("config", &self.config)
             .field("breakers", &self.breakers)
             .finish()
@@ -172,225 +162,195 @@ impl std::fmt::Debug for Requests {
 }

 impl Requests {
-    #[allow(clippy::too_many_arguments)]
     pub(crate) fn new(
         key_id: String,
         private_key: RsaPrivateKey,
+        user_agent: String,
         breakers: Breakers,
-        last_online: Arc<LastOnline>,
-        spawner: Spawner,
-        client: ClientWithMiddleware,
     ) -> Self {
-        let private_key_der = private_key.to_pkcs1_der().expect("Can encode der");
-        let private_key = ring::signature::RsaKeyPair::from_der(private_key_der.as_bytes())
-            .expect("Key is valid");
         Requests {
-            client,
+            client: Rc::new(RefCell::new(
+                Client::builder()
+                    .wrap(Tracing)
+                    .add_default_header(("User-Agent", user_agent.clone()))
+                    .finish(),
+            )),
+            consecutive_errors: Rc::new(AtomicUsize::new(0)),
+            error_limit: 3,
             key_id,
-            private_key: Arc::new(private_key),
-            rng: SystemRandom::new(),
-            config: Config::new_with_spawner(spawner).mastodon_compat(),
+            user_agent,
+            private_key,
+            config: Config::default().mastodon_compat(),
             breakers,
-            last_online,
         }
     }

-    pub(crate) fn spawner(mut self, spawner: Spawner) -> Self {
-        self.config = self.config.set_spawner(spawner);
-        self
-    }
-
-    pub(crate) fn reset_breaker(&self, iri: &IriString) {
-        self.breakers.succeed(iri);
-    }
-
-    async fn check_response(
-        &self,
-        parsed_url: &IriString,
-        strategy: BreakerStrategy,
-        res: Result<reqwest::Response, reqwest_middleware::Error>,
-    ) -> Result<reqwest::Response, Error> {
-        if res.is_err() {
-            self.breakers.fail(&parsed_url);
-        }
-
-        let res = res?;
-
-        let status = res.status();
-        let success = match strategy {
-            BreakerStrategy::Require2XX => status.is_success(),
-            BreakerStrategy::Allow401AndBelow => (200..=401).contains(&status.as_u16()),
-            BreakerStrategy::Allow404AndBelow => (200..=404).contains(&status.as_u16()),
-        };
-        if !success {
-            self.breakers.fail(&parsed_url);
-
-            if let Ok(s) = res.text().await {
-                if !s.is_empty() {
-                    tracing::debug!("Response from {parsed_url}, {s}");
-                }
-            }
-
-            return Err(ErrorKind::Status(
-                parsed_url.to_string(),
-                crate::http1::status_to_http02(status),
-            )
-            .into());
-        }
-
-        // only actually succeed a breaker on 2xx response
-        if status.is_success() {
-            self.last_online.mark_seen(&parsed_url);
-            self.breakers.succeed(&parsed_url);
-        }
-
-        Ok(res)
-    }
-
-    #[tracing::instrument(name = "Fetch Json", skip(self), fields(signing_string))]
-    pub(crate) async fn fetch_json<T>(
-        &self,
-        url: &IriString,
-        strategy: BreakerStrategy,
-    ) -> Result<T, Error>
+    fn count_err(&self) {
+        let count = self.consecutive_errors.fetch_add(1, Ordering::Relaxed);
+        if count + 1 >= self.error_limit {
+            warn!("{} consecutive errors, rebuilding http client", count);
+            *self.client.borrow_mut() = Client::builder()
+                .wrap(Tracing)
+                .add_default_header(("User-Agent", self.user_agent.clone()))
+                .finish();
+            self.reset_err();
+        }
+    }
+
+    fn reset_err(&self) {
+        self.consecutive_errors.swap(0, Ordering::Relaxed);
+    }
+
+    #[tracing::instrument(name = "Fetch Json")]
+    pub(crate) async fn fetch_json<T>(&self, url: &str) -> Result<T, Error>
     where
         T: serde::de::DeserializeOwned,
     {
-        self.do_fetch(url, "application/json", strategy).await
+        self.do_fetch(url, "application/json").await
     }

-    #[tracing::instrument(name = "Fetch Json", skip(self), fields(signing_string))]
-    pub(crate) async fn fetch_json_msky<T>(
-        &self,
-        url: &IriString,
-        strategy: BreakerStrategy,
-    ) -> Result<T, Error>
-    where
-        T: serde::de::DeserializeOwned,
-    {
-        let stream = self
-            .do_deliver(
-                url,
-                &serde_json::json!({}),
-                "application/json",
-                "application/json",
-                strategy,
-            )
-            .await?
-            .bytes_stream();
-
-        let body = aggregate(limit_stream(stream, JSON_SIZE_LIMIT)).await?;
-
-        Ok(serde_json::from_slice(&body)?)
-    }
-
-    #[tracing::instrument(name = "Fetch Activity+Json", skip(self), fields(signing_string))]
-    pub(crate) async fn fetch<T>(
-        &self,
-        url: &IriString,
-        strategy: BreakerStrategy,
-    ) -> Result<T, Error>
+    #[tracing::instrument(name = "Fetch Activity+Json")]
+    pub(crate) async fn fetch<T>(&self, url: &str) -> Result<T, Error>
+    where
+        T: serde::de::DeserializeOwned,
+    {
+        self.do_fetch(url, "application/activity+json").await
+    }
+
+    async fn do_fetch<T>(&self, url: &str, accept: &str) -> Result<T, Error>
     where
         T: serde::de::DeserializeOwned,
     {
-        self.do_fetch(url, "application/activity+json", strategy)
-            .await
-    }
-
-    async fn do_fetch<T>(
-        &self,
-        url: &IriString,
-        accept: &str,
-        strategy: BreakerStrategy,
-    ) -> Result<T, Error>
-    where
-        T: serde::de::DeserializeOwned,
-    {
-        let stream = self
-            .do_fetch_response(url, accept, strategy)
-            .await?
-            .bytes_stream();
-
-        let body = aggregate(limit_stream(stream, JSON_SIZE_LIMIT)).await?;
-
-        Ok(serde_json::from_slice(&body)?)
-    }
-
-    #[tracing::instrument(name = "Fetch response", skip(self), fields(signing_string))]
-    pub(crate) async fn fetch_response(
-        &self,
-        url: &IriString,
-        strategy: BreakerStrategy,
-    ) -> Result<reqwest::Response, Error> {
-        self.do_fetch_response(url, "*/*", strategy).await
-    }
-
-    pub(crate) async fn do_fetch_response(
-        &self,
-        url: &IriString,
-        accept: &str,
-        strategy: BreakerStrategy,
-    ) -> Result<reqwest::Response, Error> {
-        if !self.breakers.should_try(url) {
+        let parsed_url = url.parse::<IriString>()?;
+
+        if !self.breakers.should_try(&parsed_url) {
             return Err(ErrorKind::Breaker.into());
         }

         let signer = self.signer();
-        let span = tracing::Span::current();

-        let request = self
-            .client
-            .get(url.as_str())
-            .header("Accept", accept)
-            .header("Date", Date(SystemTime::now().into()).to_string())
-            .signature(&self.config, self.key_id.clone(), move |signing_string| {
-                span.record("signing_string", signing_string);
-                span.in_scope(|| signer.sign(signing_string))
-            })
-            .await?;
-
-        let res = self.client.execute(request).await;
-
-        let res = self.check_response(url, strategy, res).await?;
-
-        Ok(res)
+        let client: Client = self.client.borrow().clone();
+        let res = client
+            .get(url)
+            .insert_header(("Accept", accept))
+            .insert_header(Date(SystemTime::now().into()))
+            .signature(
+                self.config.clone(),
+                self.key_id.clone(),
+                move |signing_string| signer.sign(signing_string),
+            )
+            .await?
+            .send()
+            .await;
+
+        if res.is_err() {
+            self.count_err();
+            self.breakers.fail(&parsed_url);
+        }
+
+        let mut res = res.map_err(|e| ErrorKind::SendRequest(url.to_string(), e.to_string()))?;
+
+        self.reset_err();
+
+        if !res.status().is_success() {
+            if let Ok(bytes) = res.body().await {
+                if let Ok(s) = String::from_utf8(bytes.as_ref().to_vec()) {
+                    if !s.is_empty() {
+                        debug!("Response from {}, {}", url, s);
+                    }
+                }
+            }
+
+            self.breakers.fail(&parsed_url);
+
+            return Err(ErrorKind::Status(url.to_string(), res.status()).into());
+        }
+
+        self.breakers.succeed(&parsed_url);
+
+        let body = res
+            .body()
+            .await
+            .map_err(|e| ErrorKind::ReceiveResponse(url.to_string(), e.to_string()))?;
+
+        Ok(serde_json::from_slice(body.as_ref())?)
+    }
+
+    #[tracing::instrument(name = "Fetch Bytes")]
+    pub(crate) async fn fetch_bytes(&self, url: &str) -> Result<(String, Bytes), Error> {
+        let parsed_url = url.parse::<IriString>()?;
+
+        if !self.breakers.should_try(&parsed_url) {
+            return Err(ErrorKind::Breaker.into());
+        }
+
+        info!("Fetching bytes for {}", url);
+        let signer = self.signer();
+
+        let client: Client = self.client.borrow().clone();
+        let res = client
+            .get(url)
+            .insert_header(("Accept", "*/*"))
+            .insert_header(Date(SystemTime::now().into()))
+            .signature(
+                self.config.clone(),
+                self.key_id.clone(),
+                move |signing_string| signer.sign(signing_string),
+            )
+            .await?
+            .send()
+            .await;
+
+        if res.is_err() {
+            self.breakers.fail(&parsed_url);
+            self.count_err();
+        }
+
+        let mut res = res.map_err(|e| ErrorKind::SendRequest(url.to_string(), e.to_string()))?;
+
+        self.reset_err();
+
+        let content_type = if let Some(content_type) = res.headers().get("content-type") {
+            if let Ok(s) = content_type.to_str() {
+                s.to_owned()
+            } else {
+                return Err(ErrorKind::ContentType.into());
+            }
+        } else {
+            return Err(ErrorKind::ContentType.into());
+        };
+
+        if !res.status().is_success() {
+            if let Ok(bytes) = res.body().await {
+                if let Ok(s) = String::from_utf8(bytes.as_ref().to_vec()) {
+                    if !s.is_empty() {
+                        debug!("Response from {}, {}", url, s);
+                    }
+                }
+            }
+
+            self.breakers.fail(&parsed_url);
+
+            return Err(ErrorKind::Status(url.to_string(), res.status()).into());
+        }
+
+        self.breakers.succeed(&parsed_url);
+
+        let bytes = match res.body().limit(1024 * 1024 * 4).await {
+            Err(e) => {
+                return Err(ErrorKind::ReceiveResponse(url.to_string(), e.to_string()).into());
+            }
+            Ok(bytes) => bytes,
+        };
+
+        Ok((content_type, bytes))
     }
     #[tracing::instrument(
         "Deliver to Inbox",
-        skip_all,
-        fields(inbox = inbox.to_string().as_str(), signing_string)
+        fields(self, inbox = inbox.to_string().as_str(), item)
     )]
-    pub(crate) async fn deliver<T>(
-        &self,
-        inbox: &IriString,
-        item: &T,
-        strategy: BreakerStrategy,
-    ) -> Result<(), Error>
-    where
-        T: serde::ser::Serialize + std::fmt::Debug,
-    {
-        self.do_deliver(
-            inbox,
-            item,
-            "application/activity+json",
-            "application/activity+json",
-            strategy,
-        )
-        .await?;
-        Ok(())
-    }
-
-    async fn do_deliver<T>(
-        &self,
-        inbox: &IriString,
-        item: &T,
-        content_type: &str,
-        accept: &str,
-        strategy: BreakerStrategy,
-    ) -> Result<reqwest::Response, Error>
+    pub(crate) async fn deliver<T>(&self, inbox: IriString, item: &T) -> Result<(), Error>
     where
         T: serde::ser::Serialize + std::fmt::Debug,
     {
@@ -399,60 +359,73 @@ impl Requests {
         }

         let signer = self.signer();
-        let span = tracing::Span::current();
         let item_string = serde_json::to_string(item)?;

-        let request = self
-            .client
+        let client: Client = self.client.borrow().clone();
+        let (req, body) = client
             .post(inbox.as_str())
-            .header("Accept", accept)
-            .header("Content-Type", content_type)
-            .header("Date", Date(SystemTime::now().into()).to_string())
+            .insert_header(("Accept", "application/activity+json"))
+            .insert_header(("Content-Type", "application/activity+json"))
+            .insert_header(Date(SystemTime::now().into()))
             .signature_with_digest(
                 self.config.clone(),
                 self.key_id.clone(),
                 Sha256::new(),
                 item_string,
-                move |signing_string| {
-                    span.record("signing_string", signing_string);
-                    span.in_scope(|| signer.sign(signing_string))
-                },
+                move |signing_string| signer.sign(signing_string),
             )
-            .await?;
-
-        let res = self.client.execute(request).await;
-
-        let res = self.check_response(inbox, strategy, res).await?;
-
-        Ok(res)
+            .await?
+            .split();
+
+        let res = req.send_body(body).await;
+
+        if res.is_err() {
+            self.count_err();
+            self.breakers.fail(&inbox);
+        }
+
+        let mut res = res.map_err(|e| ErrorKind::SendRequest(inbox.to_string(), e.to_string()))?;
+
+        self.reset_err();
+
+        if !res.status().is_success() {
+            if let Ok(bytes) = res.body().await {
+                if let Ok(s) = String::from_utf8(bytes.as_ref().to_vec()) {
+                    if !s.is_empty() {
+                        debug!("Response from {}, {}", inbox.as_str(), s);
+                    }
+                }
+            }
+
+            self.breakers.fail(&inbox);
+
+            return Err(ErrorKind::Status(inbox.to_string(), res.status()).into());
+        }
+
+        self.breakers.succeed(&inbox);
+
+        Ok(())
     }

     fn signer(&self) -> Signer {
         Signer {
             private_key: self.private_key.clone(),
-            rng: self.rng.clone(),
         }
     }
 }

 struct Signer {
-    private_key: Arc<RsaKeyPair>,
-    rng: SystemRandom,
+    private_key: RsaPrivateKey,
 }

 impl Signer {
     fn sign(&self, signing_string: &str) -> Result<String, Error> {
-        let mut signature = vec![0; self.private_key.public().modulus_len()];
-
-        self.private_key
-            .sign(
-                &RSA_PKCS1_SHA256,
-                &self.rng,
-                signing_string.as_bytes(),
-                &mut signature,
-            )
-            .map_err(|_| ErrorKind::SignRequest)?;
-
-        Ok(STANDARD.encode(&signature))
+        let hashed = Sha256::digest(signing_string.as_bytes());
+        let bytes = self.private_key.sign(
+            PaddingScheme::PKCS1v15Sign {
+                hash: Some(Hash::SHA2_256),
+            },
+            &hashed,
+        )?;
+        Ok(base64::encode(bytes))
     }
 }
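
Both branches share the per-authority circuit breaker above: once a host reaches the failure threshold (10) it is skipped until a full day has passed since the last attempt, and any success resets it. A std-only sketch of that policy, decoupled from `dashmap` and the request plumbing (names are ours):

```rust
use std::collections::HashMap;
use std::time::{Duration, SystemTime};

const FAILURE_THRESHOLD: usize = 10;
const FAILURE_WAIT: Duration = Duration::from_secs(60 * 60 * 24); // one day

#[derive(Default)]
struct Breaker {
    failures: usize,
    last_attempt: Option<SystemTime>,
}

impl Breaker {
    fn should_try(&self) -> bool {
        // Open breaker only if over the threshold AND inside the wait window.
        self.failures < FAILURE_THRESHOLD
            || self.last_attempt.map_or(true, |t| t + FAILURE_WAIT < SystemTime::now())
    }
    fn fail(&mut self) {
        self.failures += 1;
        self.last_attempt = Some(SystemTime::now());
    }
    fn succeed(&mut self) {
        self.failures = 0;
        self.last_attempt = None;
    }
}

fn main() {
    let mut breakers: HashMap<String, Breaker> = HashMap::new();
    let b = breakers.entry("relay.example".to_string()).or_default();
    for _ in 0..FAILURE_THRESHOLD {
        b.fail();
    }
    assert!(!b.should_try()); // skipped until the wait window passes
    b.succeed();
    assert!(b.should_try());
}
```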


@@ -13,9 +13,9 @@ use activitystreams::{
 };
 use activitystreams_ext::Ext1;
 use actix_web::{web, Responder};
-use rsa::pkcs8::EncodePublicKey;
+use rsa::pkcs8::ToPublicKey;

-#[tracing::instrument(name = "Actor", skip(config, state))]
+#[tracing::instrument(name = "Actor")]
 pub(crate) async fn route(
     state: web::Data<State>,
     config: web::Data<Config>,
@@ -26,9 +26,7 @@ pub(crate) async fn route(
             public_key: PublicKeyInner {
                 id: config.generate_url(UrlKind::MainKey),
                 owner: config.generate_url(UrlKind::Actor),
-                public_key_pem: state
-                    .public_key
-                    .to_public_key_pem(rsa::pkcs8::LineEnding::default())?,
+                public_key_pem: state.public_key.to_public_key_pem()?,
             },
         },
     );


@@ -1,7 +0,0 @@
use crate::{data::State, error::Error};
use actix_web::{web, HttpResponse};
pub(crate) async fn route(state: web::Data<State>) -> Result<HttpResponse, Error> {
state.db.check_health().await?;
Ok(HttpResponse::Ok().finish())
}


@@ -15,9 +15,9 @@ use activitystreams::{
 };
 use actix_web::{web, HttpResponse};
 use http_signature_normalization_actix::prelude::{DigestVerified, SignatureVerified};
+use tracing::error;

-#[tracing::instrument(name = "Inbox", skip_all, fields(id = tracing::field::debug(&input.id_unchecked()), kind = tracing::field::debug(&input.kind())))]
-#[allow(clippy::too_many_arguments)]
+#[tracing::instrument(name = "Inbox")]
 pub(crate) async fn route(
     state: web::Data<State>,
     actors: web::Data<ActorCache>,
@@ -25,45 +25,17 @@ pub(crate) async fn route(
     client: web::Data<Requests>,
     jobs: web::Data<JobServer>,
     input: web::Json<AcceptedActivities>,
-    digest_verified: Option<DigestVerified>,
-    signature_verified: Option<SignatureVerified>,
+    verified: Option<(SignatureVerified, DigestVerified)>,
 ) -> Result<HttpResponse, Error> {
     let input = input.into_inner();

-    let kind = input.kind().ok_or(ErrorKind::MissingKind)?;
-
-    if digest_verified.is_some() && signature_verified.is_none() && *kind == ValidTypes::Delete {
-        return Ok(accepted(serde_json::json!({})));
-    } else if config.validate_signatures()
-        && (digest_verified.is_none() || signature_verified.is_none())
-    {
-        return Err(ErrorKind::NoSignature(None).into());
-    }
-
-    let actor_id = if input.id_unchecked().is_some() {
-        input.actor()?.as_single_id().ok_or(ErrorKind::MissingId)?
-    } else {
-        input
-            .actor_unchecked()
-            .as_single_id()
-            .ok_or(ErrorKind::MissingId)?
-    };
-
-    let actor = actors.get(actor_id, &client).await?.into_inner();
-
-    if let Some(verified) = signature_verified {
-        if actor.public_key_id.as_str() != verified.key_id() {
-            tracing::error!("Actor signed with wrong key");
-            return Err(ErrorKind::BadActor(
-                actor.public_key_id.to_string(),
-                verified.key_id().to_owned(),
-            )
-            .into());
-        }
-    } else if config.validate_signatures() {
-        tracing::error!("This case should never be reachable, since I handle signature checks earlier in the flow. If you see this in a log it means I did it wrong");
-        return Err(ErrorKind::NoSignature(Some(actor.public_key_id.to_string())).into());
-    }
+    let actor = actors
+        .get(
+            input.actor()?.as_single_id().ok_or(ErrorKind::MissingId)?,
+            &client,
+        )
+        .await?
+        .into_inner();

     let is_allowed = state.db.is_allowed(actor.id.clone()).await?;
     let is_connected = state.db.is_connected(actor.id.clone()).await?;
@@ -76,16 +48,29 @@ pub(crate) async fn route(
         return Err(ErrorKind::NotSubscribed(actor.id.to_string()).into());
     }

-    match kind {
+    if config.validate_signatures() && verified.is_none() {
+        return Err(ErrorKind::NoSignature(actor.public_key_id.to_string()).into());
+    } else if config.validate_signatures() {
+        if let Some((verified, _)) = verified {
+            if actor.public_key_id.as_str() != verified.key_id() {
+                error!("Bad actor, more info: {:?}", input);
+                return Err(ErrorKind::BadActor(
+                    actor.public_key_id.to_string(),
+                    verified.key_id().to_owned(),
+                )
+                .into());
+            }
+        }
+    }
+
+    match input.kind().ok_or(ErrorKind::MissingKind)? {
         ValidTypes::Accept => handle_accept(&config, input).await?,
         ValidTypes::Reject => handle_reject(&config, &jobs, input, actor).await?,
         ValidTypes::Announce | ValidTypes::Create => {
             handle_announce(&state, &jobs, input, actor).await?
         }
         ValidTypes::Follow => handle_follow(&config, &jobs, input, actor).await?,
-        ValidTypes::Add | ValidTypes::Delete | ValidTypes::Remove | ValidTypes::Update => {
-            handle_forward(&jobs, input, actor).await?
-        }
+        ValidTypes::Delete | ValidTypes::Update => handle_forward(&jobs, input, actor).await?,
         ValidTypes::Undo => handle_undo(&config, &jobs, input, actor, is_connected).await?,
     };
@@ -217,7 +202,7 @@ async fn handle_announce(
         .as_single_id()
         .ok_or(ErrorKind::MissingId)?;

-    if state.is_cached(object_id) {
+    if state.is_cached(object_id).await {
         return Err(ErrorKind::Duplicate.into());
     }


@@ -1,91 +1,27 @@
 use crate::{
     config::Config,
-    data::{Node, State},
+    data::State,
     error::{Error, ErrorKind},
 };
 use actix_web::{web, HttpResponse};
 use rand::{seq::SliceRandom, thread_rng};
 use std::io::BufWriter;
+use tracing::error;

-const MINIFY_CONFIG: minify_html::Cfg = minify_html::Cfg {
-    do_not_minify_doctype: true,
-    ensure_spec_compliant_unquoted_attribute_values: true,
-    keep_closing_tags: true,
-    keep_html_and_head_opening_tags: false,
-    keep_spaces_between_attributes: true,
-    keep_comments: false,
-    keep_input_type_text_attr: true,
-    keep_ssi_comments: false,
-    preserve_brace_template_syntax: false,
-    preserve_chevron_percent_template_syntax: false,
-    minify_css: true,
-    minify_js: true,
-    remove_bangs: true,
-    remove_processing_instructions: true,
-};
-
-fn open_reg(node: &Node) -> bool {
-    node.instance
-        .as_ref()
-        .map(|i| i.reg)
-        .or_else(|| node.info.as_ref().map(|i| i.reg))
-        .unwrap_or(false)
-}
-
-#[tracing::instrument(name = "Index", skip(config, state))]
+#[tracing::instrument(name = "Index")]
 pub(crate) async fn route(
     state: web::Data<State>,
     config: web::Data<Config>,
 ) -> Result<HttpResponse, Error> {
-    let all_nodes = state.node_cache.nodes().await?;
-
-    let mut nodes = Vec::new();
-    let mut local = Vec::new();
-
-    for node in all_nodes {
-        if !state.is_connected(&node.base) {
-            continue;
-        }
-
-        if node
-            .base
-            .authority_str()
-            .map(|authority| {
-                config
-                    .local_domains()
-                    .iter()
-                    .any(|domain| domain.as_str() == authority)
-            })
-            .unwrap_or(false)
-        {
-            local.push(node);
-        } else {
-            nodes.push(node);
-        }
-    }
-
-    nodes.sort_by(|lhs, rhs| match (open_reg(lhs), open_reg(rhs)) {
-        (true, true) | (false, false) => std::cmp::Ordering::Equal,
-        (true, false) => std::cmp::Ordering::Less,
-        (false, true) => std::cmp::Ordering::Greater,
-    });
-
-    if let Some((i, _)) = nodes.iter().enumerate().find(|(_, node)| !open_reg(node)) {
-        nodes[..i].shuffle(&mut thread_rng());
-        nodes[i..].shuffle(&mut thread_rng());
-    } else {
-        nodes.shuffle(&mut thread_rng());
-    }
+    let mut nodes = state.node_cache().nodes().await?;
+    nodes.shuffle(&mut thread_rng());

     let mut buf = BufWriter::new(Vec::new());
-    crate::templates::index_html(&mut buf, &local, &nodes, &config)?;
-    let html = buf.into_inner().map_err(|e| {
-        tracing::error!("Error rendering template, {}", e.error());
+    crate::templates::index(&mut buf, &nodes, &config)?;
+    let buf = buf.into_inner().map_err(|e| {
+        error!("Error rendering template, {}", e.error());
         ErrorKind::FlushBuffer
     })?;

-    let html = minify_html::minify(&html, &MINIFY_CONFIG);
-    Ok(HttpResponse::Ok().content_type("text/html").body(html))
+    Ok(HttpResponse::Ok().content_type("text/html").body(buf))
 }


@@ -1,16 +1,11 @@
-use crate::{
-    data::MediaCache,
-    error::Error,
-    requests::{BreakerStrategy, Requests},
-    stream::limit_stream,
-};
-use actix_web::{body::BodyStream, web, HttpResponse};
+use crate::{data::MediaCache, error::Error, requests::Requests};
+use actix_web::{
+    http::header::{CacheControl, CacheDirective},
+    web, HttpResponse,
+};
 use uuid::Uuid;

-// 16 MB
-const IMAGE_SIZE_LIMIT: usize = 16 * 1024 * 1024;
-
-#[tracing::instrument(name = "Media", skip(media, requests))]
+#[tracing::instrument(name = "Media")]
 pub(crate) async fn route(
     media: web::Data<MediaCache>,
     requests: web::Data<Requests>,
@@ -18,25 +13,30 @@ pub(crate) async fn route(
 ) -> Result<HttpResponse, Error> {
     let uuid = uuid.into_inner();

+    if let Some((content_type, bytes)) = media.get_bytes(uuid).await? {
+        return Ok(cached(content_type, bytes));
+    }
+
     if let Some(url) = media.get_url(uuid).await? {
-        let res = requests
-            .fetch_response(&url, BreakerStrategy::Allow404AndBelow)
+        let (content_type, bytes) = requests.fetch_bytes(url.as_str()).await?;
+
+        media
+            .store_bytes(uuid, content_type.clone(), bytes.clone())
             .await?;

-        let mut response = HttpResponse::build(crate::http1::status_to_http02(res.status()));
-
-        for (name, value) in res.headers().iter().filter(|(h, _)| *h != "connection") {
-            response.insert_header((
-                crate::http1::name_to_http02(name),
-                crate::http1::value_to_http02(value),
-            ));
-        }
-
-        return Ok(response.body(BodyStream::new(limit_stream(
-            res.bytes_stream(),
-            IMAGE_SIZE_LIMIT,
-        ))));
+        return Ok(cached(content_type, bytes));
     }

     Ok(HttpResponse::NotFound().finish())
 }
+
+fn cached(content_type: String, bytes: web::Bytes) -> HttpResponse {
+    HttpResponse::Ok()
+        .insert_header(CacheControl(vec![
+            CacheDirective::Public,
+            CacheDirective::MaxAge(60 * 60 * 24),
+            CacheDirective::Extension("immutable".to_owned(), None),
+        ]))
+        .content_type(content_type)
+        .body(bytes)
+}


@@ -1,5 +1,4 @@
 mod actor;
-mod healthz;
 mod inbox;
 mod index;
 mod media;
@@ -8,7 +7,6 @@ mod statics;
 pub(crate) use self::{
     actor::route as actor,
-    healthz::route as healthz,
     inbox::route as inbox,
     index::route as index,
     media::route as media,


@@ -5,7 +5,7 @@ use crate::{
 use actix_web::{web, Responder};
 use actix_webfinger::Link;

-#[tracing::instrument(name = "Well Known NodeInfo", skip(config))]
+#[tracing::instrument(name = "Well Known NodeInfo")]
 pub(crate) async fn well_known(config: web::Data<Config>) -> impl Responder {
     web::Json(Links {
         links: vec![Link {
@@ -24,28 +24,11 @@ struct Links {
     links: Vec<Link>,
 }

-#[tracing::instrument(name = "NodeInfo", skip_all)]
+#[tracing::instrument(name = "NodeInfo")]
 pub(crate) async fn route(
     config: web::Data<Config>,
     state: web::Data<State>,
 ) -> web::Json<NodeInfo> {
-    let inboxes = state.db.inboxes().await;
-
-    let blocks = if config.publish_blocks() {
-        Some(state.db.blocks().await.unwrap_or_default())
-    } else {
-        None
-    };
-
-    let peers = inboxes
-        .unwrap_or_default()
-        .iter()
-        .filter_map(|listener| listener.authority_str())
-        .map(|s| s.to_owned())
-        .collect();
-
-    let open_registrations = !config.restricted_mode();
-
     web::Json(NodeInfo {
         version: NodeInfoVersion,
         software: Software {
@@ -57,7 +40,7 @@ pub(crate) async fn route(
             inbound: vec![],
             outbound: vec![],
         },
-        open_registrations,
+        open_registrations: false,
         usage: Usage {
             users: Users {
                 total: 1,
@@ -67,7 +50,22 @@ pub(crate) async fn route(
             local_posts: 0,
             local_comments: 0,
         },
-        metadata: Metadata { peers, blocks },
+        metadata: Metadata {
+            peers: state
+                .db
+                .inboxes()
+                .await
+                .unwrap_or_default()
+                .iter()
+                .filter_map(|listener| listener.authority_str())
+                .map(|s| s.to_owned())
+                .collect(),
+            blocks: if config.publish_blocks() {
+                Some(state.db.blocks().await.unwrap_or_default())
+            } else {
+                None
+            },
+        },
     })
 }


@@ -5,7 +5,7 @@ use actix_web::{
 };

 #[allow(clippy::async_yields_async)]
-#[tracing::instrument(name = "Statics")]
+#[tracing::instrument(name = "Statistics")]
 pub(crate) async fn route(filename: web::Path<String>) -> HttpResponse {
     if let Some(data) = StaticFile::get(&filename.into_inner()) {
         HttpResponse::Ok()


@@ -1,92 +0,0 @@
use async_cpupool::CpuPool;
use http_signature_normalization_actix::{Canceled, Spawn};
use std::time::Duration;
#[derive(Clone)]
pub(crate) struct Spawner {
pool: CpuPool,
}
impl Spawner {
pub(crate) fn build(name: &'static str, threads: u16) -> color_eyre::Result<Self> {
let pool = CpuPool::configure()
.name(name)
.max_threads(threads)
.build()?;
Ok(Spawner { pool })
}
pub(crate) async fn close(self) {
self.pool.close().await;
}
}
impl std::fmt::Debug for Spawner {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Spawner").finish()
}
}
async fn timer<Fut>(fut: Fut) -> Fut::Output
where
Fut: std::future::Future,
{
let id = uuid::Uuid::new_v4();
metrics::counter!("relay.spawner.wait-timer.start").increment(1);
let mut interval = tokio::time::interval(Duration::from_secs(5));
// pass the first tick (instant)
interval.tick().await;
let mut fut = std::pin::pin!(fut);
let mut counter = 0;
loop {
tokio::select! {
out = &mut fut => {
metrics::counter!("relay.spawner.wait-timer.end").increment(1);
return out;
}
_ = interval.tick() => {
counter += 1;
metrics::counter!("relay.spawner.wait-timer.pending").increment(1);
tracing::warn!("Blocking operation {id} is taking a long time, {} seconds", counter * 5);
}
}
}
}
impl Spawn for Spawner {
type Future<T> = std::pin::Pin<Box<dyn std::future::Future<Output = Result<T, Canceled>>>>;
fn spawn_blocking<Func, Out>(&self, func: Func) -> Self::Future<Out>
where
Func: FnOnce() -> Out + Send + 'static,
Out: Send + 'static,
{
let pool = self.pool.clone();
Box::pin(async move { timer(pool.spawn(func)).await.map_err(|_| Canceled) })
}
}
impl http_signature_normalization_reqwest::Spawn for Spawner {
type Future<T> = std::pin::Pin<Box<dyn std::future::Future<Output = Result<T, http_signature_normalization_reqwest::Canceled>> + Send>> where T: Send;
fn spawn_blocking<Func, Out>(&self, func: Func) -> Self::Future<Out>
where
Func: FnOnce() -> Out + Send + 'static,
Out: Send + 'static,
{
let pool = self.pool.clone();
Box::pin(async move {
timer(pool.spawn(func))
.await
.map_err(|_| http_signature_normalization_reqwest::Canceled)
})
}
}
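
The deleted `Spawner` wraps every CPU-pool job in a watchdog: a 5-second `tokio` interval ticks alongside the job and emits a warning while it is still pending. A minimal sketch of that `select!` pattern, with the CPU pool swapped for an arbitrary future (names and the `eprintln!` are ours):

```rust
use std::time::Duration;

async fn with_watchdog<F: std::future::Future>(fut: F) -> F::Output {
    let mut interval = tokio::time::interval(Duration::from_secs(5));
    interval.tick().await; // skip the immediate first tick
    let mut fut = std::pin::pin!(fut);
    let mut elapsed = 0u64;
    loop {
        tokio::select! {
            out = &mut fut => return out,
            _ = interval.tick() => {
                elapsed += 5;
                eprintln!("blocking operation still pending after {elapsed}s");
            }
        }
    }
}

#[tokio::main]
async fn main() {
    let out = with_watchdog(async { 21 * 2 }).await;
    assert_eq!(out, 42);
}
```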


@@ -1,59 +0,0 @@
use crate::error::{Error, ErrorKind};
use actix_web::web::{Bytes, BytesMut};
use futures_core::Stream;
use streem::IntoStreamer;
pub(crate) fn limit_stream<'a, S>(
input: S,
limit: usize,
) -> impl Stream<Item = Result<Bytes, Error>> + Send + 'a
where
S: Stream<Item = reqwest::Result<Bytes>> + Send + 'a,
{
streem::try_from_fn(move |yielder| async move {
let stream = std::pin::pin!(input);
let mut stream = stream.into_streamer();
let mut count = 0;
while let Some(bytes) = stream.try_next().await? {
count += bytes.len();
if count > limit {
return Err(ErrorKind::BodyTooLarge.into());
}
yielder.yield_ok(bytes).await;
}
Ok(())
})
}
pub(crate) async fn aggregate<S>(input: S) -> Result<Bytes, Error>
where
S: Stream<Item = Result<Bytes, Error>>,
{
let stream = std::pin::pin!(input);
let mut streamer = stream.into_streamer();
let mut buf = Vec::new();
while let Some(bytes) = streamer.try_next().await? {
buf.push(bytes);
}
if buf.len() == 1 {
return Ok(buf.pop().expect("buf has exactly one element"));
}
let total_size: usize = buf.iter().map(|b| b.len()).sum();
let mut bytes_mut = BytesMut::with_capacity(total_size);
for bytes in &buf {
bytes_mut.extend_from_slice(&bytes);
}
Ok(bytes_mut.freeze())
}
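
The two helpers above implement a cap-then-coalesce strategy: limit_stream aborts with BodyTooLarge once the running byte count passes the limit, and aggregate skips the copy entirely when the body arrived as a single chunk. The same logic over a plain iterator of chunks, with the async machinery removed, looks like the sketch below; aggregate_limited and the string error are illustrative, not the relay's API.

// Cap a sequence of chunks at `limit` total bytes, then coalesce them into
// one buffer, reusing the sole chunk when no concatenation is needed.
fn aggregate_limited<I>(chunks: I, limit: usize) -> Result<Vec<u8>, &'static str>
where
    I: IntoIterator<Item = Vec<u8>>,
{
    let mut buf: Vec<Vec<u8>> = Vec::new();
    let mut count = 0;
    for chunk in chunks {
        count += chunk.len();
        if count > limit {
            return Err("body too large");
        }
        buf.push(chunk);
    }
    if buf.len() == 1 {
        // single-chunk fast path: hand the chunk back without copying
        return Ok(buf.pop().expect("buf has exactly one element"));
    }
    let mut out = Vec::with_capacity(count);
    for chunk in &buf {
        out.extend_from_slice(chunk);
    }
    Ok(out)
}

fn main() {
    assert_eq!(aggregate_limited([vec![1, 2], vec![3]], 8), Ok(vec![1, 2, 3]));
    assert!(aggregate_limited([vec![1, 2], vec![3]], 2).is_err());
}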

View file

@@ -1,130 +0,0 @@
use crate::db::Db;
use std::sync::Arc;
use teloxide::{
dispatching::{Dispatcher, UpdateFilterExt},
requests::{Requester, ResponseResult},
types::{Message, Update},
utils::command::BotCommands,
Bot,
};
#[derive(BotCommands, Clone, Debug)]
#[command(
rename_rule = "lowercase",
description = "These commands are for administering AodeRelay"
)]
enum Command {
#[command(description = "Display this text.")]
Start,
#[command(description = "Display this text.")]
Help,
#[command(description = "Block a domain from the relay.")]
Block { domain: String },
#[command(description = "Unblock a domain from the relay.")]
Unblock { domain: String },
#[command(description = "Allow a domain to connect to the relay (for RESTRICTED_MODE)")]
Allow { domain: String },
#[command(description = "Disallow a domain to connect to the relay (for RESTRICTED_MODE)")]
Disallow { domain: String },
#[command(description = "List blocked domains")]
ListBlocks,
#[command(description = "List allowed domains")]
ListAllowed,
#[command(description = "List connected domains")]
ListConnected,
}
pub(crate) fn start(admin_handle: String, db: Db, token: &str) {
let bot = Bot::new(token);
let admin_handle = Arc::new(admin_handle);
tokio::spawn(async move {
let command_handler = teloxide::filter_command::<Command, _>().endpoint(
move |bot: Bot, msg: Message, cmd: Command| {
let admin_handle = admin_handle.clone();
let db = db.clone();
async move {
if !is_admin(&admin_handle, &msg) {
bot.send_message(msg.chat.id, "You are not authorized")
.await?;
return Ok(());
}
answer(bot, msg, cmd, db).await
}
},
);
let message_handler = Update::filter_message().branch(command_handler);
Dispatcher::builder(bot, message_handler)
.build()
.dispatch()
.await;
});
}
fn is_admin(admin_handle: &str, message: &Message) -> bool {
message
.from
.as_ref()
.and_then(|user| user.username.as_deref())
.map(|username| username == admin_handle)
.unwrap_or(false)
}
#[tracing::instrument(skip(bot, msg, db))]
async fn answer(bot: Bot, msg: Message, cmd: Command, db: Db) -> ResponseResult<()> {
match cmd {
Command::Help | Command::Start => {
bot.send_message(msg.chat.id, Command::descriptions().to_string())
.await?;
}
Command::Block { domain } if db.add_blocks(vec![domain.clone()]).await.is_ok() => {
bot.send_message(msg.chat.id, format!("{domain} has been blocked"))
.await?;
}
Command::Unblock { domain } if db.remove_blocks(vec![domain.clone()]).await.is_ok() => {
bot.send_message(msg.chat.id, format!("{domain} has been unblocked"))
.await?;
}
Command::Allow { domain } if db.add_allows(vec![domain.clone()]).await.is_ok() => {
bot.send_message(msg.chat.id, format!("{domain} has been allowed"))
.await?;
}
Command::Disallow { domain } if db.remove_allows(vec![domain.clone()]).await.is_ok() => {
bot.send_message(msg.chat.id, format!("{domain} has been disallowed"))
.await?;
}
Command::ListAllowed => {
if let Ok(allowed) = db.allows().await {
bot.send_message(msg.chat.id, allowed.join("\n")).await?;
}
}
Command::ListBlocks => {
if let Ok(blocks) = db.blocks().await {
bot.send_message(msg.chat.id, blocks.join("\n")).await?;
}
}
Command::ListConnected => {
if let Ok(connected) = db.connected_ids().await {
bot.send_message(msg.chat.id, connected.join("\n")).await?;
}
}
_ => {
bot.send_message(msg.chat.id, "Internal server error")
.await?;
}
}
Ok(())
}
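
One detail worth noting in the deleted bot: authorization is nothing more than comparing the sender's username against the configured admin handle before any command is dispatched. Stripped of teloxide, the guard reduces to the sketch below; User and Message here are hand-rolled stand-ins for the teloxide types, kept only so the snippet compiles on its own.

// Hand-rolled stand-ins for the slice of teloxide's types the guard touches.
struct User {
    username: Option<String>,
}
struct Message {
    from: Option<User>,
}

// Mirrors `is_admin` above: only a sender whose username matches the
// configured handle may run admin commands; anonymous senders are rejected.
fn is_admin(admin_handle: &str, message: &Message) -> bool {
    message
        .from
        .as_ref()
        .and_then(|user| user.username.as_deref())
        .map(|username| username == admin_handle)
        .unwrap_or(false)
}

fn main() {
    let msg = Message {
        from: Some(User {
            username: Some("myhandle".into()),
        }),
    };
    assert!(is_admin("myhandle", &msg));
    assert!(!is_admin("someone-else", &msg));
}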

View file

@@ -1,15 +0,0 @@
[Unit]
Description=Activitypub Relay
Documentation=https://git.asonix.dog/asonix/relay
Wants=network.target
After=network.target
[Install]
WantedBy=multi-user.target
[Service]
Type=simple
EnvironmentFile=/etc/systemd/system/example-relay.service.env
ExecStart=/path/to/relay
Restart=always

View file

@@ -1,19 +0,0 @@
HOSTNAME='relay.example.com'
ADDR='0.0.0.0'
PORT='8080'
RESTRICTED_MODE='true'
VALIDATE_SIGNATURES='true'
HTTPS='true'
PRETTY_LOG='false'
PUBLISH_BLOCKS='true'
DEBUG='false'
SLED_PATH='/opt/sled'
TELEGRAM_ADMIN_HANDLE='myhandle'
RUST_BACKTRACE='full'
FOOTER_BLURB='Contact <a href="https://masto.example.com/@example">@example</a> for inquiries.'
LOCAL_DOMAINS='masto.example.com'
LOCAL_BLURB='<p>An ActivityPub relay for servers. Currently running somewhere. Let me know if you want to join!</p>'
OPENTELEMETRY_URL='http://otel.example.com:4317'
API_TOKEN='blahblahblahblahblahblahblah'
TELEGRAM_TOKEN='blahblahblahblahblahblahblah'
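
Every key in this file becomes an environment variable for the service, so the relay reads its configuration at startup rather than from a config file. A sketch of how a few of these values could be consumed with only the standard library follows; the relay's actual loader is not shown in this diff, so treat the defaults here as illustrative.

use std::env;

fn main() {
    // HOSTNAME has no sensible default; fail loudly if it is missing.
    let hostname = env::var("HOSTNAME").expect("HOSTNAME must be set");
    // Booleans arrive as the strings 'true'/'false' from the unit's env file.
    let restricted_mode = env::var("RESTRICTED_MODE")
        .map(|v| v == "true")
        .unwrap_or(false);
    let port: u16 = env::var("PORT")
        .unwrap_or_else(|_| "8080".into())
        .parse()
        .expect("PORT must be a valid port number");
    println!("{hostname}:{port} restricted_mode={restricted_mode}");
}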

View file

@@ -1,11 +0,0 @@
[Unit]
Description=Activitypub Relay Socket
Before=multi-user.target
After=network.target
[Socket]
Service=example-relay.service
ListenStream=8080
[Install]
WantedBy=sockets.target
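
This socket unit asks systemd to bind port 8080 itself and hand the listener to the service through socket activation (LISTEN_FDS). A hedged sketch of accepting such a listener in Rust follows, assuming the third-party listenfd crate; whether the relay binary actually consumes activated sockets is not visible in this diff.

use std::net::TcpListener;

fn main() -> std::io::Result<()> {
    // listenfd picks up file descriptors passed by systemd (LISTEN_FDS).
    let mut fds = listenfd::ListenFd::from_env();
    let listener = match fds.take_tcp_listener(0)? {
        // Socket-activated: reuse the listener systemd already bound.
        Some(listener) => listener,
        // Started directly: fall back to binding the configured address.
        None => TcpListener::bind("0.0.0.0:8080")?,
    };
    println!("listening on {}", listener.local_addr()?);
    Ok(())
}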

View file

@@ -4,15 +4,15 @@
@(contact: &Contact, base: &IriString)
<div class="admin">
<div class="left">
<figure class="avatar">
-<img loading="lazy" src="@contact.avatar" alt="@contact.display_name's avatar">
+<img src="@contact.avatar" alt="@contact.display_name's avatar">
</figure>
</div>
<div class="right">
<p class="display-name"><a href="@contact.url">@contact.display_name</a></p>
<p class="username">
@@@contact.username@if let Some(authority) = base.authority_str() {@@@authority}
</p>
</div>
</div>

View file

@@ -1,124 +1,81 @@
@use crate::{
config::{Config, UrlKind},
data::Node,
-templates::{info_html, instance_html, statics::index_css},
+templates::{info, instance, statics::index_css},
};
-@(local: &[Node], nodes: &[Node], config: &Config)
+@(nodes: &[Node], config: &Config)
<!doctype html>
<html>
<head lang="en">
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<title>@config.hostname() | ActivityPub Relay</title>
<link rel="stylesheet" href="/static/@index_css.name" type="text/css" />
</head>
-
<body>
<header>
<div class="header-text">
<h1>@Config::software_name()<span class="smaller">@Config::software_version()</span></h1>
<p>on @config.hostname()</p>
</div>
</header>
<main>
-@if !local.is_empty() || config.local_blurb().is_some() {
-<article>
-<h3>About</h3>
-<section class="local-explainer">
-@if let Some(blurb) = config.local_blurb() {
-@blurb
-} else {
-<p>These domains are run by the same admins as this relay.</p>
-}
-</section>
-@if !local.is_empty() {
-<ul>
-@for node in local {
-@if let Some(inst) = node.instance.as_ref() {
-<li>
-@:instance_html(inst, node.info.as_ref().map(|info| { info.software.as_ref() }), node.contact.as_ref(),
-&node.base)
-</li>
-} else {
-@if let Some(inf) = node.info.as_ref() {
-<li>
-@:info_html(inf, &node.base)
-</li>
-}
-}
-}
-</ul>
-}
-</article>
-}
-<article>
-<h3>Joining</h3>
-<section class="joining">
-@if config.restricted_mode() {
-<h4>
-This relay is Restricted
-</h4>
-<p>
-This relay is currently in Restricted Mode, which means servers need to be approved ahead of time by the relay
-administrator. Please contact the admin before attempting to join.
-</p>
-} else {
-<p>
-If you are the admin of a server that supports activitypub relays, you can add
-this relay to your server.
-</p>
-}
-<h4>Mastodon</h4>
-<p>
-Mastodon admins can add this relay by adding
-<pre>@config.generate_url(UrlKind::Inbox)</pre> in their relay settings.
-</p>
-<h4>Pleroma</h4>
-<p>
-Pleroma admins can add this relay by adding
-<pre>@config.generate_url(UrlKind::Actor)</pre> to their relay settings.
-</p>
-<h4>Others</h4>
-<p>
-Consult the documentation for your server. It's likely that it follows either
-Mastodon or Pleroma's relay formatting.
-</p>
-</section>
-</article>
-@if !nodes.is_empty() {
-<article>
-<h3>@nodes.len() Connected Servers</h3>
-<ul>
-@for node in nodes {
-@if let Some(inst) = node.instance.as_ref() {
-<li>
-@:instance_html(inst, node.info.as_ref().map(|info| { info.software.as_ref() }), node.contact.as_ref(),
-&node.base)
-</li>
-} else {
-@if let Some(inf) = node.info.as_ref() {
-<li>
-@:info_html(inf, &node.base)
-</li>
-}
-}
-}
-</ul>
-</article>
-}
-</main>
-<footer>
-@if let Some(blurb) = config.footer_blurb() {
-<div>@blurb</div>
-}
-<p>
-The source code for this project can be found at
-<a href="@config.source_code()">@config.source_code()</a>
-</p>
-</footer>
-</body>
+<section>
+<h3>Connected Servers</h3>
+@if nodes.is_empty() {
+<p>There are no connected servers at this time.</p>
+} else {
+<ul>
+@for node in nodes {
+@if let Some(inst) = node.instance.as_ref() {
+<li>
+@:instance(inst, node.info.as_ref().map(|info| { info.software.as_ref() }), node.contact.as_ref(), &node.base)
+</li>
+} else {
+@if let Some(inf) = node.info.as_ref() {
+<li>
+@:info(inf, &node.base)
+</li>
+}
+}
+}
+</ul>
+}
+</section>
+<section>
+<h3>Joining</h3>
+<article class="joining">
+<p>
+If you are the admin of a server that supports activitypub relays, you can add
+this relay to your server.
+</p>
+<h4>Mastodon</h4>
+<p>
+Mastodon admins can add this relay by adding
+<pre>@config.generate_url(UrlKind::Inbox)</pre> in their relay settings.
+</p>
+<h4>Pleroma</h4>
+<p>
+Pleroma admins can add this relay by adding
+<pre>@config.generate_url(UrlKind::Actor)</pre>
+to their relay settings (I don't actually know how pleroma handles adding
+relays, is it still a mix command?).
+</p>
+<h4>Others</h4>
+<p>
+Consult the documentation for your server. It's likely that it follows either
+Mastodon or Pleroma's relay formatting.
+</p>
+</article>
+</section>
+</main>
+<footer>
+<p>
+The source code for this project can be found at
+<a href="@config.source_code()">@config.source_code()</a>
+</p>
+</footer>
+</body>
</html>

View file

@@ -3,14 +3,14 @@
@(info: &Info, base: &IriString)
-<section class="info">
+<article class="info">
@if let Some(authority) = base.authority_str() {
<h4 class="padded"><a href="@base">@authority</a></h4>
}
<p class="padded">
Running @info.software, version @info.version.
@if info.reg {
Registration is open
}
</p>
-</section>
+</article>
View file

@@ -1,37 +1,37 @@
-@use crate::{db::{Contact, Instance}, templates::admin_html};
+@use crate::{db::{Contact, Instance}, templates::admin};
@use activitystreams::iri_string::types::IriString;
@(instance: &Instance, software: Option<&str>, contact: Option<&Contact>, base: &IriString)
-<section class="instance">
+<article class="instance">
<h4 class="padded"><a href="@base">@instance.title</a></h4>
<p class="padded">
@if let Some(software) = software {
Running @software, version @instance.version.
}
@if instance.reg {
<br>Registration is open.
@if instance.requires_approval {
Accounts must be approved by an admin.
}
} else{
Registration is closed
}
</p>
@if !instance.description.trim().is_empty() || contact.is_some() {
<div class="instance-info">
@if !instance.description.trim().is_empty() {
<h5 class="instance-description">@instance.title's description:</h5>
<div class="description">
<div class="please-stay">
@Html(instance.description.trim())
</div>
</div>
}
@if let Some(contact) = contact {
<h5 class="instance-admin">@instance.title's admin:</h5>
-@:admin_html(contact, base)
+@:admin(contact, base)
}
</div>
}
-</section>
+</article>