Compare commits


No commits in common. "main" and "v0.1.0-r104" have entirely different histories.

112 changed files with 5790 additions and 10891 deletions

@@ -1,2 +0,0 @@
[build]
rustflags = ["--cfg", "tokio_unstable"]

.env (15 changed lines)

@@ -1,13 +1,2 @@
HOSTNAME=localhost:8079
PORT=8079
HTTPS=false
DEBUG=true
RESTRICTED_MODE=true
VALIDATE_SIGNATURES=false
API_TOKEN=somesecretpassword
FOOTER_BLURB="Contact <a href=\"https://masto.asonix.dog/@asonix\">@asonix</a> for inquiries"
LOCAL_DOMAINS="masto.asonix.dog"
LOCAL_BLURB="<p>Welcome to my cool relay where I have cool relay things happening. I hope you enjoy your stay!</p>"
# OPENTELEMETRY_URL=http://localhost:4317
PROMETHEUS_ADDR=127.0.0.1
PROMETHEUS_PORT=9000
OUT_DIR="compiled_templates"
DATABASE_URL=postgres://ap_actix:ap_actix@localhost:5432/ap_actix

@@ -1,61 +0,0 @@
on:
  push:
    branches:
      - '*'
  pull_request:
    branches:
      - main
jobs:
  clippy:
    runs-on: docker
    container:
      image: docker.io/asonix/actions-base-image:0.1
    steps:
      -
        name: Checkout relay
        uses: https://github.com/actions/checkout@v4
      -
        name: Cargo Cache
        uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
      -
        name: Clippy
        run: |
          cargo clippy --no-default-features -- -D warnings
  tests:
    runs-on: docker
    container:
      image: docker.io/asonix/actions-base-image:0.1
    steps:
      -
        name: Checkout relay
        uses: https://github.com/actions/checkout@v4
      -
        name: Cargo Cache
        uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
      -
        name: Test
        run: cargo test
  check:
    strategy:
      fail-fast: false
      matrix:
        target:
          - x86_64-unknown-linux-musl
          - armv7-unknown-linux-musleabihf
          - aarch64-unknown-linux-musl
    runs-on: docker
    container:
      image: docker.io/asonix/actions-base-image:0.1
    steps:
      -
        name: Checkout relay
        uses: https://github.com/actions/checkout@v4
      -
        name: Cargo Cache
        uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
      -
        name: Debug builds
        run: cargo zigbuild --target ${{ matrix.target }}
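The same checks can be reproduced outside CI. A minimal sketch, assuming rustup and cargo-zigbuild are installed locally:

```bash
# Mirror the clippy, test, and cross-build jobs above for one target
rustup target add x86_64-unknown-linux-musl
cargo clippy --no-default-features -- -D warnings
cargo test
cargo zigbuild --target x86_64-unknown-linux-musl
```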

@@ -1,226 +0,0 @@
on:
  push:
    tags:
      - 'v*.*.*'
env:
  REGISTRY_IMAGE: asonix/relay
jobs:
  clippy:
    runs-on: base-image
    container:
      image: docker.io/asonix/actions-base-image:0.1
    steps:
      -
        name: Checkout relay
        uses: https://github.com/actions/checkout@v4
      -
        name: Cargo Cache
        uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
      -
        name: Clippy
        run: |
          # cargo clippy --no-default-features -- -D warnings
          cargo clippy --no-default-features
  tests:
    runs-on: docker
    container:
      image: docker.io/asonix/actions-base-image:0.1
    steps:
      -
        name: Checkout relay
        uses: https://github.com/actions/checkout@v4
      -
        name: Cargo Cache
        uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
      -
        name: Test
        run: cargo test
  build:
    needs:
      - clippy
      - tests
    runs-on: docker
    container:
      image: docker.io/asonix/actions-base-image:0.1
    strategy:
      fail-fast: false
      matrix:
        info:
          - target: x86_64-unknown-linux-musl
            artifact: linux-amd64
            platform: linux/amd64
          - target: armv7-unknown-linux-musleabihf
            artifact: linux-arm32v7
            platform: linux/arm/v7
          - target: aarch64-unknown-linux-musl
            artifact: linux-arm64v8
            platform: linux/arm64
    steps:
      -
        name: Checkout relay
        uses: https://github.com/actions/checkout@v4
      -
        name: Cargo Cache
        uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
      -
        name: Prepare Platform
        run: |
          platform=${{ matrix.info.platform }}
          echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
        shell: bash
      -
        name: Docker meta
        id: meta
        uses: https://github.com/docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY_IMAGE }}
          flavor: |
            latest=auto
            suffix=-${{ matrix.info.artifact }}
          tags: |
            type=raw,value=latest,enable={{ is_default_branch }}
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=semver,pattern={{major}}
      -
        name: Set up QEMU
        uses: https://github.com/docker/setup-qemu-action@v3
      -
        name: Set up Docker Buildx
        uses: https://github.com/docker/setup-buildx-action@v3
      -
        name: Docker login
        uses: https://github.com/docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      -
        name: Compile relay
        run: cargo zigbuild --target ${{ matrix.info.target }} --release
      -
        name: Prepare artifacts
        run: |
          mkdir artifacts
          cp target/${{ matrix.info.target }}/release/relay artifacts/relay-${{ matrix.info.artifact }}
      -
        uses: https://github.com/actions/upload-artifact@v3
        with:
          name: binaries
          path: artifacts/
      -
        name: Prepare binary
        run: |
          cp target/${{ matrix.info.target }}/release/relay docker/forgejo/relay
      -
        name: Build and push ${{ matrix.info.platform }} docker image
        id: build
        uses: docker/build-push-action@v5
        with:
          context: ./docker/forgejo
          platforms: ${{ matrix.info.platform }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          outputs: type=image,name=${{ env.REGISTRY_IMAGE }},name-canonical=true,push=true
      -
        name: Export digest
        run: |
          mkdir -p /tmp/digests
          digest="${{ steps.build.outputs.digest }}"
          touch "/tmp/digests/${digest#sha256:}"
          echo "Created /tmp/digests/${digest#sha256:}"
        shell: bash
      -
        name: Upload ${{ matrix.info.platform }} digest
        uses: https://github.com/actions/upload-artifact@v3
        with:
          name: digests
          path: /tmp/digests/*
          if-no-files-found: error
          retention-days: 1
  publish-docker:
    runs-on: docker
    container:
      image: docker.io/asonix/actions-base-image:0.1
    needs: [build]
    steps:
      -
        name: Download digests
        uses: https://github.com/actions/download-artifact@v3
        with:
          name: digests
          path: /tmp/digests
          pattern: digests-*
          merge-multiple: true
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      -
        name: Docker login
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      -
        name: Docker meta
        id: meta
        uses: https://github.com/docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY_IMAGE }}
          flavor: |
            latest=auto
          tags: |
            type=raw,value=latest,enable={{ is_default_branch }}
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=semver,pattern={{major}}
      -
        name: Create manifest list and push
        working-directory: /tmp/digests
        run: |
          tags=$(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "${DOCKER_METADATA_OUTPUT_JSON}")
          images=$(printf "${{ env.REGISTRY_IMAGE }}@sha256:%s " *)
          echo "Running 'docker buildx imagetools create ${tags[@]} ${images[@]}'"
          docker buildx imagetools create ${tags[@]} ${images[@]}
        shell: bash
      -
        name: Inspect Image
        run: |
          docker buildx imagetools inspect ${{ env.REGISTRY_IMAGE }}:${{ steps.meta.outputs.version }}
  publish-forgejo:
    needs: [build]
    runs-on: docker
    container:
      image: docker.io/asonix/actions-base-image:0.1
    steps:
      - uses: https://github.com/actions/download-artifact@v3
        with:
          name: binaries
          path: artifacts/
          merge-multiple: true
      - uses: actions/forgejo-release@v1
        with:
          direction: upload
          token: ${{ secrets.GITHUB_TOKEN }}
          release-dir: artifacts/
  publish-crate:
    needs: [build]
    runs-on: docker
    container:
      image: docker.io/asonix/actions-base-image:0.1
    steps:
      -
        name: Checkout relay
        uses: https://github.com/actions/checkout@v4
      -
        name: Cargo Cache
        uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
      -
        name: Publish Crate
        run: cargo publish --token ${{ secrets.CRATES_IO_TOKEN }}

.gitignore (vendored, 4 changed lines)

@@ -1,6 +1,2 @@
/target
/artifacts
/sled
/.direnv
/.envrc
/result

Cargo.lock (generated, 5190 changed lines)

File diff suppressed because it is too large

@@ -1,114 +1,60 @@
[package]
name = "ap-relay"
description = "A simple activitypub relay"
version = "0.3.116"
authors = ["asonix <asonix@asonix.dog>"]
license = "AGPL-3.0"
readme = "README.md"
repository = "https://git.asonix.dog/asonix/relay"
keywords = ["activitypub", "relay"]
edition = "2021"
build = "src/build.rs"
[[bin]]
name = "relay"
path = "src/main.rs"
[profile.release]
strip = true
[features]
console = ["dep:console-subscriber"]
default = []
description = "A simple activitypub relay"
version = "0.1.0"
authors = ["asonix <asonix@asonix.dog>"]
license-file = "LICENSE"
readme = "README.md"
repository = "https://git.asonix.dog/asonix/ap-relay"
keywords = ["activitypub", "relay"]
edition = "2018"
build = "src/build.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
actix-web = { version = "4.4.0", default-features = false, features = ["compress-brotli", "compress-gzip", "rustls-0_23"] }
actix-webfinger = { version = "0.5.0", default-features = false }
activitystreams = "0.7.0-alpha.25"
activitystreams-ext = "0.1.0-alpha.3"
ammonia = "4.0.0"
async-cpupool = "0.3.0"
bcrypt = "0.16"
base64 = "0.22"
clap = { version = "4.0.0", features = ["derive"] }
color-eyre = "0.6.2"
config = { version = "0.14.0", default-features = false, features = ["toml", "json", "yaml"] }
console-subscriber = { version = "0.4", optional = true }
dashmap = "6.0.1"
anyhow = "1.0"
actix-rt = "1.1.1"
actix-web = { version = "3.0.0-alpha.2", features = ["rustls"] }
actix-webfinger = "0.3.0-alpha.6"
activitystreams-new = { git = "https://git.asonix.dog/asonix/activitystreams-sketch" }
activitystreams-ext = { git = "https://git.asonix.dog/asonix/activitystreams-ext" }
ammonia = "3.1.0"
async-mutex = "1.0.1"
async-trait = "0.1.24"
background-jobs = "0.8.0-alpha.2"
bytes = "0.5.4"
base64 = "0.12"
config = "0.10.1"
deadpool = "0.5.1"
deadpool-postgres = "0.5.5"
dotenv = "0.15.0"
futures-core = "0.3.30"
lru = "0.12.0"
metrics = "0.23.0"
metrics-exporter-prometheus = { version = "0.15.0", default-features = false, features = [
"http-listener",
] }
metrics-util = "0.17.0"
env_logger = "0.7.1"
futures = "0.3.4"
http-signature-normalization-actix = { version = "0.3.0-alpha.11", default-features = false, features = ["sha-2"] }
log = "0.4"
lru = "0.5.1"
mime = "0.3.16"
minify-html = "0.15.0"
opentelemetry = "0.27.1"
opentelemetry_sdk = { version = "0.27", features = ["rt-tokio"] }
opentelemetry-otlp = { version = "0.27", features = ["grpc-tonic"] }
pin-project-lite = "0.2.9"
# pinned to metrics-util
quanta = "0.12.0"
rand = "0.8"
reqwest = { version = "0.12", default-features = false, features = ["rustls-tls", "stream"]}
reqwest-middleware = { version = "0.4", default-features = false, features = ["json"] }
reqwest-tracing = "0.5.0"
ring = "0.17.5"
rsa = "0.9"
rsa-magic-public-key = "0.8.0"
rustls = { version = "0.23.0", default-features = false, features = ["ring", "logging", "std", "tls12"] }
rustls-channel-resolver = "0.3.0"
rustls-pemfile = "2"
num_cpus = "1.12"
pretty_env_logger = "0.4.0"
rand = "0.7"
rsa = "0.2"
rsa-magic-public-key = "0.1.1"
rsa-pem = "0.1.0"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
sled = "0.34.7"
streem = "0.2.0"
teloxide = { version = "0.13.0", default-features = false, features = [
"ctrlc_handler",
"macros",
"rustls",
] }
thiserror = "2.0"
time = { version = "0.3.17", features = ["serde"] }
tracing = "0.1"
tracing-error = "0.2"
tracing-log = "0.2"
tracing-opentelemetry = "0.28"
tracing-subscriber = { version = "0.3", features = [
"ansi",
"env-filter",
"fmt",
] }
tokio = { version = "1", features = ["full", "tracing"] }
uuid = { version = "1", features = ["v4", "serde"] }
[dependencies.background-jobs]
version = "0.19.0"
default-features = false
features = ["error-logging", "metrics", "tokio"]
[dependencies.http-signature-normalization-actix]
version = "0.11.1"
default-features = false
features = ["server", "ring"]
[dependencies.http-signature-normalization-reqwest]
version = "0.13.0"
default-features = false
features = ["middleware", "ring"]
[dependencies.tracing-actix-web]
version = "0.7.9"
sha2 = "0.9"
structopt = "0.3.12"
thiserror = "1.0"
tokio = { version = "0.2.13", features = ["sync"] }
tokio-postgres = { version = "0.5.1", features = ["with-serde_json-1", "with-uuid-0_8", "with-chrono-0_4"] }
ttl_cache = "0.5.1"
uuid = { version = "0.8", features = ["v4", "serde"] }
[build-dependencies]
color-eyre = "0.6.2"
anyhow = "1.0"
dotenv = "0.15.0"
ructe = { version = "0.17.0", features = ["sass", "mime03"] }
toml = "0.8.0"
ructe = { version = "0.11.0", features = ["sass", "mime03"] }
[profile.dev.package.rsa]
opt-level = 3

LICENSE (1078 changed lines)

File diff suppressed because it is too large

README.md (231 changed lines)

@@ -1,55 +1,31 @@
# AodeRelay
_A simple and efficient activitypub relay_
### Installation
#### Docker
If running docker, you can start the relay with the following command:
```
$ sudo docker run --rm -it \
-v "$(pwd):/mnt/" \
-e ADDR=0.0.0.0 \
-e SLED_PATH=/mnt/sled/db-0.34 \
-p 8080:8080 \
asonix/relay:0.3.85
```
This will launch the relay with the database stored in "./sled/db-0.34" and listening on port 8080
#### Cargo
With cargo installed, the relay can be installed to your cargo bin directory with the following command
```
$ cargo install ap-relay
```
Then it can be run with this:
```
$ ADDR=0.0.0.0 relay
```
This will launch the relay with the database stored in "./sled/db-0.34" and listening on port 8080
#### Source
The relay can be launched directly from this git repository with the following commands:
```
$ git clone https://git.asonix.dog/asonix/relay
$ ADDR=0.0.0.0 cargo run --release
```
### Usage
To simply run the server, the command is as follows
```bash
$ ./relay
```
#### Administration
> **NOTE:** The server _must be running_ in order to update the lists with the following commands
To learn about any other tasks, the `--help` flag can be passed
```bash
$ ./relay --help
relay 0.1.0
An activitypub relay
Usage: relay [OPTIONS]
USAGE:
relay [FLAGS] [OPTIONS]
Options:
-b <BLOCKS> A list of domains that should be blocked
-a <ALLOWED> A list of domains that should be allowed
-u, --undo Undo allowing or blocking domains
-h, --help Print help information
FLAGS:
-h, --help Prints help information
-j, --jobs-only Only process background jobs, do not start the relay server
-n, --no-jobs Only run the relay server, do not process background jobs
-u, --undo Undo whitelisting or blocking domains
-V, --version Prints version information
OPTIONS:
-b <blocks>... A list of domains that should be blocked
-w <whitelists>... A list of domains that should be whitelisted
```
To add domains to the blocklist, use the `-b` flag and pass a list of domains
@@ -60,119 +36,19 @@ To remove domains from the blocklist, simply pass the `-u` flag along with `-b`
```bash
$ ./relay -ub asonix.dog blimps.xyz
```
The same rules apply for allowing domains, although domains are allowed with the `-a` flag
The same rules apply for whitelisting domains, although domains are whitelisted with the `-w` flag
```bash
$ ./relay -a asonix.dog blimps.xyz
$ ./relay -ua asonix.dog blimps.xyz
$ ./relay -w asonix.dog blimps.xyz
$ ./relay -uw asonix.dog blimps.xyz
```
### Configuration
By default, all these values are set to development values. These are read from the environment, or
from the `.env` file in the working directory.
```env
HOSTNAME=localhost:8080
ADDR=127.0.0.1
PORT=8080
DEBUG=true
RESTRICTED_MODE=false
VALIDATE_SIGNATURES=false
HTTPS=false
PRETTY_LOG=true
PUBLISH_BLOCKS=false
SLED_PATH=./sled/db-0.34
```
To run this server in production, you'll likely want to set most of them
```env
HOSTNAME=relay.my.tld
ADDR=0.0.0.0
PORT=8080
DEBUG=false
RESTRICTED_MODE=false
VALIDATE_SIGNATURES=true
HTTPS=true
PRETTY_LOG=false
PUBLISH_BLOCKS=true
SLED_PATH=./sled/db-0.34
RUST_LOG=warn
API_TOKEN=somepasswordishtoken
OPENTELEMETRY_URL=localhost:4317
TELEGRAM_TOKEN=secret
TELEGRAM_ADMIN_HANDLE=your_handle
TLS_KEY=/path/to/key
TLS_CERT=/path/to/cert
FOOTER_BLURB="Contact <a href=\"https://masto.asonix.dog/@asonix\">@asonix</a> for inquiries"
LOCAL_DOMAINS=masto.asonix.dog
LOCAL_BLURB="<p>Welcome to my cool relay where I have cool relay things happening. I hope you enjoy your stay!</p>"
PROMETHEUS_ADDR=0.0.0.0
PROMETHEUS_PORT=9000
CLIENT_TIMEOUT=10
DELIVER_CONCURRENCY=8
SIGNATURE_THREADS=2
```
Whitelisted domains are only checked against incoming activities if `WHITELIST_MODE` is enabled.
Blocks can be published in the nodeinfo metadata by setting `PUBLISH_BLOCKS` to true
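The published list is reachable through standard NodeInfo discovery; a quick outside check might look like this (a sketch using the standard well-known endpoint, not a relay-specific route):

```bash
# Discover the NodeInfo document advertised by the relay; when
# PUBLISH_BLOCKS=true, the blocked domains appear in the metadata of the
# document this discovery response points at
curl -s https://relay.my.tld/.well-known/nodeinfo
```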
#### Descriptions
##### `HOSTNAME`
The domain or IP address the relay is hosted on. If you launch the relay on `example.com`, that would be your HOSTNAME. The default is `localhost:8080`
##### `ADDR`
The address the server binds to. By default, this is `127.0.0.1`, so for production cases it should be set to `0.0.0.0` or another public address.
##### `PORT`
The port the server binds to, this is `8080` by default but can be changed if needed.
##### `DEBUG`
Whether to print incoming activities to the console when requests hit the /inbox route. This defaults to `true`, but should be set to `false` in production cases. Since every activity sent to the relay is public anyway, this doesn't represent a security risk.
##### `RESTRICTED_MODE`
This setting enables an 'allowlist' setup where only servers that have been explicitly enabled through the `relay -a` command can join the relay. This is `false` by default. If `RESTRICTED_MODE` is not enabled, then manually allowing domains with `relay -a` has no effect.
##### `VALIDATE_SIGNATURES`
This setting enforces checking HTTP signatures on incoming activities. It defaults to `true`
##### `HTTPS`
Whether the current server is running on an HTTPS port or not. This is used for generating URLs to the current running relay. By default it is set to `true`
##### `PUBLISH_BLOCKS`
Whether or not to publish a list of blocked domains in the `nodeinfo` metadata for the server. It defaults to `false`.
##### `SLED_PATH`
Where to store the on-disk database of connected servers. This defaults to `./sled/db-0.34`.
##### `RUST_LOG`
The log level to print. Available levels are `ERROR`, `WARN`, `INFO`, `DEBUG`, and `TRACE`. You can also specify module paths to enable some logs but not others, such as `RUST_LOG=warn,tracing_actix_web=info,relay=info`. This defaults to `warn`
##### `SOURCE_REPO`
The URL to the source code for the relay. This defaults to `https://git.asonix.dog/asonix/relay`, but should be changed if you're running a fork hosted elsewhere.
##### `REPOSITORY_COMMIT_BASE`
The base path of the repository commit hash reference. For example, `/src/commit/` for Gitea, `/tree/` for GitLab.
##### `API_TOKEN`
The secret token used to access the admin APIs. This must be set for the command line to function
##### `OPENTELEMETRY_URL`
A URL for exporting opentelemetry spans. This is mostly useful for debugging. There is no default, since most people probably don't run an opentelemetry collector.
##### `TELEGRAM_TOKEN`
A Telegram Bot Token for running the relay administration bot. There is no default.
##### `TELEGRAM_ADMIN_HANDLE`
The handle of the telegram user allowed to administer the relay. There is no default.
##### `TLS_KEY`
Optional - This is specified if you are running the relay directly on the internet and have a TLS key to provide HTTPS for your relay
##### `TLS_CERT`
Optional - This is specified if you are running the relay directly on the internet and have a TLS certificate chain to provide HTTPS for your relay
##### `FOOTER_BLURB`
Optional - Add custom notes in the footer of the page
##### `LOCAL_DOMAINS`
Optional - domains of mastodon servers run by the same admin as the relay
##### `LOCAL_BLURB`
Optional - description for the relay
##### `PROMETHEUS_ADDR`
Optional - Address to bind to for serving the prometheus scrape endpoint
##### `PROMETHEUS_PORT`
Optional - Port to bind to for serving the prometheus scrape endpoint
##### `CLIENT_TIMEOUT`
Optional - How long the relay will hold open a connection (in seconds) to a remote server during
fetches and deliveries. This defaults to 10
##### `DELIVER_CONCURRENCY`
Optional - How many deliver requests the relay should allow to be in-flight per thread. The default
is 8
##### `SIGNATURE_THREADS`
Optional - Override number of threads used for signing and verifying requests. Default is
`std::thread::available_parallelism()` (It tries to detect how many cores you have). If it cannot
detect the correct number of cores, it falls back to 1.
##### `PROXY_URL`
Optional - URL of an HTTP proxy to forward outbound requests through
##### `PROXY_USERNAME`
Optional - username to provide to the HTTP proxy set with `PROXY_URL` through HTTP Basic Auth
##### `PROXY_PASSWORD`
Optional - password to provide to the HTTP proxy set with `PROXY_URL` through HTTP Basic Auth
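A sketch of the three proxy settings used together (the proxy address and credentials are illustrative):

```bash
# Forward all outbound requests through an authenticated HTTP proxy
PROXY_URL=http://proxy.internal:3128 \
PROXY_USERNAME=relay \
PROXY_PASSWORD=somesecret \
./relay
```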
For advanced setups, it may be useful to run the relay API and the background tasks in separate
processes, possibly on separate hosts. The `-j` and `-n` flags have been provided for this purpose.
By passing `-n`, a relay can be spawned that handles no deliveries. By passing `-j`, a relay will
not be spawned, but any deliveries existing in the database will be processed.
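For example, a two-process split might look like this (host layout is illustrative; both processes must point at the same database):

```bash
# Process 1: serve the relay API only, queueing deliveries
ADDR=0.0.0.0 ./relay -n
# Process 2, possibly on another host: process queued deliveries, no server
./relay -j
```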
### Subscribing
Mastodon admins can subscribe to this relay by adding the `/inbox` route to their relay settings.
@@ -192,16 +68,10 @@ example, if the server is `https://relay.my.tld`, the correct URL would be
- Follow Public, become a listener of the relay
- Undo Follow {self-actor}, stop listening on the relay, an Undo Follow will be sent back
- Undo Follow Public, stop listening on the relay
- Delete {anything}, the Delete {anything} is relayed verbatim to listening servers.
- Delete {anything}, the Delete {anything} is relayed verbatim to listening servers
Note that this activity will likely be rejected by the listening servers unless it has been
signed with a JSON-LD signature
- Update {anything}, the Update {anything} is relayed verbatim to listening servers.
Note that this activity will likely be rejected by the listening servers unless it has been
signed with a JSON-LD signature
- Add {anything}, the Add {anything} is relayed verbatim to listening servers.
Note that this activity will likely be rejected by the listening servers unless it has been
signed with a JSON-LD signature
- Remove {anything}, the Remove {anything} is relayed verbatim to listening servers.
- Update {anything}, the Update {anything} is relayed verbatim to listening servers
Note that this activity will likely be rejected by the listening servers unless it has been
signed with a JSON-LD signature
@@ -209,17 +79,48 @@ example, if the server is `https://relay.my.tld`, the correct URL would be
- Webfinger
- NodeInfo
### Known issues
Pleroma and Akkoma do not support validating JSON-LD signatures, meaning many activities such as Delete, Update, Add, and Remove will be rejected with a message similar to `WARN: Response from https://example.com/inbox, "Invalid HTTP Signature"`. This is normal and not an issue with the relay.
### Configuration
By default, all these values are set to development values. These are read from the environment, or
from the `.env` file in the working directory.
```env
HOSTNAME=localhost:8080
ADDR=127.0.0.1
PORT=8080
DEBUG=true
WHITELIST_MODE=false
VALIDATE_SIGNATURES=false
HTTPS=false
DATABASE_URL=
PRETTY_LOG=true
PUBLISH_BLOCKS=false
MAX_CONNECTIONS=4 # how many postgres connections should be made
```
To run this server in production, you'll likely want to set most of them
```env
HOSTNAME=relay.my.tld
ADDR=0.0.0.0
PORT=8080
DEBUG=false
WHITELIST_MODE=false
VALIDATE_SIGNATURES=true
HTTPS=true
DATABASE_URL=postgres://pg_user:pg_pass@pg_host:pg_port/pg_database
PRETTY_LOG=false
PUBLISH_BLOCKS=true
MAX_CONNECTIONS=16
```
### Contributing
Feel free to open issues for anything you find wrong. Please note that any contributed code will be licensed under the AGPLv3.
Unless otherwise stated, all contributions to this project will be licensed under the CSL with
the exceptions listed in the License section of this file.
### License
Copyright © 2022 Riley Trautman
AodeRelay is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
AodeRelay is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. This file is part of AodeRelay.
You should have received a copy of the GNU General Public License along with AodeRelay. If not, see [http://www.gnu.org/licenses/](http://www.gnu.org/licenses/).
This work is licensed under the Cooperative Software License. This is not a Free Software
License, but may be considered a "source-available License." For most hobbyists, self-employed
developers, worker-owned companies, and cooperatives, this software can be used in most
projects so long as this software is distributed under the terms of the CSL. For more
information, see the provided LICENSE file. If none exists, the license can be found online
[here](https://lynnesbian.space/csl/). If you are a free software project and wish to use this
software under the terms of the GNU Affero General Public License, please contact me at
[asonix@asonix.dog](mailto:asonix@asonix.dog) and we can sort that out. If you wish to use this
project under any other license, especially in proprietary software, the answer is likely no.

diesel.toml (new file, 5 lines)

@@ -0,0 +1,5 @@
# For documentation on how to configure this file,
# see diesel.rs/guides/configuring-diesel-cli
[print_schema]
file = "src/schema.rs"

@@ -1,24 +0,0 @@
FROM alpine:3.19
ARG UID=991
ARG GID=991
ENV \
UID=${UID} \
GID=${GID}
USER root
RUN \
addgroup -g "${GID}" app && \
adduser -D -G app -u "${UID}" -g "" -h /opt/app app && \
apk add tini && \
chown -R app:app /mnt
COPY relay /usr/local/bin/relay
USER app
EXPOSE 6669
EXPOSE 8080
VOLUME /mnt
ENTRYPOINT ["/sbin/tini", "--"]
CMD ["/usr/local/bin/relay"]

@@ -0,0 +1,72 @@
FROM rustembedded/cross:x86_64-unknown-linux-musl AS amd64-builder
ARG UID=991
ARG GID=991
ENV TOOLCHAIN=stable
ENV TARGET=x86_64-unknown-linux-musl
ENV TOOL=x86_64-linux-musl
RUN \
apt-get update && \
apt-get upgrade -y
RUN \
addgroup --gid "${GID}" build && \
adduser \
--disabled-password \
--gecos "" \
--ingroup build \
--uid "${UID}" \
--home /opt/build \
build
ADD https://sh.rustup.rs /opt/build/rustup.sh
RUN \
chown -R build:build /opt/build
USER build
WORKDIR /opt/build
ENV PATH="$PATH:/opt/build/.cargo/bin"
RUN \
chmod +x rustup.sh && \
./rustup.sh --default-toolchain $TOOLCHAIN --profile minimal -y && \
rustup target add $TARGET
FROM amd64-builder as builder
ARG TAG=master
ARG REPOSITORY=https://git.asonix.dog/asonix/ap-relay
ARG BINARY=relay
RUN \
git clone -b $TAG $REPOSITORY repo
WORKDIR /opt/build/repo
RUN \
cargo build --release --target $TARGET && \
$TOOL-strip target/$TARGET/release/$BINARY
FROM amd64/alpine:3.12
ARG UID=991
ARG GID=991
ARG BINARY=relay
RUN \
apk add tini && \
addgroup -g $GID relay && \
adduser -D -G relay -u $UID -g "" -h /opt/relay relay && \
chown -R relay:relay /opt/relay
COPY --from=builder /opt/build/repo/target/x86_64-unknown-linux-musl/release/relay /usr/bin/relay
EXPOSE 8080
WORKDIR /opt/relay
USER relay
ENTRYPOINT ["/sbin/tini", "--"]
CMD ["relay"]

@@ -0,0 +1,72 @@
FROM rustembedded/cross:arm-unknown-linux-musleabihf AS arm32v7-builder
ARG UID=991
ARG GID=991
ENV TOOLCHAIN=stable
ENV TARGET=arm-unknown-linux-musleabihf
ENV TOOL=arm-linux-musleabihf
RUN \
apt-get update && \
apt-get upgrade -y
RUN \
addgroup --gid "${GID}" build && \
adduser \
--disabled-password \
--gecos "" \
--ingroup build \
--uid "${UID}" \
--home /opt/build \
build
ADD https://sh.rustup.rs /opt/build/rustup.sh
RUN \
chown -R build:build /opt/build
USER build
WORKDIR /opt/build
ENV PATH="$PATH:/opt/build/.cargo/bin"
RUN \
chmod +x rustup.sh && \
./rustup.sh --default-toolchain $TOOLCHAIN --profile minimal -y && \
rustup target add $TARGET
FROM arm32v7-builder as builder
ARG TAG=master
ARG REPOSITORY=https://git.asonix.dog/asonix/ap-relay
ARG BINARY=relay
RUN \
git clone -b $TAG $REPOSITORY repo
WORKDIR /opt/build/repo
RUN \
cargo build --release --target $TARGET && \
$TOOL-strip target/$TARGET/release/$BINARY
FROM arm32v7/alpine:3.12
ARG UID=991
ARG GID=991
ARG BINARY=relay
RUN \
apk add tini && \
addgroup -g $GID relay && \
adduser -D -G relay -u $UID -g "" -h /opt/relay relay && \
chown -R relay:relay /opt/relay
COPY --from=builder /opt/build/repo/target/arm-unknown-linux-musleabihf/release/relay /usr/bin/relay
EXPOSE 8080
WORKDIR /opt/relay
USER relay
ENTRYPOINT ["/sbin/tini", "--"]
CMD ["relay"]

@@ -0,0 +1,72 @@
FROM rustembedded/cross:aarch64-unknown-linux-musl AS aarch64-builder
ARG UID=991
ARG GID=991
ENV TOOLCHAIN=stable
ENV TARGET=aarch64-unknown-linux-musl
ENV TOOL=aarch64-linux-musl
RUN \
apt-get update && \
apt-get upgrade -y
RUN \
addgroup --gid "${GID}" build && \
adduser \
--disabled-password \
--gecos "" \
--ingroup build \
--uid "${UID}" \
--home /opt/build \
build
ADD https://sh.rustup.rs /opt/build/rustup.sh
RUN \
chown -R build:build /opt/build
USER build
WORKDIR /opt/build
ENV PATH="PATH:/opt/build/.cargo/bin"
RUN \
chmod +x rustup.sh && \
./rustup.sh --default-toolchain $TOOLCHAIN --profile minimal -y && \
rustup target add $TARGET
FROM aarch64-builder as builder
ARG TAG=master
ARG REPOSITORY=https://git.asonix.dog/asonix/ap-relay
ARG BINARY=relay
RUN \
git clone -b $TAG $REPOSITORY repo
WORKDIR /opt/build/repo
RUN \
cargo build --release --target $TARGET && \
$TOOL-strip target/$TARGET/release/$BINARY
FROM arm64v8/alpine:3.12
ARG UID=991
ARG GID=991
ARG BINARY=relay
RUN \
apk add tini && \
addgroup -g $GID relay && \
adduser -D -G relay -u $UID -g "" -h /opt/relay relay && \
chown -R relay:relay /opt/relay
COPY --from=builder /opt/build/repo/target/aarch64-unknown-linux-musl/release/relay /usr/bin/relay
EXPOSE 8080
WORKDIR /opt/relay
USER relay
ENTRYPOINT ["/sbin/tini", "--"]
CMD ["relay"]

@@ -0,0 +1,11 @@
FROM asonix/diesel-cli:v1.4.0-r1-arm64v8
COPY migrations /migrations
USER root
RUN \
apt-get install -y tini && \
chown -R diesel:diesel /migrations
USER diesel
ENTRYPOINT ["/usr/bin/tini"]
CMD ["diesel", "migration", "run", "--migration-dir", "/migrations"]

docker/prod/deploy.sh (executable, new file, 72 lines)

@@ -0,0 +1,72 @@
#!/usr/bin/env bash
TAG=$1
MIGRATIONS=$2
function require() {
if [ "$1" = "" ]; then
echo "input '$2' required"
print_help
exit 1
fi
}
function print_help() {
echo "build.sh"
echo ""
echo "Usage:"
echo " build.sh [tag] [migrations]"
echo ""
echo "Args:"
echo " tag: The git tag to create and publish"
echo " migrations: (optional) Whether to build the migrations container as well"
}
function build_image() {
repo=$1
tag=$2
arch=$3
docker build \
--pull \
--build-arg TAG="${tag}" \
-f "Dockerfile.${arch}" \
-t "${repo}:${tag}-${arch}" \
-t "${repo}:latest-${arch}" \
.
docker push "${repo}:${tag}-arm64v8"
docker push "${repo}:latest-arm64v8"
}
require "$TAG" "tag"
if ! docker run --rm -it arm64v8/ubuntu:19.10 /bin/bash -c 'echo "docker is configured correctly"'; then
echo "docker is not configured to run on qemu-emulated architectures, fixing will require sudo"
sudo docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
fi
set -xe
git checkout master
git commit -m "Version $TAG"
git tag $TAG
git push origin $TAG
git push
build_image "asonix/relay" "$TAG" "arm64v8"
build_image "asonix/relay" "$TAG" "arm32v7"
build_image "asonix/relay" "$TAG" "amd64"
./manifest.sh "asonix/relay" "$TAG"
./manifest.sh "asonix/relay" "latest"
if [ "${MIGRATIONS}" = "migrations" ]; then
build_image "asonix/relay-migrations" "$TAG" arm64v8
build_image "asonix/relay-migrations" "$TAG" arm32v7
build_image "asonix/relay-migrations" "$TAG" amd64
./manifest.sh "asonix/relay-migrations" "$TAG"
./manifest.sh "asonix/relay-migrations" "latest"
fi
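Invocation takes the tag as the first argument, with an optional `migrations` flag to also build the migrations image (tag value illustrative):

```bash
./deploy.sh v0.1.0-r104 migrations
```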

@@ -1,20 +0,0 @@
version: '3.3'
services:
  relay:
    image: asonix/relay:0.3.115
    ports:
      - "8080:8080"
    restart: always
    environment:
      - HOSTNAME=relay.my.tld
      - ADDR=0.0.0.0
      - PORT=8080
      - DEBUG=false
      - RESTRICTED_MODE=false
      - VALIDATE_SIGNATURES=true
      - HTTPS=true
      - SLED_PATH=/mnt/sled/db-0.34
      - PRETTY_LOG=false
      - PUBLISH_BLOCKS=true
      - API_TOKEN=somepasswordishtoken

docker/prod/manifest.sh (executable, new file, 43 lines)

@@ -0,0 +1,43 @@
#!/usr/bin/env bash
function require() {
if [ "$1" = "" ]; then
echo "input '$2' required"
print_help
exit 1
fi
}
function print_help() {
echo "deploy.sh"
echo ""
echo "Usage:"
echo " manifest.sh [tag]"
echo ""
echo "Args:"
echo " repo: The docker repository to push the manifest to"
echo " tag: The git tag to be applied to the image manifest"
}
repo=$1
tag=$2
require "$repo" "repo"
require "$tag" "tag"
set -xe
docker manifest create $repo:$tag \
  -a $repo:$tag-arm64v8 \
  -a $repo:$tag-arm32v7 \
  -a $repo:$tag-amd64
docker manifest annotate $repo:$tag \
  $repo:$tag-arm64v8 --os linux --arch arm64 --variant v8
docker manifest annotate $repo:$tag \
  $repo:$tag-arm32v7 --os linux --arch arm --variant v7
docker manifest annotate $repo:$tag \
  $repo:$tag-amd64 --os linux --arch amd64
docker manifest push $repo:$tag --purge
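With the two positional arguments the `require` checks expect (values illustrative):

```bash
./manifest.sh asonix/relay v0.1.0-r104
```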

flake.lock (generated, 61 changed lines)

@@ -1,61 +0,0 @@
{
"nodes": {
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1710146030,
"narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1733550349,
"narHash": "sha256-NcGumB4Lr6KSDq+nIqXtNA8QwAQKDSZT7N9OTGWbTrs=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "e2605d0744c2417b09f8bf850dfca42fcf537d34",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-24.11",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"flake-utils": "flake-utils",
"nixpkgs": "nixpkgs"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

@@ -1,34 +0,0 @@
{
description = "relay";
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.11";
flake-utils.url = "github:numtide/flake-utils";
};
outputs = { self, nixpkgs, flake-utils }:
flake-utils.lib.eachDefaultSystem (system:
let
pkgs = import nixpkgs {
inherit system;
};
in
{
packages = rec {
relay = pkgs.callPackage ./relay.nix { };
default = relay;
};
apps = rec {
dev = flake-utils.lib.mkApp { drv = self.packages.${system}.relay; };
default = dev;
};
devShell = with pkgs; mkShell {
nativeBuildInputs = [ cargo cargo-outdated cargo-zigbuild clippy gcc protobuf rust-analyzer rustc rustfmt ];
RUST_SRC_PATH = "${pkgs.rust.packages.stable.rustPlatform.rustLibSrc}";
};
});
}
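Typical usage of this flake, assuming a nix with flakes enabled:

```bash
nix build .#relay   # build the relay package
nix develop         # enter a shell with cargo, clippy, rust-analyzer, etc.
```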

migrations/.gitkeep (new, empty file)

@@ -0,0 +1,6 @@
-- This file was automatically created by Diesel to setup helper functions
-- and other internal bookkeeping. This file is safe to edit, any future
-- changes will be added to existing projects as new migrations.
DROP FUNCTION IF EXISTS diesel_manage_updated_at(_tbl regclass);
DROP FUNCTION IF EXISTS diesel_set_updated_at();

@@ -0,0 +1,36 @@
-- This file was automatically created by Diesel to setup helper functions
-- and other internal bookkeeping. This file is safe to edit, any future
-- changes will be added to existing projects as new migrations.
-- Sets up a trigger for the given table to automatically set a column called
-- `updated_at` whenever the row is modified (unless `updated_at` was included
-- in the modified columns)
--
-- # Example
--
-- ```sql
-- CREATE TABLE users (id SERIAL PRIMARY KEY, updated_at TIMESTAMP NOT NULL DEFAULT NOW());
--
-- SELECT diesel_manage_updated_at('users');
-- ```
CREATE OR REPLACE FUNCTION diesel_manage_updated_at(_tbl regclass) RETURNS VOID AS $$
BEGIN
EXECUTE format('CREATE TRIGGER set_updated_at BEFORE UPDATE ON %s
FOR EACH ROW EXECUTE PROCEDURE diesel_set_updated_at()', _tbl);
END;
$$ LANGUAGE plpgsql;
CREATE OR REPLACE FUNCTION diesel_set_updated_at() RETURNS trigger AS $$
BEGIN
IF (
NEW IS DISTINCT FROM OLD AND
NEW.updated_at IS NOT DISTINCT FROM OLD.updated_at
) THEN
NEW.updated_at := current_timestamp;
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;

@@ -0,0 +1,3 @@
-- This file should undo anything in `up.sql`
DROP INDEX listeners_actor_id_index;
DROP TABLE listeners;

@@ -0,0 +1,11 @@
-- Your SQL goes here
CREATE TABLE listeners (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
actor_id TEXT UNIQUE NOT NULL,
created_at TIMESTAMP NOT NULL,
updated_at TIMESTAMP
);
CREATE INDEX listeners_actor_id_index ON listeners(actor_id);
SELECT diesel_manage_updated_at('listeners');

@@ -0,0 +1,3 @@
-- This file should undo anything in `up.sql`
DROP INDEX blocks_domain_name_index;
DROP TABLE blocks;

@@ -0,0 +1,11 @@
-- Your SQL goes here
CREATE TABLE blocks (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
domain_name TEXT UNIQUE NOT NULL,
created_at TIMESTAMP NOT NULL,
updated_at TIMESTAMP
);
CREATE INDEX blocks_domain_name_index ON blocks(domain_name);
SELECT diesel_manage_updated_at('blocks');

@@ -0,0 +1,3 @@
-- This file should undo anything in `up.sql`
DROP INDEX whitelists_domain_name_index;
DROP TABLE whitelists;

@@ -0,0 +1,11 @@
-- Your SQL goes here
CREATE TABLE whitelists (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
domain_name TEXT UNIQUE NOT NULL,
created_at TIMESTAMP NOT NULL,
updated_at TIMESTAMP
);
CREATE INDEX whitelists_domain_name_index ON whitelists(domain_name);
SELECT diesel_manage_updated_at('whitelists');

@@ -0,0 +1,3 @@
-- This file should undo anything in `up.sql`
DROP INDEX settings_key_index;
DROP TABLE settings;

@@ -0,0 +1,12 @@
-- Your SQL goes here
CREATE TABLE settings (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
key TEXT UNIQUE NOT NULL,
value TEXT NOT NULL,
created_at TIMESTAMP NOT NULL,
updated_at TIMESTAMP
);
CREATE INDEX settings_key_index ON settings(key);
SELECT diesel_manage_updated_at('settings');

@@ -0,0 +1,8 @@
-- This file should undo anything in `up.sql`
DROP TRIGGER IF EXISTS whitelists_notify ON whitelists;
DROP TRIGGER IF EXISTS blocks_notify ON blocks;
DROP TRIGGER IF EXISTS listeners_notify ON listeners;
DROP FUNCTION IF EXISTS invoke_whitelists_trigger();
DROP FUNCTION IF EXISTS invoke_blocks_trigger();
DROP FUNCTION IF EXISTS invoke_listeners_trigger();

@@ -0,0 +1,99 @@
-- Your SQL goes here
CREATE OR REPLACE FUNCTION invoke_listeners_trigger ()
RETURNS TRIGGER
LANGUAGE plpgsql
AS $$
DECLARE
rec RECORD;
channel TEXT;
payload TEXT;
BEGIN
case TG_OP
WHEN 'INSERT' THEN
rec := NEW;
channel := 'new_listeners';
payload := NEW.actor_id;
WHEN 'DELETE' THEN
rec := OLD;
channel := 'rm_listeners';
payload := OLD.actor_id;
ELSE
RAISE EXCEPTION 'Unknown TG_OP: "%". Should not occur!', TG_OP;
END CASE;
PERFORM pg_notify(channel, payload::TEXT);
RETURN rec;
END;
$$;
CREATE OR REPLACE FUNCTION invoke_blocks_trigger ()
RETURNS TRIGGER
LANGUAGE plpgsql
AS $$
DECLARE
rec RECORD;
channel TEXT;
payload TEXT;
BEGIN
case TG_OP
WHEN 'INSERT' THEN
rec := NEW;
channel := 'new_blocks';
payload := NEW.domain_name;
WHEN 'DELETE' THEN
rec := OLD;
channel := 'rm_blocks';
payload := OLD.domain_name;
ELSE
RAISE EXCEPTION 'Unknown TG_OP: "%". Should not occur!', TG_OP;
END CASE;
PERFORM pg_notify(channel, payload::TEXT);
RETURN NULL;
END;
$$;
CREATE OR REPLACE FUNCTION invoke_whitelists_trigger ()
RETURNS TRIGGER
LANGUAGE plpgsql
AS $$
DECLARE
rec RECORD;
channel TEXT;
payload TEXT;
BEGIN
case TG_OP
WHEN 'INSERT' THEN
rec := NEW;
channel := 'new_whitelists';
payload := NEW.domain_name;
WHEN 'DELETE' THEN
rec := OLD;
channel := 'rm_whitelists';
payload := OLD.domain_name;
ELSE
RAISE EXCEPTION 'Unknown TG_OP: "%". Should not occur!', TG_OP;
END CASE;
PERFORM pg_notify(channel, payload::TEXT);
RETURN rec;
END;
$$;
CREATE TRIGGER listeners_notify
AFTER INSERT OR UPDATE OR DELETE
ON listeners
FOR EACH ROW
EXECUTE PROCEDURE invoke_listeners_trigger();
CREATE TRIGGER blocks_notify
AFTER INSERT OR UPDATE OR DELETE
ON blocks
FOR EACH ROW
EXECUTE PROCEDURE invoke_blocks_trigger();
CREATE TRIGGER whitelists_notify
AFTER INSERT OR UPDATE OR DELETE
ON whitelists
FOR EACH ROW
EXECUTE PROCEDURE invoke_whitelists_trigger();
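These triggers exist so the relay can react to table changes over Postgres LISTEN/NOTIFY rather than polling. They can be exercised from psql (a sketch; the actor URL is illustrative):

```bash
psql "$DATABASE_URL" <<'SQL'
LISTEN new_listeners;
-- This INSERT fires listeners_notify, and psql reports the resulting
-- notification on the new_listeners channel after the command completes
INSERT INTO listeners (actor_id, created_at)
  VALUES ('https://example.com/actor', NOW());
SQL
```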

@@ -0,0 +1,3 @@
-- This file should undo anything in `up.sql`
DROP INDEX jobs_queue_status_index;
DROP TABLE jobs;

@@ -0,0 +1,17 @@
-- Your SQL goes here
CREATE TABLE jobs (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
job_id UUID UNIQUE NOT NULL,
job_queue TEXT NOT NULL,
job_timeout BIGINT NOT NULL,
job_updated TIMESTAMP NOT NULL,
job_status TEXT NOT NULL,
job_value JSONB NOT NULL,
job_next_run TIMESTAMP,
created_at TIMESTAMP NOT NULL,
updated_at TIMESTAMP NOT NULL DEFAULT NOW()
);
CREATE INDEX jobs_queue_status_index ON jobs(job_queue, job_status);
SELECT diesel_manage_updated_at('jobs');

@@ -0,0 +1,4 @@
-- This file should undo anything in `up.sql`
DROP TRIGGER IF EXISTS actors_notify ON actors;
DROP FUNCTION IF EXISTS invoke_actors_trigger();
DROP TABLE actors;

@@ -0,0 +1,49 @@
-- Your SQL goes here
CREATE TABLE actors (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
actor_id TEXT UNIQUE NOT NULL,
public_key TEXT NOT NULL,
public_key_id TEXT UNIQUE NOT NULL,
listener_id UUID NOT NULL REFERENCES listeners(id) ON DELETE CASCADE,
created_at TIMESTAMP NOT NULL,
updated_at TIMESTAMP NOT NULL DEFAULT NOW()
);
SELECT diesel_manage_updated_at('actors');
CREATE OR REPLACE FUNCTION invoke_actors_trigger ()
RETURNS TRIGGER
LANGUAGE plpgsql
AS $$
DECLARE
rec RECORD;
channel TEXT;
payload TEXT;
BEGIN
case TG_OP
WHEN 'INSERT' THEN
rec := NEW;
channel := 'new_actors';
payload := NEW.actor_id;
WHEN 'UPDATE' THEN
rec := NEW;
channel := 'new_actors';
payload := NEW.actor_id;
WHEN 'DELETE' THEN
rec := OLD;
channel := 'rm_actors';
payload := OLD.actor_id;
ELSE
RAISE EXCEPTION 'Unknown TG_OP: "%". Should not occur!', TG_OP;
END CASE;
PERFORM pg_notify(channel, payload::TEXT);
RETURN rec;
END;
$$;
CREATE TRIGGER actors_notify
AFTER INSERT OR UPDATE OR DELETE
ON actors
FOR EACH ROW
EXECUTE PROCEDURE invoke_actors_trigger();

@@ -0,0 +1,2 @@
-- This file should undo anything in `up.sql`
DROP TABLE nodes;

@@ -0,0 +1,12 @@
-- Your SQL goes here
CREATE TABLE nodes (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
listener_id UUID NOT NULL REFERENCES listeners(id) ON DELETE CASCADE,
nodeinfo JSONB,
instance JSONB,
contact JSONB,
created_at TIMESTAMP NOT NULL,
updated_at TIMESTAMP NOT NULL DEFAULT NOW()
);
SELECT diesel_manage_updated_at('nodes');

@@ -0,0 +1,3 @@
-- This file should undo anything in `up.sql`
DROP TRIGGER IF EXISTS nodes_notify ON nodes;
DROP FUNCTION IF EXISTS invoke_nodes_trigger();

@@ -0,0 +1,37 @@
-- Your SQL goes here
CREATE OR REPLACE FUNCTION invoke_nodes_trigger ()
RETURNS TRIGGER
LANGUAGE plpgsql
AS $$
DECLARE
rec RECORD;
channel TEXT;
payload TEXT;
BEGIN
case TG_OP
WHEN 'INSERT' THEN
rec := NEW;
channel := 'new_nodes';
payload := NEW.listener_id;
WHEN 'UPDATE' THEN
rec := NEW;
channel := 'new_nodes';
payload := NEW.listener_id;
WHEN 'DELETE' THEN
rec := OLD;
channel := 'rm_nodes';
payload := OLD.listener_id;
ELSE
RAISE EXCEPTION 'Unknown TG_OP: "%". Should not occur!', TG_OP;
END CASE;
PERFORM pg_notify(channel, payload::TEXT);
RETURN rec;
END;
$$;
CREATE TRIGGER nodes_notify
AFTER INSERT OR UPDATE OR DELETE
ON nodes
FOR EACH ROW
EXECUTE PROCEDURE invoke_nodes_trigger();

@@ -0,0 +1,2 @@
-- This file should undo anything in `up.sql`
ALTER TABLE nodes DROP CONSTRAINT nodes_listener_ids_unique;

@@ -0,0 +1,2 @@
-- Your SQL goes here
ALTER TABLE nodes ADD CONSTRAINT nodes_listener_ids_unique UNIQUE (listener_id);

@@ -0,0 +1,2 @@
-- This file should undo anything in `up.sql`
DROP TABLE media;

@@ -0,0 +1,10 @@
-- Your SQL goes here
CREATE TABLE media (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
media_id UUID UNIQUE NOT NULL,
url TEXT UNIQUE NOT NULL,
created_at TIMESTAMP NOT NULL,
updated_at TIMESTAMP NOT NULL DEFAULT NOW()
);
SELECT diesel_manage_updated_at('media');

@@ -1,23 +0,0 @@
{ lib
, nixosTests
, rustPlatform
}:
rustPlatform.buildRustPackage {
pname = "relay";
version = "0.3.116";
src = ./.;
cargoLock.lockFile = ./Cargo.lock;
RUSTFLAGS = "--cfg tokio_unstable";
nativeBuildInputs = [ ];
passthru.tests = { inherit (nixosTests) relay; };
meta = with lib; {
description = "An ActivityPub relay";
homepage = "https://git.asonix.dog/asonix/relay";
license = with licenses; [ agpl3Plus ];
};
}

@@ -20,6 +20,7 @@ body * {
}
header {
.header-text {
max-width: 700px;
margin: auto;
@@ -41,7 +42,7 @@ header {
}
}
article {
section {
background-color: #fff;
color: #333;
border: 1px solid #e5e5e5;
@@ -51,16 +52,8 @@ article {
max-width: 700px;
padding-bottom: 32px;
section {
border-bottom: 1px solid #e5e5e5;
> h4:first-child,
> p:first-child {
margin-top: 0;
}
> p:last-child {
margin-bottom: 0;
}
> p:first-child {
margin-top: 0;
}
h3 {
@@ -75,20 +68,18 @@ article {
li {
padding-top: 36px;
border-bottom: 1px solid #e5e5e5;
}
.padded {
padding: 0 24px;
}
.local-explainer,
.joining {
padding: 24px;
}
a {
transition: color .2s cubic-bezier(.3,0,.5,1);
&,
&:focus,
&:active {
@@ -169,25 +160,6 @@ footer {
.instance-admin {
margin: 24px 0;
}
.description .please-stay {
h3 {
padding: 0;
margin: 0;
border-bottom: none;
}
ul {
list-style: disc;
padding-left: 24px;
li {
padding: 0;
}
}
article section {
border-bottom: none;
}
}
}
a {
@@ -236,7 +208,7 @@ footer {
padding: 24px;
}
article {
section {
border-left: none;
border-right: none;
border-radius: 0;
@@ -266,14 +238,3 @@ footer {
}
}
}
@media(max-width: 360px) {
.admin {
flex-direction: column;
}
.right {
margin: 16px;
margin-top: 0;
}
}

@@ -1,32 +0,0 @@
use activitystreams::iri_string::types::IriString;
use std::collections::{BTreeMap, BTreeSet};
use time::OffsetDateTime;
pub mod client;
pub mod routes;
#[derive(serde::Deserialize, serde::Serialize)]
pub(crate) struct Domains {
domains: Vec<String>,
}
#[derive(serde::Deserialize, serde::Serialize)]
pub(crate) struct AllowedDomains {
pub(crate) allowed_domains: Vec<String>,
}
#[derive(serde::Deserialize, serde::Serialize)]
pub(crate) struct BlockedDomains {
pub(crate) blocked_domains: Vec<String>,
}
#[derive(serde::Deserialize, serde::Serialize)]
pub(crate) struct ConnectedActors {
pub(crate) connected_actors: Vec<IriString>,
}
#[derive(serde::Deserialize, serde::Serialize)]
pub(crate) struct LastSeen {
pub(crate) last_seen: BTreeMap<OffsetDateTime, BTreeSet<String>>,
pub(crate) never: Vec<String>,
}

@@ -1,133 +0,0 @@
use crate::{
admin::{AllowedDomains, BlockedDomains, ConnectedActors, Domains, LastSeen},
collector::Snapshot,
config::{AdminUrlKind, Config},
error::{Error, ErrorKind},
extractors::XApiToken,
};
use reqwest_middleware::ClientWithMiddleware;
use serde::de::DeserializeOwned;
pub(crate) async fn allow(
client: &ClientWithMiddleware,
config: &Config,
domains: Vec<String>,
) -> Result<(), Error> {
post_domains(client, config, domains, AdminUrlKind::Allow).await
}
pub(crate) async fn disallow(
client: &ClientWithMiddleware,
config: &Config,
domains: Vec<String>,
) -> Result<(), Error> {
post_domains(client, config, domains, AdminUrlKind::Disallow).await
}
pub(crate) async fn block(
client: &ClientWithMiddleware,
config: &Config,
domains: Vec<String>,
) -> Result<(), Error> {
post_domains(client, config, domains, AdminUrlKind::Block).await
}
pub(crate) async fn unblock(
client: &ClientWithMiddleware,
config: &Config,
domains: Vec<String>,
) -> Result<(), Error> {
post_domains(client, config, domains, AdminUrlKind::Unblock).await
}
pub(crate) async fn allowed(
client: &ClientWithMiddleware,
config: &Config,
) -> Result<AllowedDomains, Error> {
get_results(client, config, AdminUrlKind::Allowed).await
}
pub(crate) async fn blocked(
client: &ClientWithMiddleware,
config: &Config,
) -> Result<BlockedDomains, Error> {
get_results(client, config, AdminUrlKind::Blocked).await
}
pub(crate) async fn connected(
client: &ClientWithMiddleware,
config: &Config,
) -> Result<ConnectedActors, Error> {
get_results(client, config, AdminUrlKind::Connected).await
}
pub(crate) async fn stats(
client: &ClientWithMiddleware,
config: &Config,
) -> Result<Snapshot, Error> {
get_results(client, config, AdminUrlKind::Stats).await
}
pub(crate) async fn last_seen(
client: &ClientWithMiddleware,
config: &Config,
) -> Result<LastSeen, Error> {
get_results(client, config, AdminUrlKind::LastSeen).await
}
async fn get_results<T: DeserializeOwned>(
client: &ClientWithMiddleware,
config: &Config,
url_kind: AdminUrlKind,
) -> Result<T, Error> {
let x_api_token = config.x_api_token().ok_or(ErrorKind::MissingApiToken)?;
let iri = config.generate_admin_url(url_kind);
let res = client
.get(iri.as_str())
.header(XApiToken::http1_name(), x_api_token.to_string())
.send()
.await
.map_err(|e| ErrorKind::SendRequest(iri.to_string(), e.to_string()))?;
if !res.status().is_success() {
return Err(ErrorKind::Status(
iri.to_string(),
crate::http1::status_to_http02(res.status()),
)
.into());
}
let t = res
.json()
.await
.map_err(|e| ErrorKind::ReceiveResponse(iri.to_string(), e.to_string()))?;
Ok(t)
}
async fn post_domains(
client: &ClientWithMiddleware,
config: &Config,
domains: Vec<String>,
url_kind: AdminUrlKind,
) -> Result<(), Error> {
let x_api_token = config.x_api_token().ok_or(ErrorKind::MissingApiToken)?;
let iri = config.generate_admin_url(url_kind);
let res = client
.post(iri.as_str())
.header(XApiToken::http1_name(), x_api_token.to_string())
.json(&Domains { domains })
.send()
.await
.map_err(|e| ErrorKind::SendRequest(iri.to_string(), e.to_string()))?;
if !res.status().is_success() {
tracing::warn!("Failed to allow domains");
}
Ok(())
}

@@ -1,90 +0,0 @@
use crate::{
admin::{AllowedDomains, BlockedDomains, ConnectedActors, Domains, LastSeen},
collector::{MemoryCollector, Snapshot},
error::Error,
extractors::Admin,
};
use actix_web::{
web::{Data, Json},
HttpResponse,
};
use std::collections::{BTreeMap, BTreeSet};
use time::OffsetDateTime;
pub(crate) async fn allow(
admin: Admin,
Json(Domains { domains }): Json<Domains>,
) -> Result<HttpResponse, Error> {
admin.db_ref().add_allows(domains).await?;
Ok(HttpResponse::NoContent().finish())
}
pub(crate) async fn disallow(
admin: Admin,
Json(Domains { domains }): Json<Domains>,
) -> Result<HttpResponse, Error> {
admin.db_ref().remove_allows(domains).await?;
Ok(HttpResponse::NoContent().finish())
}
pub(crate) async fn block(
admin: Admin,
Json(Domains { domains }): Json<Domains>,
) -> Result<HttpResponse, Error> {
admin.db_ref().add_blocks(domains).await?;
Ok(HttpResponse::NoContent().finish())
}
pub(crate) async fn unblock(
admin: Admin,
Json(Domains { domains }): Json<Domains>,
) -> Result<HttpResponse, Error> {
admin.db_ref().remove_blocks(domains).await?;
Ok(HttpResponse::NoContent().finish())
}
pub(crate) async fn allowed(admin: Admin) -> Result<Json<AllowedDomains>, Error> {
let allowed_domains = admin.db_ref().allows().await?;
Ok(Json(AllowedDomains { allowed_domains }))
}
pub(crate) async fn blocked(admin: Admin) -> Result<Json<BlockedDomains>, Error> {
let blocked_domains = admin.db_ref().blocks().await?;
Ok(Json(BlockedDomains { blocked_domains }))
}
pub(crate) async fn connected(admin: Admin) -> Result<Json<ConnectedActors>, Error> {
let connected_actors = admin.db_ref().connected_ids().await?;
Ok(Json(ConnectedActors { connected_actors }))
}
pub(crate) async fn stats(
_admin: Admin,
collector: Data<MemoryCollector>,
) -> Result<Json<Snapshot>, Error> {
Ok(Json(collector.snapshot()))
}
pub(crate) async fn last_seen(admin: Admin) -> Result<Json<LastSeen>, Error> {
let nodes = admin.db_ref().last_seen().await?;
let mut last_seen: BTreeMap<OffsetDateTime, BTreeSet<String>> = BTreeMap::new();
let mut never = Vec::new();
for (domain, datetime) in nodes {
if let Some(datetime) = datetime {
last_seen.entry(datetime).or_default().insert(domain);
} else {
never.push(domain);
}
}
Ok(Json(LastSeen { last_seen, never }))
}

@@ -1,29 +1,19 @@
use activitystreams::{
use activitystreams_ext::{Ext1, UnparsedExtension};
use activitystreams_new::{
activity::ActorAndObject,
actor::{Actor, ApActor},
iri_string::types::IriString,
primitives::XsdAnyUri,
unparsed::UnparsedMutExt,
};
use activitystreams_ext::{Ext1, UnparsedExtension};
#[derive(Clone, serde::Deserialize, serde::Serialize)]
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
#[serde(rename_all = "camelCase")]
pub struct PublicKeyInner {
pub id: IriString,
pub owner: IriString,
pub id: XsdAnyUri,
pub owner: XsdAnyUri,
pub public_key_pem: String,
}
impl std::fmt::Debug for PublicKeyInner {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("PublicKeyInner")
.field("id", &self.id.to_string())
.field("owner", &self.owner.to_string())
.field("public_key_pem", &self.public_key_pem)
.finish()
}
}
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
#[serde(rename_all = "camelCase")]
pub struct PublicKey {
@@ -34,13 +24,11 @@ pub struct PublicKey {
#[serde(rename_all = "PascalCase")]
pub enum ValidTypes {
Accept,
Add,
Announce,
Create,
Delete,
Follow,
Reject,
Remove,
Undo,
Update,
}

@@ -1,65 +1,54 @@
use clap::Parser;
use structopt::StructOpt;
#[derive(Debug, Parser)]
#[derive(Debug, StructOpt)]
#[structopt(name = "relay", about = "An activitypub relay")]
pub(crate) struct Args {
#[arg(short, help = "A list of domains that should be blocked")]
pub struct Args {
#[structopt(short, help = "A list of domains that should be blocked")]
blocks: Vec<String>,
#[arg(short, help = "A list of domains that should be allowed")]
allowed: Vec<String>,
#[structopt(short, help = "A list of domains that should be whitelisted")]
whitelists: Vec<String>,
#[arg(short, long, help = "Undo allowing or blocking domains")]
#[structopt(short, long, help = "Undo whitelisting or blocking domains")]
undo: bool,
#[arg(short, long, help = "List allowed and blocked domains")]
list: bool,
#[arg(short, long, help = "Get statistics from the server")]
stats: bool,
#[arg(
#[structopt(
short,
long,
help = "List domains by when they were last succesfully contacted"
help = "Only process background jobs, do not start the relay server"
)]
contacted: bool,
jobs_only: bool,
#[structopt(
short,
long,
help = "Only run the relay server, do not process background jobs"
)]
no_jobs: bool,
}
impl Args {
pub(crate) fn any(&self) -> bool {
!self.blocks.is_empty()
|| !self.allowed.is_empty()
|| self.list
|| self.stats
|| self.contacted
pub fn new() -> Self {
Self::from_args()
}
pub(crate) fn new() -> Self {
Self::parse()
}
pub(crate) fn blocks(&self) -> &[String] {
pub fn blocks(&self) -> &[String] {
&self.blocks
}
pub(crate) fn allowed(&self) -> &[String] {
&self.allowed
pub fn whitelists(&self) -> &[String] {
&self.whitelists
}
pub(crate) fn undo(&self) -> bool {
pub fn undo(&self) -> bool {
self.undo
}
pub(crate) fn list(&self) -> bool {
self.list
pub fn jobs_only(&self) -> bool {
self.jobs_only
}
pub(crate) fn stats(&self) -> bool {
self.stats
}
pub(crate) fn contacted(&self) -> bool {
self.contacted
pub fn no_jobs(&self) -> bool {
self.no_jobs
}
}

@@ -1,53 +1,8 @@
use ructe::Ructe;
use std::{fs::File, io::Read, path::Path, process::Command};
fn git_info() {
if let Ok(output) = Command::new("git").args(["rev-parse", "HEAD"]).output() {
if output.status.success() {
let git_hash = String::from_utf8_lossy(&output.stdout);
println!("cargo:rustc-env=GIT_HASH={git_hash}");
println!("cargo:rustc-env=GIT_SHORT_HASH={}", &git_hash[..8])
}
}
if let Ok(output) = Command::new("git")
.args(["rev-parse", "--abbrev-ref", "HEAD"])
.output()
{
if output.status.success() {
let git_branch = String::from_utf8_lossy(&output.stdout);
println!("cargo:rustc-env=GIT_BRANCH={git_branch}");
}
}
}
fn version_info() -> color_eyre::Result<()> {
let cargo_toml = Path::new(&std::env::var("CARGO_MANIFEST_DIR")?).join("Cargo.toml");
let mut file = File::open(cargo_toml)?;
let mut cargo_data = String::new();
file.read_to_string(&mut cargo_data)?;
let data: toml::Value = toml::from_str(&cargo_data)?;
if let Some(version) = data["package"]["version"].as_str() {
println!("cargo:rustc-env=PKG_VERSION={version}");
}
if let Some(name) = data["package"]["name"].as_str() {
println!("cargo:rustc-env=PKG_NAME={name}");
}
Ok(())
}
fn main() -> color_eyre::Result<()> {
fn main() -> Result<(), anyhow::Error> {
dotenv::dotenv().ok();
git_info();
version_info()?;
let mut ructe = Ructe::from_env()?;
let mut statics = ructe.statics()?;
statics.add_sass_file("scss/index.scss")?;

@@ -1,425 +0,0 @@
use metrics::{Key, Metadata, Recorder, SetRecorderError};
use metrics_util::{
registry::{AtomicStorage, GenerationalStorage, Recency, Registry},
MetricKindMask, Summary,
};
use quanta::Clock;
use std::{
collections::{BTreeMap, HashMap},
sync::{atomic::Ordering, Arc, RwLock},
time::Duration,
};
const SECONDS: u64 = 1;
const MINUTES: u64 = 60 * SECONDS;
const HOURS: u64 = 60 * MINUTES;
const DAYS: u64 = 24 * HOURS;
pub(crate) fn recordable(len: usize) -> u32 {
((len as u64) % u64::from(u32::MAX)) as u32
}
type DistributionMap = BTreeMap<Vec<(String, String)>, Summary>;
#[derive(Clone)]
pub struct MemoryCollector {
inner: Arc<Inner>,
}
struct Inner {
descriptions: RwLock<HashMap<String, metrics::SharedString>>,
distributions: RwLock<HashMap<String, DistributionMap>>,
recency: Recency<Key>,
registry: Registry<Key, GenerationalStorage<AtomicStorage>>,
}
#[derive(Debug, serde::Deserialize, serde::Serialize)]
struct Counter {
labels: BTreeMap<String, String>,
value: u64,
}
impl std::fmt::Display for Counter {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let labels = self
.labels
.iter()
.map(|(k, v)| format!("{k}: {v}"))
.collect::<Vec<_>>()
.join(", ");
write!(f, "{labels} - {}", self.value)
}
}
#[derive(Debug, serde::Deserialize, serde::Serialize)]
struct Gauge {
labels: BTreeMap<String, String>,
value: f64,
}
impl std::fmt::Display for Gauge {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let labels = self
.labels
.iter()
.map(|(k, v)| format!("{k}: {v}"))
.collect::<Vec<_>>()
.join(", ");
write!(f, "{labels} - {}", self.value)
}
}
#[derive(Debug, serde::Deserialize, serde::Serialize)]
struct Histogram {
labels: BTreeMap<String, String>,
value: Vec<(f64, Option<f64>)>,
}
impl std::fmt::Display for Histogram {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let labels = self
.labels
.iter()
.map(|(k, v)| format!("{k}: {v}"))
.collect::<Vec<_>>()
.join(", ");
let value = self
.value
.iter()
.map(|(k, v)| {
if let Some(v) = v {
format!("{k}: {v:.6}")
} else {
format!("{k}: None,")
}
})
.collect::<Vec<_>>()
.join(", ");
write!(f, "{labels} - {value}")
}
}
#[derive(Debug, serde::Deserialize, serde::Serialize)]
pub(crate) struct Snapshot {
counters: HashMap<String, Vec<Counter>>,
gauges: HashMap<String, Vec<Gauge>>,
histograms: HashMap<String, Vec<Histogram>>,
}
const PAIRS: [((&str, &str), &str); 2] = [
(
(
"background-jobs.worker.started",
"background-jobs.worker.finished",
),
"background-jobs.worker.running",
),
(
(
"background-jobs.job.started",
"background-jobs.job.finished",
),
"background-jobs.job.running",
),
];
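// Each (started, finished) counter pair above is folded into a synthetic "running"
// metric during presentation: running = started - finished, computed per label set
// by MergeCounter below.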
#[derive(Default)]
struct MergeCounter {
start: Option<Counter>,
finish: Option<Counter>,
}
impl MergeCounter {
fn merge(self) -> Option<Counter> {
match (self.start, self.finish) {
(Some(start), Some(end)) => Some(Counter {
labels: start.labels,
value: start.value.saturating_sub(end.value),
}),
(Some(only), None) => Some(only),
(None, Some(only)) => Some(Counter {
labels: only.labels,
value: 0,
}),
(None, None) => None,
}
}
}
impl Snapshot {
pub(crate) fn present(self) {
if !self.counters.is_empty() {
println!("Counters");
let mut merging = HashMap::new();
for (key, counters) in self.counters {
if let Some(((start, _), name)) = PAIRS
.iter()
.find(|((start, finish), _)| *start == key || *finish == key)
{
let entry = merging.entry(name).or_insert_with(HashMap::new);
for counter in counters {
let merge_counter = entry
.entry(counter.labels.clone())
.or_insert_with(MergeCounter::default);
if key == *start {
merge_counter.start = Some(counter);
} else {
merge_counter.finish = Some(counter);
}
}
continue;
}
println!("\t{key}");
for counter in counters {
println!("\t\t{counter}");
}
}
for (key, counters) in merging {
println!("\t{key}");
for (_, counter) in counters {
if let Some(counter) = counter.merge() {
println!("\t\t{counter}");
}
}
}
}
if !self.gauges.is_empty() {
println!("Gauges");
for (key, gauges) in self.gauges {
println!("\t{key}");
for gauge in gauges {
println!("\t\t{gauge}");
}
}
}
if !self.histograms.is_empty() {
println!("Histograms");
for (key, histograms) in self.histograms {
println!("\t{key}");
for histogram in histograms {
println!("\t\t{histogram}");
}
}
}
}
}
fn key_to_parts(key: &Key) -> (String, Vec<(String, String)>) {
let labels = key
.labels()
.map(|label| (label.key().to_string(), label.value().to_string()))
.collect();
let name = key.name().to_string();
(name, labels)
}
impl Inner {
fn snapshot_counters(&self) -> HashMap<String, Vec<Counter>> {
let mut counters = HashMap::new();
for (key, counter) in self.registry.get_counter_handles() {
let gen = counter.get_generation();
if !self.recency.should_store_counter(&key, gen, &self.registry) {
continue;
}
let (name, labels) = key_to_parts(&key);
let value = counter.get_inner().load(Ordering::Acquire);
counters.entry(name).or_insert_with(Vec::new).push(Counter {
labels: labels.into_iter().collect(),
value,
});
}
counters
}
fn snapshot_gauges(&self) -> HashMap<String, Vec<Gauge>> {
let mut gauges = HashMap::new();
for (key, gauge) in self.registry.get_gauge_handles() {
let gen = gauge.get_generation();
if !self.recency.should_store_gauge(&key, gen, &self.registry) {
continue;
}
let (name, labels) = key_to_parts(&key);
let value = f64::from_bits(gauge.get_inner().load(Ordering::Acquire));
gauges.entry(name).or_insert_with(Vec::new).push(Gauge {
labels: labels.into_iter().collect(),
value,
})
}
gauges
}
fn snapshot_histograms(&self) -> HashMap<String, Vec<Histogram>> {
for (key, histogram) in self.registry.get_histogram_handles() {
let gen = histogram.get_generation();
let (name, labels) = key_to_parts(&key);
if !self
.recency
.should_store_histogram(&key, gen, &self.registry)
{
let mut d = self.distributions.write().unwrap();
let delete_by_name = if let Some(by_name) = d.get_mut(&name) {
by_name.remove(&labels);
by_name.is_empty()
} else {
false
};
drop(d);
if delete_by_name {
self.descriptions.write().unwrap().remove(&name);
}
continue;
}
let mut d = self.distributions.write().unwrap();
let outer_entry = d.entry(name.clone()).or_default();
let entry = outer_entry
.entry(labels)
.or_insert_with(Summary::with_defaults);
histogram.get_inner().clear_with(|samples| {
for sample in samples {
entry.add(*sample);
}
});
let mut total_len = 0;
for dist_map in d.values() {
total_len += dist_map.len();
}
metrics::gauge!("relay.collector.distributions.size").set(recordable(total_len));
}
let d = self.distributions.read().unwrap().clone();
d.into_iter()
.map(|(key, value)| {
(
key,
value
.into_iter()
.map(|(labels, summary)| Histogram {
labels: labels.into_iter().collect(),
value: [0.001, 0.01, 0.05, 0.1, 0.5, 0.9, 0.99, 1.0]
.into_iter()
.map(|q| (q, summary.quantile(q)))
.collect(),
})
.collect(),
)
})
.collect()
}
fn snapshot(&self) -> Snapshot {
Snapshot {
counters: self.snapshot_counters(),
gauges: self.snapshot_gauges(),
histograms: self.snapshot_histograms(),
}
}
}
impl MemoryCollector {
pub(crate) fn new() -> Self {
MemoryCollector {
inner: Arc::new(Inner {
descriptions: Default::default(),
distributions: Default::default(),
recency: Recency::new(
Clock::new(),
MetricKindMask::ALL,
Some(Duration::from_secs(5 * DAYS)),
),
registry: Registry::new(GenerationalStorage::atomic()),
}),
}
}
pub(crate) fn snapshot(&self) -> Snapshot {
self.inner.snapshot()
}
fn add_description_if_missing(
&self,
key: &metrics::KeyName,
description: metrics::SharedString,
) {
let mut d = self.inner.descriptions.write().unwrap();
d.entry(key.as_str().to_owned()).or_insert(description);
metrics::gauge!("relay.collector.descriptions.size").set(recordable(d.len()));
}
pub(crate) fn install(&self) -> Result<(), SetRecorderError<Self>> {
metrics::set_global_recorder(self.clone())
}
}
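// Implementing the `metrics` Recorder trait lets this collector be installed as the
// global recorder: the describe_* calls store help text, and the register_* calls
// hand out counter/gauge/histogram handles backed by the shared registry.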
impl Recorder for MemoryCollector {
fn describe_counter(
&self,
key: metrics::KeyName,
_: Option<metrics::Unit>,
description: metrics::SharedString,
) {
self.add_description_if_missing(&key, description)
}
fn describe_gauge(
&self,
key: metrics::KeyName,
_: Option<metrics::Unit>,
description: metrics::SharedString,
) {
self.add_description_if_missing(&key, description)
}
fn describe_histogram(
&self,
key: metrics::KeyName,
_: Option<metrics::Unit>,
description: metrics::SharedString,
) {
self.add_description_if_missing(&key, description)
}
fn register_counter(&self, key: &Key, _: &Metadata<'_>) -> metrics::Counter {
self.inner
.registry
.get_or_create_counter(key, |c| c.clone().into())
}
fn register_gauge(&self, key: &Key, _: &Metadata<'_>) -> metrics::Gauge {
self.inner
.registry
.get_or_create_gauge(key, |c| c.clone().into())
}
fn register_histogram(&self, key: &Key, _: &Metadata<'_>) -> metrics::Histogram {
self.inner
.registry
.get_or_create_histogram(key, |c| c.clone().into())
}
}


@ -1,102 +1,41 @@
use crate::{
error::Error,
extractors::{AdminConfig, XApiToken},
};
use activitystreams::{
iri,
iri_string::{
format::ToDedicatedString,
resolve::FixedBaseResolver,
types::{IriAbsoluteString, IriFragmentStr, IriRelativeStr, IriString},
},
};
use crate::{data::ActorCache, error::MyError, middleware::MyVerify, requests::Requests};
use activitystreams_new::{primitives::XsdAnyUri, uri};
use config::Environment;
use http_signature_normalization_actix::{digest::ring::Sha256, prelude::VerifyDigest};
use rustls::sign::CertifiedKey;
use std::{
net::{IpAddr, SocketAddr},
path::PathBuf,
};
use http_signature_normalization_actix::prelude::{VerifyDigest, VerifySignature};
use sha2::{Digest, Sha256};
use std::net::IpAddr;
use uuid::Uuid;
#[derive(Clone, Debug, serde::Deserialize)]
pub(crate) struct ParsedConfig {
pub struct ParsedConfig {
hostname: String,
addr: IpAddr,
port: u16,
debug: bool,
restricted_mode: bool,
whitelist_mode: bool,
validate_signatures: bool,
https: bool,
database_url: String,
pretty_log: bool,
publish_blocks: bool,
sled_path: PathBuf,
source_repo: IriString,
repository_commit_base: String,
opentelemetry_url: Option<IriString>,
telegram_token: Option<String>,
telegram_admin_handle: Option<String>,
api_token: Option<String>,
tls_key: Option<PathBuf>,
tls_cert: Option<PathBuf>,
footer_blurb: Option<String>,
local_domains: Option<String>,
local_blurb: Option<String>,
prometheus_addr: Option<IpAddr>,
prometheus_port: Option<u16>,
deliver_concurrency: u64,
client_timeout: u64,
proxy_url: Option<IriString>,
proxy_username: Option<String>,
proxy_password: Option<String>,
signature_threads: Option<usize>,
max_connections: usize,
}
#[derive(Clone)]
#[derive(Clone, Debug)]
pub struct Config {
hostname: String,
addr: IpAddr,
port: u16,
debug: bool,
restricted_mode: bool,
whitelist_mode: bool,
validate_signatures: bool,
database_url: String,
pretty_log: bool,
publish_blocks: bool,
base_uri: IriAbsoluteString,
sled_path: PathBuf,
source_repo: IriString,
opentelemetry_url: Option<IriString>,
telegram_token: Option<String>,
telegram_admin_handle: Option<String>,
api_token: Option<String>,
tls: Option<TlsConfig>,
footer_blurb: Option<String>,
local_domains: Vec<String>,
local_blurb: Option<String>,
prometheus_config: Option<PrometheusConfig>,
deliver_concurrency: u64,
client_timeout: u64,
proxy_config: Option<ProxyConfig>,
signature_threads: Option<usize>,
max_connections: usize,
base_uri: XsdAnyUri,
}
#[derive(Clone)]
struct TlsConfig {
key: PathBuf,
cert: PathBuf,
}
#[derive(Clone, Debug)]
struct PrometheusConfig {
addr: IpAddr,
port: u16,
}
#[derive(Clone, Debug)]
struct ProxyConfig {
url: IriString,
auth: Option<(String, String)>,
}
#[derive(Debug)]
pub enum UrlKind {
Activity,
Actor,
@ -110,286 +49,55 @@ pub enum UrlKind {
Outbox,
}
#[derive(Debug)]
pub enum AdminUrlKind {
Allow,
Disallow,
Block,
Unblock,
Allowed,
Blocked,
Connected,
Stats,
LastSeen,
}
impl std::fmt::Debug for Config {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Config")
.field("hostname", &self.hostname)
.field("addr", &self.addr)
.field("port", &self.port)
.field("debug", &self.debug)
.field("restricted_mode", &self.restricted_mode)
.field("validate_signatures", &self.validate_signatures)
.field("publish_blocks", &self.publish_blocks)
.field("base_uri", &self.base_uri.to_string())
.field("sled_path", &self.sled_path)
.field("source_repo", &self.source_repo.to_string())
.field(
"opentelemetry_url",
&self.opentelemetry_url.as_ref().map(|url| url.to_string()),
)
.field("telegram_token", &"[redacted]")
.field("telegram_admin_handle", &self.telegram_admin_handle)
.field("api_token", &"[redacted]")
.field("tls_key", &"[redacted]")
.field("tls_cert", &"[redacted]")
.field("footer_blurb", &self.footer_blurb)
.field("local_domains", &self.local_domains)
.field("local_blurb", &self.local_blurb)
.field("prometheus_config", &self.prometheus_config)
.field("deliver_concurrency", &self.deliver_concurrency)
.field("client_timeout", &self.client_timeout)
.field("proxy_config", &self.proxy_config)
.field("signature_threads", &self.signature_threads)
.finish()
}
}
impl Config {
pub(crate) fn build() -> Result<Self, Error> {
let config = config::Config::builder()
pub fn build() -> Result<Self, MyError> {
let mut config = config::Config::new();
config
.set_default("hostname", "localhost:8080")?
.set_default("addr", "127.0.0.1")?
.set_default("port", 8080u64)?
.set_default("port", 8080)?
.set_default("debug", true)?
.set_default("restricted_mode", false)?
.set_default("validate_signatures", true)?
.set_default("https", true)?
.set_default("whitelist_mode", false)?
.set_default("validate_signatures", false)?
.set_default("https", false)?
.set_default("pretty_log", true)?
.set_default("publish_blocks", false)?
.set_default("sled_path", "./sled/db-0-34")?
.set_default("source_repo", "https://git.asonix.dog/asonix/relay")?
.set_default("repository_commit_base", "/src/commit/")?
.set_default("opentelemetry_url", None as Option<&str>)?
.set_default("telegram_token", None as Option<&str>)?
.set_default("telegram_admin_handle", None as Option<&str>)?
.set_default("api_token", None as Option<&str>)?
.set_default("tls_key", None as Option<&str>)?
.set_default("tls_cert", None as Option<&str>)?
.set_default("footer_blurb", None as Option<&str>)?
.set_default("local_domains", None as Option<&str>)?
.set_default("local_blurb", None as Option<&str>)?
.set_default("prometheus_addr", None as Option<&str>)?
.set_default("prometheus_port", None as Option<u16>)?
.set_default("deliver_concurrency", 8u64)?
.set_default("client_timeout", 10u64)?
.set_default("proxy_url", None as Option<&str>)?
.set_default("proxy_username", None as Option<&str>)?
.set_default("proxy_password", None as Option<&str>)?
.set_default("signature_threads", None as Option<u64>)?
.add_source(Environment::default())
.build()?;
.set_default("max_connections", 2)?
.merge(Environment::new())?;
let config: ParsedConfig = config.try_deserialize()?;
let config: ParsedConfig = config.try_into()?;
let scheme = if config.https { "https" } else { "http" };
let base_uri = iri!(format!("{scheme}://{}", config.hostname)).into_absolute();
let tls = match (config.tls_key, config.tls_cert) {
(Some(key), Some(cert)) => Some(TlsConfig { key, cert }),
(Some(_), None) => {
tracing::warn!("TLS_KEY is set but TLS_CERT isn't , not building TLS config");
None
}
(None, Some(_)) => {
tracing::warn!("TLS_CERT is set but TLS_KEY isn't , not building TLS config");
None
}
(None, None) => None,
};
let local_domains = config
.local_domains
.iter()
.flat_map(|s| s.split(','))
.map(|d| d.to_string())
.collect();
let prometheus_config = match (config.prometheus_addr, config.prometheus_port) {
(Some(addr), Some(port)) => Some(PrometheusConfig { addr, port }),
(Some(_), None) => {
tracing::warn!("PROMETHEUS_ADDR is set but PROMETHEUS_PORT is not set, not building Prometheus config");
None
}
(None, Some(_)) => {
tracing::warn!("PROMETHEUS_PORT is set but PROMETHEUS_ADDR is not set, not building Prometheus config");
None
}
(None, None) => None,
};
let proxy_config = match (config.proxy_username, config.proxy_password) {
(Some(username), Some(password)) => config.proxy_url.map(|url| ProxyConfig {
url,
auth: Some((username, password)),
}),
(Some(_), None) => {
tracing::warn!(
"PROXY_USERNAME is set but PROXY_PASSWORD is not set, not setting Proxy Auth"
);
config.proxy_url.map(|url| ProxyConfig { url, auth: None })
}
(None, Some(_)) => {
tracing::warn!(
"PROXY_PASSWORD is set but PROXY_USERNAME is not set, not setting Proxy Auth"
);
config.proxy_url.map(|url| ProxyConfig { url, auth: None })
}
(None, None) => config.proxy_url.map(|url| ProxyConfig { url, auth: None }),
};
let source_url = match Self::git_hash() {
Some(hash) => format!(
"{}{}{hash}",
config.source_repo, config.repository_commit_base
)
.parse()
.expect("constructed source URL is valid"),
None => config.source_repo.clone(),
};
let base_uri = uri!(format!("{}://{}", scheme, config.hostname));
Ok(Config {
hostname: config.hostname,
addr: config.addr,
port: config.port,
debug: config.debug,
restricted_mode: config.restricted_mode,
whitelist_mode: config.whitelist_mode,
validate_signatures: config.validate_signatures,
database_url: config.database_url,
pretty_log: config.pretty_log,
publish_blocks: config.publish_blocks,
max_connections: config.max_connections,
base_uri,
sled_path: config.sled_path,
source_repo: source_url,
opentelemetry_url: config.opentelemetry_url,
telegram_token: config.telegram_token,
telegram_admin_handle: config.telegram_admin_handle,
api_token: config.api_token,
tls,
footer_blurb: config.footer_blurb,
local_domains,
local_blurb: config.local_blurb,
prometheus_config,
deliver_concurrency: config.deliver_concurrency,
client_timeout: config.client_timeout,
proxy_config,
signature_threads: config.signature_threads,
})
}
pub(crate) fn signature_threads(&self) -> usize {
self.signature_threads
.unwrap_or_else(|| {
std::thread::available_parallelism()
.map(usize::from)
.map_err(|e| tracing::warn!("Failed to get parallelism, {e}"))
.unwrap_or(1)
})
.max(1)
pub fn pretty_log(&self) -> bool {
self.pretty_log
}
pub(crate) fn client_timeout(&self) -> u64 {
self.client_timeout
pub fn max_connections(&self) -> usize {
self.max_connections
}
pub(crate) fn deliver_concurrency(&self) -> u64 {
self.deliver_concurrency
}
pub(crate) fn prometheus_bind_address(&self) -> Option<SocketAddr> {
let config = self.prometheus_config.as_ref()?;
Some((config.addr, config.port).into())
}
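// open_keys reads the configured PEM files and assembles a rustls CertifiedKey,
// returning Ok(None) rather than an error when TLS material is absent or unreadable,
// so the server can still come up without TLS.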
pub(crate) async fn open_keys(&self) -> Result<Option<CertifiedKey>, Error> {
let tls = if let Some(tls) = &self.tls {
tls
} else {
tracing::info!("No TLS config present");
return Ok(None);
};
let certs_bytes = tokio::fs::read(&tls.cert).await?;
let certs =
rustls_pemfile::certs(&mut certs_bytes.as_slice()).collect::<Result<Vec<_>, _>>()?;
if certs.is_empty() {
tracing::warn!("No certs read from certificate file");
return Ok(None);
}
let key_bytes = tokio::fs::read(&tls.key).await?;
let key = if let Some(key) = rustls_pemfile::private_key(&mut key_bytes.as_slice())? {
key
} else {
tracing::warn!("Failed to read private key");
return Ok(None);
};
let key = rustls::crypto::ring::sign::any_supported_type(&key)?;
Ok(Some(CertifiedKey::new(certs, key)))
}
pub(crate) fn footer_blurb(&self) -> Option<crate::templates::Html<String>> {
if let Some(blurb) = &self.footer_blurb {
if !blurb.is_empty() {
return Some(crate::templates::Html(
ammonia::Builder::new()
.add_tag_attributes("a", &["rel"])
.add_tag_attributes("area", &["rel"])
.add_tag_attributes("link", &["rel"])
.link_rel(None)
.clean(blurb)
.to_string(),
));
}
}
None
}
pub(crate) fn local_blurb(&self) -> Option<crate::templates::Html<String>> {
if let Some(blurb) = &self.local_blurb {
if !blurb.is_empty() {
return Some(crate::templates::Html(
ammonia::Builder::new()
.add_tag_attributes("a", &["rel"])
.add_tag_attributes("area", &["rel"])
.add_tag_attributes("link", &["rel"])
.link_rel(None)
.clean(blurb)
.to_string(),
));
}
}
None
}
pub(crate) fn local_domains(&self) -> &[String] {
&self.local_domains
}
pub(crate) fn sled_path(&self) -> &PathBuf {
&self.sled_path
}
pub(crate) fn validate_signatures(&self) -> bool {
pub fn validate_signatures(&self) -> bool {
self.validate_signatures
}
pub(crate) fn digest_middleware(&self) -> VerifyDigest<Sha256> {
pub fn digest_middleware(&self) -> VerifyDigest<Sha256> {
if self.validate_signatures {
VerifyDigest::new(Sha256::new())
} else {
@ -397,187 +105,78 @@ impl Config {
}
}
pub(crate) fn x_api_token(&self) -> Option<XApiToken> {
self.api_token.clone().map(XApiToken::new)
}
pub(crate) fn admin_config(&self) -> Option<actix_web::web::Data<AdminConfig>> {
if let Some(api_token) = &self.api_token {
match AdminConfig::build(api_token) {
Ok(conf) => Some(actix_web::web::Data::new(conf)),
Err(e) => {
tracing::error!("Error creating admin config: {e}");
None
}
}
pub fn signature_middleware(
&self,
requests: Requests,
actors: ActorCache,
) -> VerifySignature<MyVerify> {
if self.validate_signatures {
VerifySignature::new(MyVerify(requests, actors), Default::default())
} else {
None
VerifySignature::new(MyVerify(requests, actors), Default::default()).optional()
}
}
pub(crate) fn bind_address(&self) -> (IpAddr, u16) {
pub fn bind_address(&self) -> (IpAddr, u16) {
(self.addr, self.port)
}
pub(crate) fn debug(&self) -> bool {
pub fn debug(&self) -> bool {
self.debug
}
pub(crate) fn publish_blocks(&self) -> bool {
pub fn publish_blocks(&self) -> bool {
self.publish_blocks
}
pub(crate) fn restricted_mode(&self) -> bool {
self.restricted_mode
pub fn whitelist_mode(&self) -> bool {
self.whitelist_mode
}
pub(crate) fn hostname(&self) -> &str {
pub fn database_url(&self) -> &str {
&self.database_url
}
pub fn hostname(&self) -> &str {
&self.hostname
}
pub(crate) fn generate_resource(&self) -> String {
pub fn generate_resource(&self) -> String {
format!("relay@{}", self.hostname)
}
pub(crate) fn software_name() -> &'static str {
"AodeRelay"
pub fn software_name(&self) -> String {
"AodeRelay".to_owned()
}
pub(crate) fn software_version() -> String {
if let Some(git) = Self::git_version() {
return format!("v{}-{git}", Self::version());
}
format!("v{}", Self::version())
pub fn software_version(&self) -> String {
"v0.1.0-master".to_owned()
}
fn git_version() -> Option<String> {
let branch = Self::git_branch()?;
let hash = Self::git_short_hash()?;
Some(format!("{branch}-{hash}"))
pub fn source_code(&self) -> String {
"https://git.asonix.dog/asonix/ap-relay".to_owned()
}
fn name() -> &'static str {
env!("PKG_NAME")
}
pub fn generate_url(&self, kind: UrlKind) -> XsdAnyUri {
let mut uri = self.base_uri.clone();
let url = uri.as_url_mut();
fn version() -> &'static str {
env!("PKG_VERSION")
}
fn git_branch() -> Option<&'static str> {
option_env!("GIT_BRANCH")
}
fn git_hash() -> Option<&'static str> {
option_env!("GIT_HASH")
}
fn git_short_hash() -> Option<&'static str> {
option_env!("GIT_SHORT_HASH")
}
pub(crate) fn user_agent(&self) -> String {
format!(
"{} ({}/{}; +{})",
Self::software_name(),
Self::name(),
Self::software_version(),
self.generate_url(UrlKind::Index),
)
}
pub(crate) fn proxy_config(&self) -> Option<(&IriString, Option<(&str, &str)>)> {
self.proxy_config.as_ref().map(|ProxyConfig { url, auth }| {
(url, auth.as_ref().map(|(u, p)| (u.as_str(), p.as_str())))
})
}
pub(crate) fn source_code(&self) -> &IriString {
&self.source_repo
}
pub(crate) fn opentelemetry_url(&self) -> Option<&IriString> {
self.opentelemetry_url.as_ref()
}
pub(crate) fn telegram_info(&self) -> Option<(&str, &str)> {
self.telegram_token.as_deref().and_then(|token| {
let handle = self.telegram_admin_handle.as_deref()?;
Some((token, handle))
})
}
pub(crate) fn generate_url(&self, kind: UrlKind) -> IriString {
self.do_generate_url(kind).expect("Generated valid IRI")
}
fn do_generate_url(&self, kind: UrlKind) -> Result<IriString, Error> {
let iri = match kind {
UrlKind::Activity => FixedBaseResolver::new(self.base_uri.as_ref())
.resolve(IriRelativeStr::new(&format!("activity/{}", Uuid::new_v4()))?.as_ref())
.try_to_dedicated_string()?,
UrlKind::Actor => FixedBaseResolver::new(self.base_uri.as_ref())
.resolve(IriRelativeStr::new("actor")?.as_ref())
.try_to_dedicated_string()?,
UrlKind::Followers => FixedBaseResolver::new(self.base_uri.as_ref())
.resolve(IriRelativeStr::new("followers")?.as_ref())
.try_to_dedicated_string()?,
UrlKind::Following => FixedBaseResolver::new(self.base_uri.as_ref())
.resolve(IriRelativeStr::new("following")?.as_ref())
.try_to_dedicated_string()?,
UrlKind::Inbox => FixedBaseResolver::new(self.base_uri.as_ref())
.resolve(IriRelativeStr::new("inbox")?.as_ref())
.try_to_dedicated_string()?,
UrlKind::Index => self.base_uri.clone().into(),
match kind {
UrlKind::Activity => url.set_path(&format!("activity/{}", Uuid::new_v4())),
UrlKind::Actor => url.set_path("actor"),
UrlKind::Followers => url.set_path("followers"),
UrlKind::Following => url.set_path("following"),
UrlKind::Inbox => url.set_path("inbox"),
UrlKind::Index => (),
UrlKind::MainKey => {
let actor = IriRelativeStr::new("actor")?;
let fragment = IriFragmentStr::new("main-key")?;
let mut resolved = FixedBaseResolver::new(self.base_uri.as_ref())
.resolve(actor.as_ref())
.try_to_dedicated_string()?;
resolved.set_fragment(Some(fragment));
resolved
url.set_path("actor");
url.set_fragment(Some("main-key"));
}
UrlKind::Media(uuid) => FixedBaseResolver::new(self.base_uri.as_ref())
.resolve(IriRelativeStr::new(&format!("media/{uuid}"))?.as_ref())
.try_to_dedicated_string()?,
UrlKind::NodeInfo => FixedBaseResolver::new(self.base_uri.as_ref())
.resolve(IriRelativeStr::new("nodeinfo/2.0.json")?.as_ref())
.try_to_dedicated_string()?,
UrlKind::Outbox => FixedBaseResolver::new(self.base_uri.as_ref())
.resolve(IriRelativeStr::new("outbox")?.as_ref())
.try_to_dedicated_string()?,
UrlKind::Media(uuid) => url.set_path(&format!("media/{}", uuid)),
UrlKind::NodeInfo => url.set_path("nodeinfo/2.0.json"),
UrlKind::Outbox => url.set_path("outbox"),
};
Ok(iri)
}
pub(crate) fn generate_admin_url(&self, kind: AdminUrlKind) -> IriString {
self.do_generate_admin_url(kind)
.expect("Generated valid IRI")
}
fn do_generate_admin_url(&self, kind: AdminUrlKind) -> Result<IriString, Error> {
let path = match kind {
AdminUrlKind::Allow => "api/v1/admin/allow",
AdminUrlKind::Disallow => "api/v1/admin/disallow",
AdminUrlKind::Block => "api/v1/admin/block",
AdminUrlKind::Unblock => "api/v1/admin/unblock",
AdminUrlKind::Allowed => "api/v1/admin/allowed",
AdminUrlKind::Blocked => "api/v1/admin/blocked",
AdminUrlKind::Connected => "api/v1/admin/connected",
AdminUrlKind::Stats => "api/v1/admin/stats",
AdminUrlKind::LastSeen => "api/v1/admin/last_seen",
};
let iri = FixedBaseResolver::new(self.base_uri.as_ref())
.resolve(IriRelativeStr::new(path)?.as_ref())
.try_to_dedicated_string()?;
Ok(iri)
uri
}
}
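// Example: generate_url(UrlKind::Actor) resolves to "{base_uri}/actor", and
// UrlKind::MainKey appends the "#main-key" fragment, which is used as the signing
// key id for HTTP signatures.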


@ -1,11 +0,0 @@
mod actor;
mod last_online;
mod media;
mod node;
mod state;
pub(crate) use actor::ActorCache;
pub(crate) use last_online::LastOnline;
pub(crate) use media::MediaCache;
pub(crate) use node::{Node, NodeCache};
pub(crate) use state::State;


@ -1,11 +1,10 @@
use crate::{
apub::AcceptedActors,
db::{Actor, Db},
error::{Error, ErrorKind},
requests::{BreakerStrategy, Requests},
};
use activitystreams::{iri_string::types::IriString, prelude::*};
use std::time::{Duration, SystemTime};
use crate::{apub::AcceptedActors, db::Db, error::MyError, requests::Requests};
use activitystreams_new::{prelude::*, primitives::XsdAnyUri, uri};
use log::error;
use std::{collections::HashSet, sync::Arc, time::Duration};
use tokio::sync::RwLock;
use ttl_cache::TtlCache;
use uuid::Uuid;
const REFETCH_DURATION: Duration = Duration::from_secs(60 * 30);
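// A cached actor is considered fresh for 30 minutes; after that, get() falls
// through to get_no_cache() and refetches the actor document.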
@ -16,37 +15,105 @@ pub enum MaybeCached<T> {
}
impl<T> MaybeCached<T> {
pub(crate) fn is_cached(&self) -> bool {
matches!(self, MaybeCached::Cached(_))
pub fn is_cached(&self) -> bool {
match self {
MaybeCached::Cached(_) => true,
_ => false,
}
}
pub(crate) fn into_inner(self) -> T {
pub fn into_inner(self) -> T {
match self {
MaybeCached::Cached(t) | MaybeCached::Fetched(t) => t,
}
}
}
#[derive(Clone, Debug)]
#[derive(Clone)]
pub struct ActorCache {
db: Db,
cache: Arc<RwLock<TtlCache<XsdAnyUri, Actor>>>,
following: Arc<RwLock<HashSet<XsdAnyUri>>>,
}
impl ActorCache {
pub(crate) fn new(db: Db) -> Self {
ActorCache { db }
pub fn new(db: Db) -> Self {
let cache = ActorCache {
db,
cache: Arc::new(RwLock::new(TtlCache::new(1024 * 8))),
following: Arc::new(RwLock::new(HashSet::new())),
};
cache.spawn_rehydrate();
cache
}
#[tracing::instrument(level = "debug" name = "Get Actor", skip_all, fields(id = id.to_string().as_str()))]
pub(crate) async fn get(
pub async fn is_following(&self, id: &XsdAnyUri) -> bool {
self.following.read().await.contains(id)
}
pub async fn get_no_cache(
&self,
id: &IriString,
id: &XsdAnyUri,
requests: &Requests,
) -> Result<MaybeCached<Actor>, Error> {
if let Some(actor) = self.db.actor(id.clone()).await? {
if actor.saved_at + REFETCH_DURATION > SystemTime::now() {
return Ok(MaybeCached::Cached(actor));
}
) -> Result<Actor, MyError> {
let accepted_actor = requests.fetch::<AcceptedActors>(id.as_str()).await?;
let input_host = id.as_url().host();
let accepted_actor_id = accepted_actor.id().ok_or(MyError::MissingId)?;
let actor_host = accepted_actor_id.as_url().host();
let inbox_host = get_inbox(&accepted_actor).as_url().host();
if input_host != actor_host {
let input_host = input_host.map(|h| h.to_string()).unwrap_or_default();
let actor_host = actor_host.map(|h| h.to_string()).unwrap_or_default();
return Err(MyError::HostMismatch(input_host, actor_host));
}
if actor_host != inbox_host {
let actor_host = actor_host.map(|h| h.to_string()).unwrap_or_default();
let inbox_host = inbox_host.map(|h| h.to_string()).unwrap_or_default();
return Err(MyError::HostMismatch(actor_host, inbox_host));
}
let inbox = get_inbox(&accepted_actor).clone();
let actor = Actor {
id: accepted_actor_id.clone(),
public_key: accepted_actor.ext_one.public_key.public_key_pem,
public_key_id: accepted_actor.ext_one.public_key.id,
inbox,
};
self.cache
.write()
.await
.insert(id.clone(), actor.clone(), REFETCH_DURATION);
self.update(id, &actor.public_key, &actor.public_key_id)
.await?;
Ok(actor)
}
pub async fn get(
&self,
id: &XsdAnyUri,
requests: &Requests,
) -> Result<MaybeCached<Actor>, MyError> {
if let Some(actor) = self.cache.read().await.get(id) {
return Ok(MaybeCached::Cached(actor.clone()));
}
if let Some(actor) = self.lookup(id).await? {
self.cache
.write()
.await
.insert(id.clone(), actor.clone(), REFETCH_DURATION);
return Ok(MaybeCached::Cached(actor));
}
self.get_no_cache(id, requests)
@ -54,51 +121,231 @@ impl ActorCache {
.map(MaybeCached::Fetched)
}
#[tracing::instrument(level = "debug", name = "Add Connection", skip(self))]
pub(crate) async fn add_connection(&self, actor: Actor) -> Result<(), Error> {
self.db.add_connection(actor.id.clone()).await?;
self.db.save_actor(actor).await
pub async fn follower(&self, actor: &Actor) -> Result<(), MyError> {
self.save(actor.clone()).await
}
#[tracing::instrument(level = "debug", name = "Remove Connection", skip(self))]
pub(crate) async fn remove_connection(&self, actor: &Actor) -> Result<(), Error> {
self.db.remove_connection(actor.id.clone()).await
pub async fn cache_follower(&self, id: XsdAnyUri) {
self.following.write().await.insert(id);
}
#[tracing::instrument(level = "debug", name = "Fetch remote actor", skip_all, fields(id = id.to_string().as_str()))]
pub(crate) async fn get_no_cache(
&self,
id: &IriString,
requests: &Requests,
) -> Result<Actor, Error> {
let accepted_actor = requests
.fetch::<AcceptedActors>(id, BreakerStrategy::Require2XX)
pub async fn bust_follower(&self, id: &XsdAnyUri) {
self.following.write().await.remove(id);
}
pub async fn unfollower(&self, actor: &Actor) -> Result<Option<Uuid>, MyError> {
let row_opt = self
.db
.pool()
.get()
.await?
.query_opt(
"DELETE FROM actors
WHERE actor_id = $1::TEXT
RETURNING listener_id;",
&[&actor.id.as_str()],
)
.await?;
let input_authority = id.authority_components().ok_or(ErrorKind::MissingDomain)?;
let accepted_actor_id = accepted_actor
.id(input_authority.host(), input_authority.port())?
.ok_or(ErrorKind::MissingId)?;
let inbox = get_inbox(&accepted_actor)?.clone();
let actor = Actor {
id: accepted_actor_id.clone(),
public_key: accepted_actor.ext_one.public_key.public_key_pem,
public_key_id: accepted_actor.ext_one.public_key.id,
inbox,
saved_at: SystemTime::now(),
let row = if let Some(row) = row_opt {
row
} else {
return Ok(None);
};
self.db.save_actor(actor.clone()).await?;
let listener_id: Uuid = row.try_get(0)?;
Ok(actor)
let row_opt = self
.db
.pool()
.get()
.await?
.query_opt(
"SELECT FROM actors
WHERE listener_id = $1::UUID;",
&[&listener_id],
)
.await?;
if row_opt.is_none() {
return Ok(Some(listener_id));
}
Ok(None)
}
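// Note: `SELECT FROM actors ...` with an empty column list is valid PostgreSQL; the
// second query above only checks whether any actor still references the listener
// before reporting the listener id as removable.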
async fn lookup(&self, id: &XsdAnyUri) -> Result<Option<Actor>, MyError> {
let row_opt = self
.db
.pool()
.get()
.await?
.query_opt(
"SELECT listeners.actor_id, actors.public_key, actors.public_key_id
FROM listeners
INNER JOIN actors ON actors.listener_id = listeners.id
WHERE
actors.actor_id = $1::TEXT
AND
actors.updated_at + INTERVAL '120 seconds' < NOW()
LIMIT 1;",
&[&id.as_str()],
)
.await?;
let row = if let Some(row) = row_opt {
row
} else {
return Ok(None);
};
let inbox: String = row.try_get(0)?;
let public_key_id: String = row.try_get(2)?;
Ok(Some(Actor {
id: id.clone(),
inbox: uri!(inbox),
public_key: row.try_get(1)?,
public_key_id: uri!(public_key_id),
}))
}
async fn save(&self, actor: Actor) -> Result<(), MyError> {
let row_opt = self
.db
.pool()
.get()
.await?
.query_opt(
"SELECT id FROM listeners WHERE actor_id = $1::TEXT LIMIT 1;",
&[&actor.inbox.as_str()],
)
.await?;
let row = if let Some(row) = row_opt {
row
} else {
return Err(MyError::NotSubscribed(actor.id.as_str().to_owned()));
};
let listener_id: Uuid = row.try_get(0)?;
self.db
.pool()
.get()
.await?
.execute(
"INSERT INTO actors (
actor_id,
public_key,
public_key_id,
listener_id,
created_at,
updated_at
) VALUES (
$1::TEXT,
$2::TEXT,
$3::TEXT,
$4::UUID,
'now',
'now'
) ON CONFLICT (actor_id)
DO UPDATE SET public_key = $2::TEXT;",
&[
&actor.id.as_str(),
&actor.public_key,
&actor.public_key_id.as_str(),
&listener_id,
],
)
.await?;
Ok(())
}
async fn update(
&self,
id: &XsdAnyUri,
public_key: &str,
public_key_id: &XsdAnyUri,
) -> Result<(), MyError> {
self.db
.pool()
.get()
.await?
.execute(
"UPDATE actors
SET public_key = $2::TEXT, public_key_id = $3::TEXT
WHERE actor_id = $1::TEXT;",
&[&id.as_str(), &public_key, &public_key_id.as_str()],
)
.await?;
Ok(())
}
fn spawn_rehydrate(&self) {
use actix_rt::time::{interval_at, Instant};
let this = self.clone();
actix_rt::spawn(async move {
let mut interval = interval_at(Instant::now(), Duration::from_secs(60 * 10));
loop {
if let Err(e) = this.rehydrate().await {
error!("Error rehydrating follows, {}", e);
}
interval.tick().await;
}
});
}
async fn rehydrate(&self) -> Result<(), MyError> {
let rows = self
.db
.pool()
.get()
.await?
.query("SELECT actor_id FROM actors;", &[])
.await?;
let actor_ids = rows
.into_iter()
.filter_map(|row| match row.try_get(0) {
Ok(s) => {
let s: String = s;
match s.parse() {
Ok(s) => Some(s),
Err(e) => {
error!("Error parsing actor id, {}", e);
None
}
}
}
Err(e) => {
error!("Error getting actor id from row, {}", e);
None
}
})
.collect();
let mut write_guard = self.following.write().await;
*write_guard = actor_ids;
Ok(())
}
}
fn get_inbox(actor: &AcceptedActors) -> Result<&IriString, Error> {
Ok(actor
.endpoints()?
fn get_inbox(actor: &AcceptedActors) -> &XsdAnyUri {
actor
.endpoints()
.and_then(|e| e.shared_inbox.as_ref())
.unwrap_or(actor.inbox()?))
.unwrap_or(actor.inbox())
}
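// Deliveries prefer an actor's sharedInbox endpoint and fall back to the personal
// inbox, so a relay can send one request per domain instead of one per actor.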
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct Actor {
pub id: XsdAnyUri,
pub public_key: String,
pub public_key_id: XsdAnyUri,
pub inbox: XsdAnyUri,
}


@ -1,28 +0,0 @@
use activitystreams::iri_string::types::IriStr;
use std::{collections::HashMap, sync::Mutex};
use time::OffsetDateTime;
pub(crate) struct LastOnline {
domains: Mutex<HashMap<String, OffsetDateTime>>,
}
impl LastOnline {
pub(crate) fn mark_seen(&self, iri: &IriStr) {
if let Some(authority) = iri.authority_str() {
let mut guard = self.domains.lock().unwrap();
guard.insert(authority.to_string(), OffsetDateTime::now_utc());
metrics::gauge!("relay.last-online.size",)
.set(crate::collector::recordable(guard.len()));
}
}
pub(crate) fn take(&self) -> HashMap<String, OffsetDateTime> {
std::mem::take(&mut *self.domains.lock().unwrap())
}
pub(crate) fn empty() -> Self {
Self {
domains: Mutex::new(HashMap::default()),
}
}
}


@ -1,33 +1,171 @@
use crate::{db::Db, error::Error};
use activitystreams::iri_string::types::IriString;
use crate::{db::Db, error::MyError};
use activitystreams_new::primitives::XsdAnyUri;
use async_mutex::Mutex;
use bytes::Bytes;
use futures::join;
use lru::LruCache;
use std::{collections::HashMap, sync::Arc, time::Duration};
use tokio::sync::RwLock;
use ttl_cache::TtlCache;
use uuid::Uuid;
#[derive(Clone, Debug)]
pub struct MediaCache {
static MEDIA_DURATION: Duration = Duration::from_secs(60 * 60 * 24 * 2);
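// Proxied media bytes are kept in the in-memory byte cache for two days.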
#[derive(Clone)]
pub struct Media {
db: Db,
inverse: Arc<Mutex<HashMap<XsdAnyUri, Uuid>>>,
url_cache: Arc<Mutex<LruCache<Uuid, XsdAnyUri>>>,
byte_cache: Arc<RwLock<TtlCache<Uuid, (String, Bytes)>>>,
}
impl MediaCache {
pub(crate) fn new(db: Db) -> Self {
MediaCache { db }
impl Media {
pub fn new(db: Db) -> Self {
Media {
db,
inverse: Arc::new(Mutex::new(HashMap::new())),
url_cache: Arc::new(Mutex::new(LruCache::new(128))),
byte_cache: Arc::new(RwLock::new(TtlCache::new(128))),
}
}
#[tracing::instrument(level = "debug", name = "Get media uuid", skip_all, fields(url = url.to_string().as_str()))]
pub(crate) async fn get_uuid(&self, url: IriString) -> Result<Option<Uuid>, Error> {
self.db.media_id(url).await
pub async fn get_uuid(&self, url: &XsdAnyUri) -> Result<Option<Uuid>, MyError> {
let res = self.inverse.lock().await.get(url).cloned();
let uuid = match res {
Some(uuid) => uuid,
_ => {
let row_opt = self
.db
.pool()
.get()
.await?
.query_opt(
"SELECT media_id
FROM media
WHERE url = $1::TEXT
LIMIT 1;",
&[&url.as_str()],
)
.await?;
if let Some(row) = row_opt {
let uuid: Uuid = row.try_get(0)?;
self.inverse.lock().await.insert(url.clone(), uuid);
uuid
} else {
return Ok(None);
}
}
};
if self.url_cache.lock().await.contains(&uuid) {
return Ok(Some(uuid));
}
let row_opt = self
.db
.pool()
.get()
.await?
.query_opt(
"SELECT id
FROM media
WHERE
url = $1::TEXT
AND
media_id = $2::UUID
LIMIT 1;",
&[&url.as_str(), &uuid],
)
.await?;
if row_opt.is_some() {
self.url_cache.lock().await.put(uuid, url.clone());
return Ok(Some(uuid));
}
self.inverse.lock().await.remove(url);
Ok(None)
}
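// get_uuid cross-checks the in-memory inverse map against the database, so a stale
// mapping (whose row has since been deleted) is evicted rather than returned.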
#[tracing::instrument(level = "debug", name = "Get media url", skip(self))]
pub(crate) async fn get_url(&self, uuid: Uuid) -> Result<Option<IriString>, Error> {
self.db.media_url(uuid).await
pub async fn get_url(&self, uuid: Uuid) -> Result<Option<XsdAnyUri>, MyError> {
if let Some(url) = self.url_cache.lock().await.get(&uuid).cloned() {
return Ok(Some(url));
}
let row_opt = self
.db
.pool()
.get()
.await?
.query_opt(
"SELECT url
FROM media
WHERE media_id = $1::UUID
LIMIT 1;",
&[&uuid],
)
.await?;
if let Some(row) = row_opt {
let url: String = row.try_get(0)?;
let url: XsdAnyUri = url.parse()?;
return Ok(Some(url));
}
Ok(None)
}
#[tracing::instrument(name = "Store media url", skip_all, fields(url = url.to_string().as_str()))]
pub(crate) async fn store_url(&self, url: IriString) -> Result<Uuid, Error> {
pub async fn get_bytes(&self, uuid: Uuid) -> Option<(String, Bytes)> {
self.byte_cache.read().await.get(&uuid).cloned()
}
pub async fn store_url(&self, url: &XsdAnyUri) -> Result<Uuid, MyError> {
let uuid = Uuid::new_v4();
self.db.save_url(url, uuid).await?;
let (_, _, res) = join!(
async {
self.inverse.lock().await.insert(url.clone(), uuid);
},
async {
self.url_cache.lock().await.put(uuid, url.clone());
},
async {
self.db
.pool()
.get()
.await?
.execute(
"INSERT INTO media (
media_id,
url,
created_at,
updated_at
) VALUES (
$1::UUID,
$2::TEXT,
'now',
'now'
) ON CONFLICT (media_id)
DO UPDATE SET url = $2::TEXT;",
&[&uuid, &url.as_str()],
)
.await?;
Ok(()) as Result<(), MyError>
}
);
res?;
Ok(uuid)
}
pub async fn store_bytes(&self, uuid: Uuid, content_type: String, bytes: Bytes) {
self.byte_cache
.write()
.await
.insert(uuid, (content_type, bytes), MEDIA_DURATION);
}
}

11
src/data/mod.rs Normal file

@ -0,0 +1,11 @@
mod actor;
mod media;
mod node;
mod state;
pub use self::{
actor::{Actor, ActorCache},
media::Media,
node::{Contact, Info, Instance, Node, NodeCache},
state::State,
};


@ -1,229 +1,444 @@
use crate::{
db::{Contact, Db, Info, Instance},
error::{Error, ErrorKind},
use crate::{db::Db, error::MyError};
use activitystreams_new::{primitives::XsdAnyUri, uri};
use log::{debug, error};
use std::{
collections::{HashMap, HashSet},
sync::Arc,
time::{Duration, SystemTime},
};
use activitystreams::{iri, iri_string::types::IriString};
use std::time::{Duration, SystemTime};
use tokio::sync::RwLock;
use tokio_postgres::types::Json;
use uuid::Uuid;
#[derive(Clone, Debug)]
pub type ListenersCache = Arc<RwLock<HashSet<XsdAnyUri>>>;
#[derive(Clone)]
pub struct NodeCache {
db: Db,
}
#[derive(Clone, serde::Deserialize, serde::Serialize)]
pub struct Node {
pub(crate) base: IriString,
pub(crate) info: Option<Info>,
pub(crate) instance: Option<Instance>,
pub(crate) contact: Option<Contact>,
}
impl std::fmt::Debug for Node {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Node")
.field("base", &self.base.to_string())
.field("info", &self.info)
.field("instance", &self.instance)
.field("contact", &self.contact)
.finish()
}
listeners: ListenersCache,
nodes: Arc<RwLock<HashMap<XsdAnyUri, Node>>>,
}
impl NodeCache {
pub(crate) fn new(db: Db) -> Self {
NodeCache { db }
pub fn new(db: Db, listeners: ListenersCache) -> Self {
NodeCache {
db,
listeners,
nodes: Arc::new(RwLock::new(HashMap::new())),
}
}
#[tracing::instrument(level = "debug", name = "Get nodes", skip(self))]
pub(crate) async fn nodes(&self) -> Result<Vec<Node>, Error> {
let infos = self.db.connected_info().await?;
let instances = self.db.connected_instance().await?;
let contacts = self.db.connected_contact().await?;
pub async fn nodes(&self) -> Vec<Node> {
let listeners: HashSet<_> = self.listeners.read().await.clone();
let vec = self
.db
.connected_ids()
.await?
.into_iter()
.map(move |actor_id| {
let info = infos.get(&actor_id).cloned();
let instance = instances.get(&actor_id).cloned();
let contact = contacts.get(&actor_id).cloned();
Node::new(actor_id).map(|node| node.info(info).instance(instance).contact(contact))
self.nodes
.read()
.await
.iter()
.filter_map(|(k, v)| {
if listeners.contains(k) {
Some(v.clone())
} else {
None
}
})
.collect::<Result<Vec<Node>, Error>>()?;
Ok(vec)
.collect()
}
#[tracing::instrument(level = "debug", name = "Is NodeInfo Outdated", skip_all, fields(actor_id = actor_id.to_string().as_str()))]
pub(crate) async fn is_nodeinfo_outdated(&self, actor_id: IriString) -> bool {
self.db
.info(actor_id)
.await
.map(|opt| opt.map(|info| info.outdated()).unwrap_or(true))
.unwrap_or(true)
pub async fn is_nodeinfo_outdated(&self, listener: &XsdAnyUri) -> bool {
let read_guard = self.nodes.read().await;
let node = match read_guard.get(listener) {
None => {
debug!("No node for listener {}", listener);
return true;
}
Some(node) => node,
};
match node.info.as_ref() {
Some(nodeinfo) => nodeinfo.outdated(),
None => {
debug!("No info for node {}", node.base);
true
}
}
}
#[tracing::instrument(level = "debug", name = "Is Contact Outdated", skip_all, fields(actor_id = actor_id.to_string().as_str()))]
pub(crate) async fn is_contact_outdated(&self, actor_id: IriString) -> bool {
self.db
.contact(actor_id)
.await
.map(|opt| opt.map(|contact| contact.outdated()).unwrap_or(true))
.unwrap_or(true)
pub async fn is_contact_outdated(&self, listener: &XsdAnyUri) -> bool {
let read_guard = self.nodes.read().await;
let node = match read_guard.get(listener) {
None => {
debug!("No node for listener {}", listener);
return true;
}
Some(node) => node,
};
match node.contact.as_ref() {
Some(contact) => contact.outdated(),
None => {
debug!("No contact for node {}", node.base);
true
}
}
}
#[tracing::instrument(level = "debug", name = "Is Instance Outdated", skip_all, fields(actor_id = actor_id.to_string().as_str()))]
pub(crate) async fn is_instance_outdated(&self, actor_id: IriString) -> bool {
self.db
.instance(actor_id)
.await
.map(|opt| opt.map(|instance| instance.outdated()).unwrap_or(true))
.unwrap_or(true)
pub async fn is_instance_outdated(&self, listener: &XsdAnyUri) -> bool {
let read_guard = self.nodes.read().await;
let node = match read_guard.get(listener) {
None => {
debug!("No node for listener {}", listener);
return true;
}
Some(node) => node,
};
match node.instance.as_ref() {
Some(instance) => instance.outdated(),
None => {
debug!("No instance for node {}", node.base);
true
}
}
}
#[tracing::instrument(level = "debug", name = "Save node info", skip_all, fields(actor_id = actor_id.to_string().as_str(), software, version, reg))]
pub(crate) async fn set_info(
pub async fn cache_by_id(&self, id: Uuid) {
if let Err(e) = self.do_cache_by_id(id).await {
error!("Error loading node into cache, {}", e);
}
}
pub async fn bust_by_id(&self, id: Uuid) {
if let Err(e) = self.do_bust_by_id(id).await {
error!("Error busting node cache, {}", e);
}
}
async fn do_bust_by_id(&self, id: Uuid) -> Result<(), MyError> {
let row_opt = self
.db
.pool()
.get()
.await?
.query_opt(
"SELECT ls.actor_id
FROM listeners AS ls
INNER JOIN nodes AS nd ON nd.listener_id = ls.id
WHERE nd.id = $1::UUID
LIMIT 1;",
&[&id],
)
.await?;
let row = if let Some(row) = row_opt {
row
} else {
return Ok(());
};
let listener: String = row.try_get(0)?;
self.nodes.write().await.remove(&uri!(listener));
Ok(())
}
async fn do_cache_by_id(&self, id: Uuid) -> Result<(), MyError> {
let row_opt = self
.db
.pool()
.get()
.await?
.query_opt(
"SELECT ls.actor_id, nd.nodeinfo, nd.instance, nd.contact
FROM nodes AS nd
INNER JOIN listeners AS ls ON nd.listener_id = ls.id
WHERE nd.id = $1::UUID
LIMIT 1;",
&[&id],
)
.await?;
let row = if let Some(row) = row_opt {
row
} else {
return Ok(());
};
let listener: String = row.try_get(0)?;
let listener = uri!(listener);
let info: Option<Json<Info>> = row.try_get(1)?;
let instance: Option<Json<Instance>> = row.try_get(2)?;
let contact: Option<Json<Contact>> = row.try_get(3)?;
{
let mut write_guard = self.nodes.write().await;
let node = write_guard
.entry(listener.clone())
.or_insert_with(|| Node::new(listener));
if let Some(info) = info {
node.info = Some(info.0);
}
if let Some(instance) = instance {
node.instance = Some(instance.0);
}
if let Some(contact) = contact {
node.contact = Some(contact.0);
}
}
Ok(())
}
pub async fn set_info(
&self,
actor_id: IriString,
listener: &XsdAnyUri,
software: String,
version: String,
reg: bool,
) -> Result<(), Error> {
self.db
.save_info(
actor_id,
Info {
software,
version,
reg,
updated: SystemTime::now(),
},
)
.await
) -> Result<(), MyError> {
if !self.listeners.read().await.contains(listener) {
let mut nodes = self.nodes.write().await;
nodes.remove(listener);
return Ok(());
}
let node = {
let mut write_guard = self.nodes.write().await;
let node = write_guard
.entry(listener.clone())
.or_insert_with(|| Node::new(listener.clone()));
node.set_info(software, version, reg);
node.clone()
};
self.save(listener, &node).await?;
Ok(())
}
#[tracing::instrument(
level = "debug",
name = "Save instance info",
skip_all,
fields(
actor_id = actor_id.to_string().as_str(),
title,
description,
version,
reg,
requires_approval
)
)]
pub(crate) async fn set_instance(
pub async fn set_instance(
&self,
actor_id: IriString,
listener: &XsdAnyUri,
title: String,
description: String,
version: String,
reg: bool,
requires_approval: bool,
) -> Result<(), Error> {
self.db
.save_instance(
actor_id,
Instance {
title,
description,
version,
reg,
requires_approval,
updated: SystemTime::now(),
},
)
.await
) -> Result<(), MyError> {
if !self.listeners.read().await.contains(listener) {
let mut nodes = self.nodes.write().await;
nodes.remove(listener);
return Ok(());
}
let node = {
let mut write_guard = self.nodes.write().await;
let node = write_guard
.entry(listener.clone())
.or_insert_with(|| Node::new(listener.clone()));
node.set_instance(title, description, version, reg, requires_approval);
node.clone()
};
self.save(listener, &node).await?;
Ok(())
}
#[tracing::instrument(
level = "debug",
name = "Save contact info",
skip_all,
fields(
actor_id = actor_id.to_string().as_str(),
username,
display_name,
url = url.to_string().as_str(),
avatar = avatar.to_string().as_str()
)
)]
pub(crate) async fn set_contact(
pub async fn set_contact(
&self,
actor_id: IriString,
listener: &XsdAnyUri,
username: String,
display_name: String,
url: IriString,
avatar: IriString,
) -> Result<(), Error> {
self.db
.save_contact(
actor_id,
Contact {
username,
display_name,
url,
avatar,
updated: SystemTime::now(),
},
url: XsdAnyUri,
avatar: XsdAnyUri,
) -> Result<(), MyError> {
if !self.listeners.read().await.contains(listener) {
let mut nodes = self.nodes.write().await;
nodes.remove(listener);
return Ok(());
}
let node = {
let mut write_guard = self.nodes.write().await;
let node = write_guard
.entry(listener.clone())
.or_insert_with(|| Node::new(listener.clone()));
node.set_contact(username, display_name, url, avatar);
node.clone()
};
self.save(listener, &node).await?;
Ok(())
}
pub async fn save(&self, listener: &XsdAnyUri, node: &Node) -> Result<(), MyError> {
let row_opt = self
.db
.pool()
.get()
.await?
.query_opt(
"SELECT id FROM listeners WHERE actor_id = $1::TEXT LIMIT 1;",
&[&listener.as_str()],
)
.await
.await?;
let id: Uuid = if let Some(row) = row_opt {
row.try_get(0)?
} else {
return Err(MyError::NotSubscribed(listener.as_str().to_owned()));
};
self.db
.pool()
.get()
.await?
.execute(
"INSERT INTO nodes (
listener_id,
nodeinfo,
instance,
contact,
created_at,
updated_at
) VALUES (
$1::UUID,
$2::JSONB,
$3::JSONB,
$4::JSONB,
'now',
'now'
) ON CONFLICT (listener_id)
DO UPDATE SET
nodeinfo = $2::JSONB,
instance = $3::JSONB,
contact = $4::JSONB;",
&[
&id,
&Json(&node.info),
&Json(&node.instance),
&Json(&node.contact),
],
)
.await?;
Ok(())
}
}
#[derive(Clone, Debug)]
pub struct Node {
pub base: XsdAnyUri,
pub info: Option<Info>,
pub instance: Option<Instance>,
pub contact: Option<Contact>,
}
impl Node {
fn new(url: IriString) -> Result<Self, Error> {
let authority = url.authority_str().ok_or(ErrorKind::MissingDomain)?;
let scheme = url.scheme_str();
pub fn new(mut uri: XsdAnyUri) -> Self {
let url = uri.as_mut();
url.set_fragment(None);
url.set_query(None);
url.set_path("");
let base = iri!(format!("{scheme}://{authority}"));
Ok(Node {
base,
Node {
base: uri,
info: None,
instance: None,
contact: None,
})
}
}
fn info(mut self, info: Option<Info>) -> Self {
self.info = info;
fn set_info(&mut self, software: String, version: String, reg: bool) -> &mut Self {
self.info = Some(Info {
software,
version,
reg,
updated: SystemTime::now(),
});
self
}
fn instance(mut self, instance: Option<Instance>) -> Self {
self.instance = instance;
fn set_instance(
&mut self,
title: String,
description: String,
version: String,
reg: bool,
requires_approval: bool,
) -> &mut Self {
self.instance = Some(Instance {
title,
description,
version,
reg,
requires_approval,
updated: SystemTime::now(),
});
self
}
fn contact(mut self, contact: Option<Contact>) -> Self {
self.contact = contact;
fn set_contact(
&mut self,
username: String,
display_name: String,
url: XsdAnyUri,
avatar: XsdAnyUri,
) -> &mut Self {
self.contact = Some(Contact {
username,
display_name,
url,
avatar,
updated: SystemTime::now(),
});
self
}
}
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct Info {
pub software: String,
pub version: String,
pub reg: bool,
pub updated: SystemTime,
}
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct Instance {
pub title: String,
pub description: String,
pub version: String,
pub reg: bool,
pub requires_approval: bool,
pub updated: SystemTime,
}
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct Contact {
pub username: String,
pub display_name: String,
pub url: XsdAnyUri,
pub avatar: XsdAnyUri,
pub updated: SystemTime,
}
static TEN_MINUTES: Duration = Duration::from_secs(60 * 10);
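// Node metadata (info, instance, contact) is refetched once it is more than ten
// minutes old, per the outdated() checks below.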
impl Info {
pub(crate) fn outdated(&self) -> bool {
pub fn outdated(&self) -> bool {
self.updated + TEN_MINUTES < SystemTime::now()
}
}
impl Instance {
pub(crate) fn outdated(&self) -> bool {
pub fn outdated(&self) -> bool {
self.updated + TEN_MINUTES < SystemTime::now()
}
}
impl Contact {
pub(crate) fn outdated(&self) -> bool {
pub fn outdated(&self) -> bool {
self.updated + TEN_MINUTES < SystemTime::now()
}
}


@ -1,136 +1,215 @@
use crate::{
config::{Config, UrlKind},
data::NodeCache,
db::Db,
error::Error,
requests::{Breakers, Requests},
spawner::Spawner,
error::MyError,
requests::Requests,
};
use activitystreams_new::primitives::XsdAnyUri;
use actix_rt::{
spawn,
time::{interval_at, Instant},
};
use activitystreams::iri_string::types::IriString;
use actix_web::web;
use futures::{join, try_join};
use log::{error, info};
use lru::LruCache;
use rand::thread_rng;
use reqwest_middleware::ClientWithMiddleware;
use rsa::{RsaPrivateKey, RsaPublicKey};
use std::sync::{Arc, RwLock};
use super::LastOnline;
use rsa::{RSAPrivateKey, RSAPublicKey};
use std::{collections::HashSet, sync::Arc, time::Duration};
use tokio::sync::RwLock;
#[derive(Clone)]
pub struct State {
pub(crate) requests: Requests,
pub(crate) public_key: RsaPublicKey,
object_cache: Arc<RwLock<LruCache<IriString, IriString>>>,
pub(crate) node_cache: NodeCache,
breakers: Breakers,
pub(crate) last_online: Arc<LastOnline>,
pub(crate) db: Db,
}
impl std::fmt::Debug for State {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("State")
.field("node_cache", &self.node_cache)
.field("breakers", &self.breakers)
.field("db", &self.db)
.finish()
}
pub public_key: RSAPublicKey,
private_key: RSAPrivateKey,
config: Config,
actor_id_cache: Arc<RwLock<LruCache<XsdAnyUri, XsdAnyUri>>>,
blocks: Arc<RwLock<HashSet<String>>>,
whitelists: Arc<RwLock<HashSet<String>>>,
listeners: Arc<RwLock<HashSet<XsdAnyUri>>>,
node_cache: NodeCache,
}
impl State {
#[tracing::instrument(
level = "debug",
name = "Get inboxes for other domains",
skip_all,
fields(
existing_inbox = existing_inbox.to_string().as_str(),
authority
pub fn node_cache(&self) -> NodeCache {
self.node_cache.clone()
}
pub fn requests(&self) -> Requests {
Requests::new(
self.config.generate_url(UrlKind::MainKey).to_string(),
self.private_key.clone(),
format!(
"Actix Web 3.0.0-alpha.1 ({}/{}; +{})",
self.config.software_name(),
self.config.software_version(),
self.config.generate_url(UrlKind::Index),
),
)
)]
pub(crate) async fn inboxes_without(
&self,
existing_inbox: &IriString,
authority: &str,
) -> Result<Vec<IriString>, Error> {
Ok(self
.db
.inboxes()
.await?
}
pub async fn bust_whitelist(&self, whitelist: &str) {
self.whitelists.write().await.remove(whitelist);
}
pub async fn bust_block(&self, block: &str) {
self.blocks.write().await.remove(block);
}
pub async fn bust_listener(&self, inbox: &XsdAnyUri) {
self.listeners.write().await.remove(inbox);
}
pub async fn listeners(&self) -> Vec<XsdAnyUri> {
self.listeners.read().await.iter().cloned().collect()
}
pub async fn blocks(&self) -> Vec<String> {
self.blocks.read().await.iter().cloned().collect()
}
pub async fn listeners_without(&self, inbox: &XsdAnyUri, domain: &str) -> Vec<XsdAnyUri> {
self.listeners
.read()
.await
.iter()
.filter_map(|inbox| {
if let Some(authority_str) = inbox.authority_str() {
if inbox != existing_inbox && authority_str != authority {
return Some(inbox.clone());
.filter_map(|listener| {
if let Some(dom) = listener.as_url().domain() {
if listener != inbox && dom != domain {
return Some(listener.clone());
}
}
None
})
.collect())
.collect()
}
pub(crate) fn is_cached(&self, object_id: &IriString) -> bool {
self.object_cache.read().unwrap().contains(object_id)
pub async fn is_whitelisted(&self, actor_id: &XsdAnyUri) -> bool {
if !self.config.whitelist_mode() {
return true;
}
if let Some(domain) = actor_id.as_url().domain() {
return self.whitelists.read().await.contains(domain);
}
false
}
pub(crate) fn cache(&self, object_id: IriString, actor_id: IriString) {
let mut guard = self.object_cache.write().unwrap();
guard.put(object_id, actor_id);
metrics::gauge!("relay.object-cache.size").set(crate::collector::recordable(guard.len()));
pub async fn is_blocked(&self, actor_id: &XsdAnyUri) -> bool {
if let Some(domain) = actor_id.as_url().domain() {
return self.blocks.read().await.contains(domain);
}
true
}
pub(crate) fn is_connected(&self, iri: &IriString) -> bool {
self.breakers.should_try(iri)
pub async fn is_listener(&self, actor_id: &XsdAnyUri) -> bool {
self.listeners.read().await.contains(actor_id)
}
#[tracing::instrument(level = "debug", name = "Building state", skip_all)]
pub(crate) async fn build(
db: Db,
key_id: String,
spawner: Spawner,
client: ClientWithMiddleware,
) -> Result<Self, Error> {
let private_key = if let Ok(Some(key)) = db.private_key().await {
tracing::debug!("Using existing key");
key
} else {
tracing::info!("Generating new keys");
let key = web::block(move || {
let mut rng = thread_rng();
RsaPrivateKey::new(&mut rng, 4096)
})
.await??;
pub async fn is_cached(&self, object_id: &XsdAnyUri) -> bool {
self.actor_id_cache.read().await.contains(object_id)
}
db.update_private_key(&key).await?;
pub async fn cache(&self, object_id: XsdAnyUri, actor_id: XsdAnyUri) {
self.actor_id_cache.write().await.put(object_id, actor_id);
}
key
};
pub async fn cache_block(&self, host: String) {
self.blocks.write().await.insert(host);
}
let public_key = private_key.to_public_key();
pub async fn cache_whitelist(&self, host: String) {
self.whitelists.write().await.insert(host);
}
let breakers = Breakers::default();
let last_online = Arc::new(LastOnline::empty());
pub async fn cache_listener(&self, listener: XsdAnyUri) {
self.listeners.write().await.insert(listener);
}
let requests = Requests::new(
key_id,
private_key,
breakers.clone(),
last_online.clone(),
spawner,
client,
pub async fn rehydrate(&self, db: &Db) -> Result<(), MyError> {
let f1 = db.hydrate_blocks();
let f2 = db.hydrate_whitelists();
let f3 = db.hydrate_listeners();
let (blocks, whitelists, listeners) = try_join!(f1, f2, f3)?;
join!(
async move {
*self.listeners.write().await = listeners;
},
async move {
*self.whitelists.write().await = whitelists;
},
async move {
*self.blocks.write().await = blocks;
}
);
let state = State {
requests,
public_key,
object_cache: Arc::new(RwLock::new(LruCache::new(
(1024 * 8).try_into().expect("nonzero"),
))),
node_cache: NodeCache::new(db.clone()),
breakers,
db,
last_online,
Ok(())
}
pub async fn hydrate(config: Config, db: &Db) -> Result<Self, MyError> {
let f1 = db.hydrate_blocks();
let f2 = db.hydrate_whitelists();
let f3 = db.hydrate_listeners();
let f4 = async move {
if let Ok(Some(key)) = db.hydrate_private_key().await {
Ok(key)
} else {
info!("Generating new keys");
let key = web::block(move || {
let mut rng = thread_rng();
RSAPrivateKey::new(&mut rng, 4096)
})
.await?;
db.update_private_key(&key).await?;
Ok(key)
}
};
let (blocks, whitelists, listeners, private_key) = try_join!(f1, f2, f3, f4)?;
let public_key = private_key.to_public_key();
let listeners = Arc::new(RwLock::new(listeners));
let state = State {
public_key,
private_key,
config,
actor_id_cache: Arc::new(RwLock::new(LruCache::new(1024 * 8))),
blocks: Arc::new(RwLock::new(blocks)),
whitelists: Arc::new(RwLock::new(whitelists)),
listeners: listeners.clone(),
node_cache: NodeCache::new(db.clone(), listeners),
};
state.spawn_rehydrate(db.clone());
Ok(state)
}
fn spawn_rehydrate(&self, db: Db) {
let state = self.clone();
spawn(async move {
let start = Instant::now();
let duration = Duration::from_secs(60 * 10);
let mut interval = interval_at(start, duration);
loop {
interval.tick().await;
if let Err(e) = state.rehydrate(&db).await {
error!("Error rehydrating, {}", e);
}
}
});
}
}
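For orientation: hydrate builds the State once at boot and spawns the ten-minute rehydrate loop above, which keeps the in-memory block/whitelist/listener sets in sync with Postgres. A minimal usage sketch, mirroring the call in src/main.rs later in this diff:

    // Sketch: hydrate State from the database once at startup;
    // hydrate() itself calls spawn_rehydrate internally.
    async fn boot(config: Config, db: &Db) -> Result<State, MyError> {
        State::hydrate(config, db).await
    }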

1066
src/db.rs

File diff suppressed because it is too large

View file

@ -1,169 +1,43 @@
use activitystreams::checked::CheckError;
use activitystreams_new::primitives::XsdAnyUriError;
use actix_web::{
error::{BlockingError, ResponseError},
http::StatusCode,
HttpResponse,
};
use background_jobs::BoxError;
use color_eyre::eyre::Error as Report;
use http_signature_normalization_reqwest::SignError;
use std::{convert::Infallible, io, sync::Arc};
use tokio::task::JoinError;
#[derive(Clone)]
struct ArcKind {
kind: Arc<ErrorKind>,
}
impl std::fmt::Debug for ArcKind {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.kind.fmt(f)
}
}
impl std::fmt::Display for ArcKind {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.kind.fmt(f)
}
}
impl std::error::Error for ArcKind {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
self.kind.source()
}
}
pub(crate) struct Error {
kind: ArcKind,
display: Box<str>,
debug: Box<str>,
}
impl Error {
fn kind(&self) -> &ErrorKind {
&self.kind.kind
}
pub(crate) fn is_breaker(&self) -> bool {
matches!(self.kind(), ErrorKind::Breaker)
}
pub(crate) fn is_not_found(&self) -> bool {
matches!(self.kind(), ErrorKind::Status(_, StatusCode::NOT_FOUND))
}
pub(crate) fn is_bad_request(&self) -> bool {
matches!(self.kind(), ErrorKind::Status(_, StatusCode::BAD_REQUEST))
}
pub(crate) fn is_gone(&self) -> bool {
matches!(self.kind(), ErrorKind::Status(_, StatusCode::GONE))
}
pub(crate) fn is_malformed_json(&self) -> bool {
matches!(self.kind(), ErrorKind::Json(_))
}
}
impl std::fmt::Debug for Error {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str(&self.debug)
}
}
impl std::fmt::Display for Error {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str(&self.display)
}
}
impl std::error::Error for Error {
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
self.kind().source()
}
}
impl<T> From<T> for Error
where
ErrorKind: From<T>,
{
fn from(error: T) -> Self {
let kind = ArcKind {
kind: Arc::new(ErrorKind::from(error)),
};
let report = Report::new(kind.clone());
let display = format!("{report}");
let debug = format!("{report:?}");
Error {
kind,
display: Box::from(display),
debug: Box::from(debug),
}
}
}
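This blanket conversion is what lets the rest of the crate use `?` on any error that ErrorKind can absorb: the kind is wrapped in an Arc and its display/debug renderings are captured once via color_eyre. A minimal sketch (the read_config_file name is hypothetical):

    // Sketch: std::io::Error -> ErrorKind::Io via #[from], then the
    // blanket impl above wraps it into Error with captured renderings.
    fn read_config_file(path: &str) -> Result<Vec<u8>, Error> {
        let bytes = std::fs::read(path)?;
        Ok(bytes)
    }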
use deadpool::managed::{PoolError, TimeoutType};
use http_signature_normalization_actix::PrepareSignError;
use log::error;
use rsa_pem::KeyError;
use std::{convert::Infallible, fmt::Debug, io::Error};
#[derive(Debug, thiserror::Error)]
pub(crate) enum ErrorKind {
#[error("Error in extractor")]
Extractor(#[from] crate::extractors::ErrorKind),
pub enum MyError {
#[error("Error queueing job, {0}")]
Queue(anyhow::Error),
#[error("Error queueing job")]
Queue(#[from] BoxError),
#[error("Error in configuration")]
#[error("Error in configuration, {0}")]
Config(#[from] config::ConfigError),
#[error("Couldn't parse key")]
Pkcs8(#[from] rsa::pkcs8::Error),
#[error("Error in db, {0}")]
DbError(#[from] tokio_postgres::error::Error),
#[error("Couldn't encode public key")]
Spki(#[from] rsa::pkcs8::spki::Error),
#[error("Couldn't parse key, {0}")]
Key(#[from] KeyError),
#[error("Couldn't sign request")]
SignRequest,
#[error("Couldn't parse URI, {0}")]
Uri(#[from] XsdAnyUriError),
#[error("Response body from server exceeded limits")]
BodyTooLarge,
#[error("Couldn't make request")]
Reqwest(#[from] reqwest::Error),
#[error("Couldn't make request")]
ReqwestMiddleware(#[from] reqwest_middleware::Error),
#[error("Couldn't parse IRI")]
ParseIri(#[from] activitystreams::iri_string::validate::Error),
#[error("Couldn't normalize IRI")]
NormalizeIri(#[from] std::collections::TryReserveError),
#[error("Couldn't perform IO")]
Io(#[from] io::Error),
#[error("Couldn't perform IO, {0}")]
Io(#[from] Error),
#[error("Couldn't sign string, {0}")]
Rsa(rsa::errors::Error),
#[error("Couldn't use db")]
Sled(#[from] sled::Error),
#[error("Couldn't do the json thing")]
Json(#[from] serde_json::Error),
#[error("Couldn't sign request")]
Sign(#[from] SignError),
#[error("Couldn't sign digest")]
Signature(#[from] rsa::signature::Error),
#[error("Couldn't prepare TLS private key")]
PrepareKey(#[from] rustls::Error),
#[error("Couldn't verify signature")]
VerifySignature,
#[error("Failed to encode key der")]
DerEncode,
#[error("Couldn't build signing string, {0}")]
PrepareSign(#[from] PrepareSignError),
#[error("Couldn't parse the signature header")]
HeaderValidation(#[from] actix_web::http::header::InvalidHeaderValue),
@ -174,8 +48,11 @@ pub(crate) enum ErrorKind {
#[error("Actor ({0}), or Actor's server, is not subscribed")]
NotSubscribed(String),
#[error("Actor is not allowed, {0}")]
NotAllowed(String),
#[error("Actor is blocked, {0}")]
Blocked(String),
#[error("Actor is not whitelisted, {0}")]
Whitelist(String),
#[error("Cannot make decisions for foreign actor, {0}")]
WrongActor(String),
@ -184,20 +61,26 @@ pub(crate) enum ErrorKind {
BadActor(String, String),
#[error("Signature verification is required, but no signature was given")]
NoSignature(Option<String>),
NoSignature(String),
#[error("Wrong ActivityPub kind, {0}")]
Kind(String),
#[error("Too many CPUs")]
#[error("Too many CPUs, {0}")]
CpuCount(#[from] std::num::TryFromIntError),
#[error("Host mismatch")]
HostMismatch(#[from] CheckError),
#[error("Hosts don't match, {0}, {1}")]
HostMismatch(String, String),
#[error("Invalid or missing content type")]
ContentType,
#[error("Couldn't flush buffer")]
FlushBuffer,
#[error("Timed out while waiting on db pool, {0:?}")]
DbTimeout(TimeoutType),
#[error("Invalid algorithm provided to verifier, {0}")]
Algorithm(String),
@ -210,8 +93,8 @@ pub(crate) enum ErrorKind {
#[error("Couldn't receive request response from {0}, {1}")]
ReceiveResponse(String, String),
#[error("Response from {0} has invalid status code, {1}")]
Status(String, StatusCode),
#[error("Response has invalid status code, {0}")]
Status(StatusCode),
#[error("Expected an Object, found something else")]
ObjectFormat,
@ -225,86 +108,70 @@ pub(crate) enum ErrorKind {
#[error("Input is missing a 'id' field")]
MissingId,
#[error("IriString is missing a domain")]
MissingDomain,
#[error("URI is missing domain field")]
Domain,
#[error("Blocking operation was canceled")]
Canceled,
#[error("Not trying request due to failed breaker")]
Breaker,
#[error("Failed to extract fields from {0}")]
Extract(&'static str),
#[error("No API Token supplied")]
MissingApiToken,
}
impl ResponseError for Error {
impl ResponseError for MyError {
fn status_code(&self) -> StatusCode {
match self.kind() {
ErrorKind::NotAllowed(_) | ErrorKind::WrongActor(_) | ErrorKind::BadActor(_, _) => {
StatusCode::FORBIDDEN
match self {
MyError::Blocked(_)
| MyError::Whitelist(_)
| MyError::WrongActor(_)
| MyError::BadActor(_, _) => StatusCode::FORBIDDEN,
MyError::NotSubscribed(_) => StatusCode::UNAUTHORIZED,
MyError::Duplicate => StatusCode::ACCEPTED,
MyError::Kind(_) | MyError::MissingKind | MyError::MissingId | MyError::ObjectCount => {
StatusCode::BAD_REQUEST
}
ErrorKind::NotSubscribed(_) => StatusCode::UNAUTHORIZED,
ErrorKind::Duplicate => StatusCode::ACCEPTED,
ErrorKind::Kind(_)
| ErrorKind::MissingKind
| ErrorKind::MissingId
| ErrorKind::ObjectCount
| ErrorKind::NoSignature(_) => StatusCode::BAD_REQUEST,
_ => StatusCode::INTERNAL_SERVER_ERROR,
}
}
fn error_response(&self) -> HttpResponse {
HttpResponse::build(self.status_code())
.insert_header(("Content-Type", "application/activity+json"))
.body(
serde_json::to_string(&serde_json::json!({
"error": self.kind().to_string(),
}))
.unwrap_or_else(|_| "{}".to_string()),
)
.header("Content-Type", "application/activity+json")
.json(serde_json::json!({
"error": self.to_string(),
}))
}
}
impl From<BlockingError> for ErrorKind {
fn from(_: BlockingError) -> Self {
ErrorKind::Canceled
impl<T> From<BlockingError<T>> for MyError
where
T: Into<MyError> + Debug,
{
fn from(e: BlockingError<T>) -> Self {
match e {
BlockingError::Error(e) => e.into(),
BlockingError::Canceled => MyError::Canceled,
}
}
}
impl From<JoinError> for ErrorKind {
fn from(_: JoinError) -> Self {
ErrorKind::Canceled
impl<T> From<PoolError<T>> for MyError
where
T: Into<MyError>,
{
fn from(e: PoolError<T>) -> Self {
match e {
PoolError::Backend(e) => e.into(),
PoolError::Timeout(t) => MyError::DbTimeout(t),
}
}
}
impl From<Infallible> for ErrorKind {
impl From<Infallible> for MyError {
fn from(i: Infallible) -> Self {
match i {}
}
}
impl From<rsa::errors::Error> for ErrorKind {
impl From<rsa::errors::Error> for MyError {
fn from(e: rsa::errors::Error) -> Self {
ErrorKind::Rsa(e)
}
}
impl From<http_signature_normalization_actix::Canceled> for ErrorKind {
fn from(_: http_signature_normalization_actix::Canceled) -> Self {
Self::Canceled
}
}
impl From<http_signature_normalization_reqwest::Canceled> for ErrorKind {
fn from(_: http_signature_normalization_reqwest::Canceled) -> Self {
Self::Canceled
MyError::Rsa(e)
}
}

View file

@ -1,202 +0,0 @@
use actix_web::{
dev::Payload,
error::ParseError,
http::header::{from_one_raw_str, Header, HeaderName, HeaderValue, TryIntoHeaderValue},
web::Data,
FromRequest, HttpMessage, HttpRequest,
};
use bcrypt::{BcryptError, DEFAULT_COST};
use http_signature_normalization_actix::{prelude::InvalidHeaderValue, Canceled, Spawn};
use std::{convert::Infallible, str::FromStr, time::Instant};
use crate::{db::Db, error::Error, future::LocalBoxFuture, spawner::Spawner};
#[derive(Clone)]
pub(crate) struct AdminConfig {
hashed_api_token: String,
}
impl AdminConfig {
pub(crate) fn build(api_token: &str) -> Result<Self, Error> {
Ok(AdminConfig {
hashed_api_token: bcrypt::hash(api_token, DEFAULT_COST).map_err(Error::bcrypt_hash)?,
})
}
fn verify(&self, token: XApiToken) -> Result<bool, Error> {
bcrypt::verify(token.0, &self.hashed_api_token).map_err(Error::bcrypt_verify)
}
}
pub(crate) struct Admin {
db: Data<Db>,
}
type PrepareTuple = (Data<Db>, Data<AdminConfig>, Data<Spawner>, XApiToken);
impl Admin {
fn prepare_verify(req: &HttpRequest) -> Result<PrepareTuple, Error> {
let hashed_api_token = req
.app_data::<Data<AdminConfig>>()
.ok_or_else(Error::missing_config)?
.clone();
let x_api_token = XApiToken::parse(req).map_err(Error::parse_header)?;
let db = req
.app_data::<Data<Db>>()
.ok_or_else(Error::missing_db)?
.clone();
let spawner = req
.app_data::<Data<Spawner>>()
.ok_or_else(Error::missing_spawner)?
.clone();
Ok((db, hashed_api_token, spawner, x_api_token))
}
#[tracing::instrument(level = "debug", skip_all)]
async fn verify(
hashed_api_token: Data<AdminConfig>,
spawner: Data<Spawner>,
x_api_token: XApiToken,
) -> Result<(), Error> {
let span = tracing::Span::current();
if spawner
.spawn_blocking(move || span.in_scope(|| hashed_api_token.verify(x_api_token)))
.await
.map_err(Error::canceled)??
{
return Ok(());
}
Err(Error::invalid())
}
pub(crate) fn db_ref(&self) -> &Db {
&self.db
}
}
impl Error {
fn invalid() -> Self {
Error::from(ErrorKind::Invalid)
}
fn missing_config() -> Self {
Error::from(ErrorKind::MissingConfig)
}
fn missing_db() -> Self {
Error::from(ErrorKind::MissingDb)
}
fn missing_spawner() -> Self {
Error::from(ErrorKind::MissingSpawner)
}
fn bcrypt_verify(e: BcryptError) -> Self {
Error::from(ErrorKind::BCryptVerify(e))
}
fn bcrypt_hash(e: BcryptError) -> Self {
Error::from(ErrorKind::BCryptHash(e))
}
fn parse_header(e: ParseError) -> Self {
Error::from(ErrorKind::ParseHeader(e))
}
fn canceled(_: Canceled) -> Self {
Error::from(ErrorKind::Canceled)
}
}
#[derive(Debug, thiserror::Error)]
pub(crate) enum ErrorKind {
#[error("Invalid API Token")]
Invalid,
#[error("Missing Config")]
MissingConfig,
#[error("Missing Db")]
MissingDb,
#[error("Missing Spawner")]
MissingSpawner,
#[error("Panic in verify")]
Canceled,
#[error("Verifying")]
BCryptVerify(#[source] BcryptError),
#[error("Hashing")]
BCryptHash(#[source] BcryptError),
#[error("Parse Header")]
ParseHeader(#[source] ParseError),
}
impl FromRequest for Admin {
type Error = Error;
type Future = LocalBoxFuture<'static, Result<Self, Self::Error>>;
fn from_request(req: &HttpRequest, _: &mut Payload) -> Self::Future {
let now = Instant::now();
let res = Self::prepare_verify(req);
Box::pin(async move {
let (db, c, s, t) = res?;
Self::verify(c, s, t).await?;
metrics::histogram!("relay.admin.verify")
.record(now.elapsed().as_micros() as f64 / 1_000_000_f64);
Ok(Admin { db })
})
}
}
pub(crate) struct XApiToken(String);
impl XApiToken {
pub(crate) fn new(token: String) -> Self {
Self(token)
}
pub(crate) const fn http1_name() -> reqwest::header::HeaderName {
reqwest::header::HeaderName::from_static("x-api-token")
}
}
impl Header for XApiToken {
fn name() -> HeaderName {
HeaderName::from_static("x-api-token")
}
fn parse<M: HttpMessage>(msg: &M) -> Result<Self, ParseError> {
from_one_raw_str(msg.headers().get(Self::name()))
}
}
impl TryIntoHeaderValue for XApiToken {
type Error = InvalidHeaderValue;
fn try_into_value(self) -> Result<HeaderValue, Self::Error> {
HeaderValue::from_str(&self.0)
}
}
impl FromStr for XApiToken {
type Err = Infallible;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(XApiToken(s.to_string()))
}
}
impl std::fmt::Display for XApiToken {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.0.fmt(f)
}
}
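For context, a hedged client-side sketch of authenticating against an admin route with this header; the URL, path, and token below are placeholders, not values from this diff:

    // Sketch: send the x-api-token header that the Admin extractor
    // verifies against the bcrypt-hashed server token.
    async fn call_admin_api(client: &reqwest::Client) -> Result<(), reqwest::Error> {
        client
            .post("http://localhost:8080/api/v1/admin/allow") // placeholder route
            .header(XApiToken::http1_name(), "my-api-token") // placeholder token
            .send()
            .await?
            .error_for_status()?;
        Ok(())
    }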

View file

@ -1,4 +0,0 @@
use std::{future::Future, pin::Pin};
pub(crate) type LocalBoxFuture<'a, T> = Pin<Box<dyn Future<Output = T> + 'a>>;
pub(crate) type BoxFuture<'a, T> = Pin<Box<dyn Future<Output = T> + Send + 'a>>;

View file

@ -1,18 +0,0 @@
pub(crate) fn name_to_http02(
name: &reqwest::header::HeaderName,
) -> actix_web::http::header::HeaderName {
actix_web::http::header::HeaderName::from_bytes(name.as_ref())
.expect("headername conversions always work")
}
pub(crate) fn value_to_http02(
value: &reqwest::header::HeaderValue,
) -> actix_web::http::header::HeaderValue {
actix_web::http::header::HeaderValue::from_bytes(value.as_bytes())
.expect("headervalue conversions always work")
}
pub(crate) fn status_to_http02(status: reqwest::StatusCode) -> actix_web::http::StatusCode {
actix_web::http::StatusCode::from_u16(status.as_u16())
.expect("statuscode conversions always work")
}
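These converters bridge reqwest's header and status types to actix-web's. A hedged sketch of how they might be combined when mirroring an upstream response (the mirror_parts helper is illustrative, not part of this diff):

    // Illustrative: copy status and headers from a reqwest response
    // onto an actix-web response builder using the converters above.
    fn mirror_parts(resp: &reqwest::Response) -> actix_web::HttpResponseBuilder {
        let mut builder = actix_web::HttpResponse::build(status_to_http02(resp.status()));
        for (name, value) in resp.headers() {
            builder.insert_header((name_to_http02(name), value_to_http02(value)));
        }
        builder
    }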

View file

@ -1,201 +0,0 @@
pub mod apub;
mod contact;
mod deliver;
mod deliver_many;
mod instance;
mod nodeinfo;
mod process_listeners;
mod record_last_online;
pub(crate) use self::{
contact::QueryContact, deliver::Deliver, deliver_many::DeliverMany, instance::QueryInstance,
nodeinfo::QueryNodeinfo,
};
use crate::{
config::Config,
data::{ActorCache, MediaCache, State},
error::{Error, ErrorKind},
jobs::{process_listeners::Listeners, record_last_online::RecordLastOnline},
};
use background_jobs::{
memory_storage::{Storage, TokioTimer},
metrics::MetricsStorage,
tokio::{QueueHandle, WorkerConfig},
Job,
};
use std::time::Duration;
fn debug_object(activity: &serde_json::Value) -> &serde_json::Value {
let mut object = &activity["object"]["type"];
if object.is_null() {
object = &activity["object"]["id"];
}
if object.is_null() {
object = &activity["object"];
}
object
}
pub(crate) fn build_storage() -> MetricsStorage<Storage<TokioTimer>> {
MetricsStorage::wrap(Storage::new(TokioTimer))
}
pub(crate) fn create_workers(
storage: MetricsStorage<Storage<TokioTimer>>,
state: State,
actors: ActorCache,
media: MediaCache,
config: Config,
) -> std::io::Result<JobServer> {
let deliver_concurrency = config.deliver_concurrency();
let queue_handle = WorkerConfig::new(storage, move |queue_handle| {
JobState::new(
state.clone(),
actors.clone(),
JobServer::new(queue_handle),
media.clone(),
config.clone(),
)
})
.register::<Deliver>()
.register::<DeliverMany>()
.register::<QueryNodeinfo>()
.register::<QueryInstance>()
.register::<Listeners>()
.register::<QueryContact>()
.register::<RecordLastOnline>()
.register::<apub::Announce>()
.register::<apub::Follow>()
.register::<apub::Forward>()
.register::<apub::Reject>()
.register::<apub::Undo>()
.set_worker_count("maintenance", 2)
.set_worker_count("apub", 2)
.set_worker_count("deliver", deliver_concurrency)
.start()?;
queue_handle.every(Duration::from_secs(60 * 5), Listeners)?;
queue_handle.every(Duration::from_secs(60 * 10), RecordLastOnline)?;
Ok(JobServer::new(queue_handle))
}
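A hedged sketch of enqueueing work through the returned handle (the refresh_instance wrapper is hypothetical; QueryInstance and queue are as above):

    // Sketch: queue a maintenance job; queue() serializes the job and
    // hands it to the background-jobs runtime.
    async fn refresh_instance(job_server: &JobServer, actor_id: IriString) -> Result<(), Error> {
        job_server.queue(QueryInstance::new(actor_id)).await
    }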
#[derive(Clone, Debug)]
pub(crate) struct JobState {
state: State,
actors: ActorCache,
config: Config,
media: MediaCache,
job_server: JobServer,
}
#[derive(Clone)]
pub(crate) struct JobServer {
remote: QueueHandle,
}
impl std::fmt::Debug for JobServer {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("JobServer")
.field("queue_handle", &"QueueHandle")
.finish()
}
}
impl JobState {
fn new(
state: State,
actors: ActorCache,
job_server: JobServer,
media: MediaCache,
config: Config,
) -> Self {
JobState {
state,
actors,
config,
media,
job_server,
}
}
}
impl JobServer {
fn new(remote_handle: QueueHandle) -> Self {
JobServer {
remote: remote_handle,
}
}
pub(crate) async fn queue<J>(&self, job: J) -> Result<(), Error>
where
J: Job,
{
self.remote
.queue(job)
.await
.map_err(ErrorKind::Queue)
.map_err(Into::into)
}
}
struct Boolish {
inner: bool,
}
impl std::ops::Deref for Boolish {
type Target = bool;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl<'de> serde::Deserialize<'de> for Boolish {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
#[derive(serde::Deserialize)]
#[serde(untagged)]
enum BoolThing {
Bool(bool),
String(String),
}
let thing: BoolThing = serde::Deserialize::deserialize(deserializer)?;
match thing {
BoolThing::Bool(inner) => Ok(Boolish { inner }),
BoolThing::String(s) if s.to_lowercase() == "false" => Ok(Boolish { inner: false }),
BoolThing::String(_) => Ok(Boolish { inner: true }),
}
}
}
#[cfg(test)]
mod tests {
use super::Boolish;
#[test]
fn boolish_works() {
const CASES: &[(&str, bool)] = &[
("false", false),
("\"false\"", false),
("\"FALSE\"", false),
("true", true),
("\"true\"", true),
("\"anything else\"", true),
];
for (case, output) in CASES {
let b: Boolish = serde_json::from_str(case).unwrap();
assert_eq!(*b, *output);
}
}
}
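A small usage sketch (the Settings struct is hypothetical): Boolish accepts either a JSON boolean or a string, and anything other than a case-insensitive "false" string is treated as true:

    // Sketch: deserialize a boolean-ish config value; Deref exposes
    // the inner bool.
    #[derive(serde::Deserialize)]
    struct Settings {
        debug: Boolish, // hypothetical field
    }

    fn parse_settings() -> Result<bool, serde_json::Error> {
        let settings: Settings = serde_json::from_str(r#"{"debug":"FALSE"}"#)?;
        Ok(*settings.debug) // false
    }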

View file

@ -1,48 +1,37 @@
use crate::{
config::{Config, UrlKind},
db::Actor,
error::Error,
future::BoxFuture,
data::Actor,
error::MyError,
jobs::{
apub::{get_inboxes, prepare_activity},
DeliverMany, JobState,
},
};
use activitystreams::{activity::Announce as AsAnnounce, iri_string::types::IriString};
use background_jobs::Job;
use activitystreams_new::{activity::Announce as AsAnnounce, primitives::XsdAnyUri};
use background_jobs::ActixJob;
use std::{future::Future, pin::Pin};
#[derive(Clone, serde::Deserialize, serde::Serialize)]
pub(crate) struct Announce {
object_id: IriString,
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct Announce {
object_id: XsdAnyUri,
actor: Actor,
}
impl std::fmt::Debug for Announce {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Announce")
.field("object_id", &self.object_id.to_string())
.field("actor_id", &self.actor.id)
.finish()
}
}
impl Announce {
pub fn new(object_id: IriString, actor: Actor) -> Self {
pub fn new(object_id: XsdAnyUri, actor: Actor) -> Self {
Announce { object_id, actor }
}
#[tracing::instrument(name = "Announce", skip(state))]
async fn perform(self, state: JobState) -> Result<(), Error> {
async fn perform(self, state: JobState) -> Result<(), anyhow::Error> {
let activity_id = state.config.generate_url(UrlKind::Activity);
let announce = generate_announce(&state.config, &activity_id, &self.object_id)?;
let inboxes = get_inboxes(&state.state, &self.actor, &self.object_id).await?;
state
.job_server
.queue(DeliverMany::new(inboxes, announce)?)
.await?;
.queue(DeliverMany::new(inboxes, announce)?)?;
state.state.cache(self.object_id, activity_id);
state.state.cache(self.object_id, activity_id).await;
Ok(())
}
}
@ -50,9 +39,9 @@ impl Announce {
// Generate a type that says "Look at this object"
fn generate_announce(
config: &Config,
activity_id: &IriString,
object_id: &IriString,
) -> Result<AsAnnounce, Error> {
activity_id: &XsdAnyUri,
object_id: &XsdAnyUri,
) -> Result<AsAnnounce, MyError> {
let announce = AsAnnounce::new(config.generate_url(UrlKind::Actor), object_id.clone());
prepare_activity(
@ -62,13 +51,11 @@ fn generate_announce(
)
}
impl Job for Announce {
impl ActixJob for Announce {
type State = JobState;
type Error = Error;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
const NAME: &'static str = "relay::jobs::apub::Announce";
const QUEUE: &'static str = "apub";
fn run(self, state: Self::State) -> Self::Future {
Box::pin(self.perform(state))

View file

@ -1,77 +1,60 @@
use crate::{
apub::AcceptedActivities,
config::{Config, UrlKind},
db::Actor,
error::{Error, ErrorKind},
future::BoxFuture,
jobs::{apub::prepare_activity, Deliver, JobState, QueryInstance, QueryNodeinfo},
data::Actor,
error::MyError,
jobs::{apub::prepare_activity, Deliver, JobState},
};
use activitystreams::{
use activitystreams_new::{
activity::{Accept as AsAccept, Follow as AsFollow},
iri_string::types::IriString,
prelude::*,
primitives::XsdAnyUri,
};
use background_jobs::Job;
use background_jobs::ActixJob;
use std::{future::Future, pin::Pin};
#[derive(Clone, serde::Deserialize, serde::Serialize)]
pub(crate) struct Follow {
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct Follow {
is_listener: bool,
input: AcceptedActivities,
actor: Actor,
}
impl std::fmt::Debug for Follow {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Follow")
.field("input", &self.input.id_unchecked())
.field("actor", &self.actor.id)
.finish()
}
}
impl Follow {
pub fn new(input: AcceptedActivities, actor: Actor) -> Self {
Follow { input, actor }
pub fn new(is_listener: bool, input: AcceptedActivities, actor: Actor) -> Self {
Follow {
is_listener,
input,
actor,
}
}
#[tracing::instrument(name = "Follow", skip(state))]
async fn perform(self, state: JobState) -> Result<(), Error> {
async fn perform(self, state: JobState) -> Result<(), anyhow::Error> {
if !self.is_listener {
state.db.add_listener(self.actor.inbox.clone()).await?;
}
let my_id = state.config.generate_url(UrlKind::Actor);
// if the actor is following the relay directly, not just following 'public', follow back
if self.input.object_is(&my_id)
&& !state.state.db.is_connected(self.actor.id.clone()).await?
{
if self.input.object_is(&my_id) && !state.actors.is_following(&self.actor.id).await {
let follow = generate_follow(&state.config, &self.actor.id, &my_id)?;
state
.job_server
.queue(Deliver::new(self.actor.inbox.clone(), follow)?)
.await?;
.queue(Deliver::new(self.actor.inbox.clone(), follow)?)?;
}
state.actors.add_connection(self.actor.clone()).await?;
state.actors.follower(&self.actor).await?;
let accept = generate_accept_follow(
&state.config,
&self.actor.id,
self.input.id_unchecked().ok_or(ErrorKind::MissingId)?,
self.input.id().ok_or(MyError::MissingId)?,
&my_id,
)?;
state
.job_server
.queue(Deliver::new(self.actor.inbox, accept)?)
.await?;
state
.job_server
.queue(QueryInstance::new(self.actor.id.clone()))
.await?;
state
.job_server
.queue(QueryNodeinfo::new(self.actor.id))
.await?;
.queue(Deliver::new(self.actor.inbox, accept)?)?;
Ok(())
}
}
@ -79,9 +62,9 @@ impl Follow {
// Generate a type that says "I want to follow you"
fn generate_follow(
config: &Config,
actor_id: &IriString,
my_id: &IriString,
) -> Result<AsFollow, Error> {
actor_id: &XsdAnyUri,
my_id: &XsdAnyUri,
) -> Result<AsFollow, MyError> {
let follow = AsFollow::new(my_id.clone(), actor_id.clone());
prepare_activity(
@ -94,10 +77,10 @@ fn generate_follow(
// Generate a type that says "I accept your follow request"
fn generate_accept_follow(
config: &Config,
actor_id: &IriString,
input_id: &IriString,
my_id: &IriString,
) -> Result<AsAccept, Error> {
actor_id: &XsdAnyUri,
input_id: &XsdAnyUri,
my_id: &XsdAnyUri,
) -> Result<AsAccept, MyError> {
let mut follow = AsFollow::new(actor_id.clone(), my_id.clone());
follow.set_id(input_id.clone());
@ -111,13 +94,11 @@ fn generate_accept_follow(
)
}
impl Job for Follow {
impl ActixJob for Follow {
type State = JobState;
type Error = Error;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
const NAME: &'static str = "relay::jobs::apub::Follow";
const QUEUE: &'static str = "apub";
fn run(self, state: Self::State) -> Self::Future {
Box::pin(self.perform(state))

View file

@ -1,59 +1,46 @@
use crate::{
apub::AcceptedActivities,
db::Actor,
error::{Error, ErrorKind},
future::BoxFuture,
data::Actor,
error::MyError,
jobs::{apub::get_inboxes, DeliverMany, JobState},
};
use activitystreams::prelude::*;
use background_jobs::Job;
use activitystreams_new::prelude::*;
use background_jobs::ActixJob;
use std::{future::Future, pin::Pin};
#[derive(Clone, serde::Deserialize, serde::Serialize)]
pub(crate) struct Forward {
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct Forward {
input: AcceptedActivities,
actor: Actor,
}
impl std::fmt::Debug for Forward {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Forward")
.field("input", &self.input.id_unchecked())
.field("actor", &self.actor.id)
.finish()
}
}
impl Forward {
pub fn new(input: AcceptedActivities, actor: Actor) -> Self {
Forward { input, actor }
}
#[tracing::instrument(name = "Forward", skip(state))]
async fn perform(self, state: JobState) -> Result<(), Error> {
async fn perform(self, state: JobState) -> Result<(), anyhow::Error> {
let object_id = self
.input
.object_unchecked()
.object()
.as_single_id()
.ok_or(ErrorKind::MissingId)?;
.ok_or(MyError::MissingId)?;
let inboxes = get_inboxes(&state.state, &self.actor, object_id).await?;
state
.job_server
.queue(DeliverMany::new(inboxes, self.input)?)
.await?;
.queue(DeliverMany::new(inboxes, self.input)?)?;
Ok(())
}
}
impl Job for Forward {
impl ActixJob for Forward {
type State = JobState;
type Error = Error;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
const NAME: &'static str = "relay::jobs::apub::Forward";
const QUEUE: &'static str = "apub";
fn run(self, state: Self::State) -> Self::Future {
Box::pin(self.perform(state))

View file

@ -1,14 +1,13 @@
use crate::{
config::{Config, UrlKind},
data::State,
db::Actor,
error::{Error, ErrorKind},
data::{Actor, State},
error::MyError,
};
use activitystreams::{
use activitystreams_new::{
activity::{Follow as AsFollow, Undo as AsUndo},
context,
iri_string::types::IriString,
prelude::*,
primitives::XsdAnyUri,
security,
};
use std::convert::TryInto;
@ -19,31 +18,30 @@ mod forward;
mod reject;
mod undo;
pub(crate) use self::{
announce::Announce, follow::Follow, forward::Forward, reject::Reject, undo::Undo,
};
pub use self::{announce::Announce, follow::Follow, forward::Forward, reject::Reject, undo::Undo};
async fn get_inboxes(
state: &State,
actor: &Actor,
object_id: &IriString,
) -> Result<Vec<IriString>, Error> {
let authority = object_id
.authority_str()
.ok_or(ErrorKind::Domain)?
object_id: &XsdAnyUri,
) -> Result<Vec<XsdAnyUri>, MyError> {
let domain = object_id
.as_url()
.host()
.ok_or(MyError::Domain)?
.to_string();
state.inboxes_without(&actor.inbox, &authority).await
Ok(state.listeners_without(&actor.inbox, &domain).await)
}
fn prepare_activity<T, U, V>(
fn prepare_activity<T, U, V, Kind>(
mut t: T,
id: impl TryInto<IriString, Error = U>,
to: impl TryInto<IriString, Error = V>,
) -> Result<T, Error>
id: impl TryInto<XsdAnyUri, Error = U>,
to: impl TryInto<XsdAnyUri, Error = V>,
) -> Result<T, MyError>
where
T: ObjectExt + BaseExt,
Error: From<U> + From<V>,
T: ObjectExt<Kind> + BaseExt<Kind>,
MyError: From<U> + From<V>,
{
t.set_id(id.try_into()?)
.set_many_tos(vec![to.try_into()?])
@ -54,9 +52,9 @@ where
// Generate a type that says "I want to stop following you"
fn generate_undo_follow(
config: &Config,
actor_id: &IriString,
my_id: &IriString,
) -> Result<AsUndo, Error> {
actor_id: &XsdAnyUri,
my_id: &XsdAnyUri,
) -> Result<AsUndo, MyError> {
let mut follow = AsFollow::new(my_id.clone(), actor_id.clone());
follow.set_id(config.generate_url(UrlKind::Activity));

View file

@ -1,45 +1,34 @@
use crate::{
config::UrlKind,
db::Actor,
error::Error,
future::BoxFuture,
data::Actor,
jobs::{apub::generate_undo_follow, Deliver, JobState},
};
use background_jobs::Job;
use background_jobs::ActixJob;
use std::{future::Future, pin::Pin};
#[derive(Clone, serde::Deserialize, serde::Serialize)]
pub(crate) struct Reject(pub(crate) Actor);
impl std::fmt::Debug for Reject {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Reject").field("actor", &self.0.id).finish()
}
}
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct Reject(pub Actor);
impl Reject {
#[tracing::instrument(name = "Reject", skip(state))]
async fn perform(self, state: JobState) -> Result<(), Error> {
state.actors.remove_connection(&self.0).await?;
async fn perform(self, state: JobState) -> Result<(), anyhow::Error> {
if state.actors.unfollower(&self.0).await?.is_some() {
state.db.remove_listener(self.0.inbox.clone()).await?;
}
let my_id = state.config.generate_url(UrlKind::Actor);
let undo = generate_undo_follow(&state.config, &self.0.id, &my_id)?;
state
.job_server
.queue(Deliver::new(self.0.inbox, undo)?)
.await?;
state.job_server.queue(Deliver::new(self.0.inbox, undo)?)?;
Ok(())
}
}
impl Job for Reject {
impl ActixJob for Reject {
type State = JobState;
type Error = Error;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
const NAME: &'static str = "relay::jobs::apub::Reject";
const QUEUE: &'static str = "apub";
fn run(self, state: Self::State) -> Self::Future {
Box::pin(self.perform(state))

View file

@ -1,60 +1,47 @@
use crate::{
apub::AcceptedActivities,
config::UrlKind,
db::Actor,
error::Error,
future::BoxFuture,
data::Actor,
jobs::{apub::generate_undo_follow, Deliver, JobState},
};
use activitystreams::prelude::BaseExt;
use background_jobs::Job;
use background_jobs::ActixJob;
use std::{future::Future, pin::Pin};
#[derive(Clone, serde::Deserialize, serde::Serialize)]
pub(crate) struct Undo {
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct Undo {
input: AcceptedActivities,
actor: Actor,
}
impl std::fmt::Debug for Undo {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Undo")
.field("input", &self.input.id_unchecked())
.field("actor", &self.actor.id)
.finish()
}
}
impl Undo {
pub(crate) fn new(input: AcceptedActivities, actor: Actor) -> Self {
pub fn new(input: AcceptedActivities, actor: Actor) -> Self {
Undo { input, actor }
}
#[tracing::instrument(name = "Undo", skip(state))]
async fn perform(self, state: JobState) -> Result<(), Error> {
let was_following = state.state.db.is_connected(self.actor.id.clone()).await?;
async fn perform(self, state: JobState) -> Result<(), anyhow::Error> {
let was_following = state.actors.is_following(&self.actor.id).await;
state.actors.remove_connection(&self.actor).await?;
if state.actors.unfollower(&self.actor).await?.is_some() {
state.db.remove_listener(self.actor.inbox.clone()).await?;
}
if was_following {
let my_id = state.config.generate_url(UrlKind::Actor);
let undo = generate_undo_follow(&state.config, &self.actor.id, &my_id)?;
state
.job_server
.queue(Deliver::new(self.actor.inbox, undo)?)
.await?;
.queue(Deliver::new(self.actor.inbox, undo)?)?;
}
Ok(())
}
}
impl Job for Undo {
impl ActixJob for Undo {
type State = JobState;
type Error = Error;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
type Future = Pin<Box<dyn Future<Output = Result<(), anyhow::Error>>>>;
const NAME: &'static str = "relay::jobs::apub::Undo";
const QUEUE: &'static str = "apub";
fn run(self, state: Self::State) -> Self::Future {
Box::pin(self.perform(state))

44
src/jobs/cache_media.rs Normal file
View file

@ -0,0 +1,44 @@
use crate::jobs::JobState;
use anyhow::Error;
use background_jobs::ActixJob;
use std::{future::Future, pin::Pin};
use uuid::Uuid;
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct CacheMedia {
uuid: Uuid,
}
impl CacheMedia {
pub fn new(uuid: Uuid) -> Self {
CacheMedia { uuid }
}
async fn perform(self, state: JobState) -> Result<(), Error> {
if state.media.get_bytes(self.uuid).await.is_some() {
return Ok(());
}
if let Some(url) = state.media.get_url(self.uuid).await? {
let (content_type, bytes) = state.requests.fetch_bytes(url.as_str()).await?;
state
.media
.store_bytes(self.uuid, content_type, bytes)
.await;
}
Ok(())
}
}
impl ActixJob for CacheMedia {
type State = JobState;
type Future = Pin<Box<dyn Future<Output = Result<(), Error>>>>;
const NAME: &'static str = "relay::jobs::CacheMedia";
fn run(self, state: Self::State) -> Self::Future {
Box::pin(self.perform(state))
}
}
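A hedged sketch of enqueueing this job (the queue_media wrapper is hypothetical; in this version JobServer::queue is synchronous):

    // Sketch: schedule caching of a stored media record by its uuid.
    fn queue_media(job_server: &JobServer, uuid: Uuid) -> Result<(), MyError> {
        job_server.queue(CacheMedia::new(uuid))
    }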

View file

@ -1,112 +0,0 @@
use crate::{
apub::AcceptedActors,
error::{Error, ErrorKind},
future::BoxFuture,
jobs::JobState,
requests::BreakerStrategy,
};
use activitystreams::{iri_string::types::IriString, object::Image, prelude::*};
use background_jobs::Job;
#[derive(Clone, serde::Deserialize, serde::Serialize)]
pub(crate) struct QueryContact {
actor_id: IriString,
contact_id: IriString,
}
impl std::fmt::Debug for QueryContact {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("QueryContact")
.field("actor_id", &self.actor_id.to_string())
.field("contact_id", &self.contact_id.to_string())
.finish()
}
}
impl QueryContact {
pub(crate) fn new(actor_id: IriString, contact_id: IriString) -> Self {
QueryContact {
actor_id,
contact_id,
}
}
async fn perform(self, state: JobState) -> Result<(), Error> {
let contact_outdated = state
.state
.node_cache
.is_contact_outdated(self.actor_id.clone())
.await;
if !contact_outdated {
return Ok(());
}
let contact = match state
.state
.requests
.fetch::<AcceptedActors>(&self.contact_id, BreakerStrategy::Allow404AndBelow)
.await
{
Ok(contact) => contact,
Err(e) if e.is_breaker() => {
tracing::debug!("Not retrying due to failed breaker");
return Ok(());
}
Err(e) => return Err(e),
};
let (username, display_name, url, avatar) =
to_contact(contact).ok_or(ErrorKind::Extract("contact"))?;
state
.state
.node_cache
.set_contact(self.actor_id, username, display_name, url, avatar)
.await?;
Ok(())
}
}
fn to_contact(contact: AcceptedActors) -> Option<(String, String, IriString, IriString)> {
let username = contact.preferred_username()?.to_owned();
let display_name = contact.name()?.as_one()?.as_xsd_string()?.to_owned();
let url = contact.url()?.as_single_id()?.to_owned();
let any_base = contact.icon()?.as_one()?;
let avatar = Image::from_any_base(any_base.clone())
.ok()??
.url()?
.as_single_id()?
.to_owned();
Some((username, display_name, url, avatar))
}
impl Job for QueryContact {
type State = JobState;
type Error = Error;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
const NAME: &'static str = "relay::jobs::QueryContact";
const QUEUE: &'static str = "maintenance";
fn run(self, state: Self::State) -> Self::Future {
Box::pin(self.perform(state))
}
}
#[cfg(test)]
mod tests {
use super::to_contact;
const HYNET_ADMIN: &str = r#"{"@context":["https://www.w3.org/ns/activitystreams","https://soc.hyena.network/schemas/litepub-0.1.jsonld",{"@language":"und"}],"alsoKnownAs":[],"attachment":[{"name":"Website","type":"PropertyValue","value":"https://hyena.network/"},{"name":"Services","type":"PropertyValue","value":"Pleroma, Invidious, SearX, XMPP"},{"name":"CW","type":"PropertyValue","value":"all long posts"}],"capabilities":{"acceptsChatMessages":true},"discoverable":true,"endpoints":{"oauthAuthorizationEndpoint":"https://soc.hyena.network/oauth/authorize","oauthRegistrationEndpoint":"https://soc.hyena.network/api/v1/apps","oauthTokenEndpoint":"https://soc.hyena.network/oauth/token","sharedInbox":"https://soc.hyena.network/inbox","uploadMedia":"https://soc.hyena.network/api/ap/upload_media"},"followers":"https://soc.hyena.network/users/HyNET/followers","following":"https://soc.hyena.network/users/HyNET/following","icon":{"type":"Image","url":"https://soc.hyena.network/media/ab149b1e0196ffdbecc6830c7f6f1a14dd8d8408ec7db0f1e8ad9d40e600ea73.gif"},"id":"https://soc.hyena.network/users/HyNET","image":{"type":"Image","url":"https://soc.hyena.network/media/12ba78d3015e13aa65ac4e106e574dd7bf959614585f10ce85de40e0148da677.png"},"inbox":"https://soc.hyena.network/users/HyNET/inbox","manuallyApprovesFollowers":false,"name":"HyNET Announcement System :glider:","outbox":"https://soc.hyena.network/users/HyNET/outbox","preferredUsername":"HyNET","publicKey":{"id":"https://soc.hyena.network/users/HyNET#main-key","owner":"https://soc.hyena.network/users/HyNET","publicKeyPem":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyF74womumWRhR7RW4Q6a\n2+Av/Ue8QHiKwjQARJEakbKnKgkI5FRFVVOfMiYVJp/juNt4GLgK15panBqJa9Yt\nWACiHQjBd2yVI5tIHiae0uBj5SdUVuduoycVLG0lpJsg12p8m/vL1oaeLqehTqa6\nsYplQh1GCLet0cUdn/66Cj2pAPD3V7Bz3VnG+oyXIsGQbBB8RHnWhFH8b0qQOyur\nJRAB8aye6QAL2sQbfISM2lycWzNeIHkqsUb7FdqdhQ+Ze0rETRGDkOO2Qvpg0hQm\n6owMsHnHA/DzyOHLy6Yf+I3OUlBC/P1SSAKwORsifFDXL322AEqoDi5ZpwzG9m5z\nAQIDAQAB\n-----END PUBLIC KEY-----\n\n"},"summary":"Ran by <span class=\"h-card\"><a class=\"u-url mention\" data-user=\"9s8j4AHGt3ED0P0b6e\" href=\"https://soc.hyena.network/users/mel\" rel=\"ugc\">@<span>mel</span></a></span> :adm1::adm2: <br/>For direct help with the service, send <span class=\"h-card\"><a class=\"u-url mention\" data-user=\"9s8j4AHGt3ED0P0b6e\" href=\"https://soc.hyena.network/users/mel\" rel=\"ugc\">@<span>mel</span></a></span> a message.","tag":[{"icon":{"type":"Image","url":"https://soc.hyena.network/emoji/Signs/adm1.png"},"id":"https://soc.hyena.network/emoji/Signs/adm1.png","name":":adm1:","type":"Emoji","updated":"1970-01-01T00:00:00Z"},{"icon":{"type":"Image","url":"https://soc.hyena.network/emoji/Signs/adm2.png"},"id":"https://soc.hyena.network/emoji/Signs/adm2.png","name":":adm2:","type":"Emoji","updated":"1970-01-01T00:00:00Z"},{"icon":{"type":"Image","url":"https://soc.hyena.network/emoji/misc/glider.png"},"id":"https://soc.hyena.network/emoji/misc/glider.png","name":":glider:","type":"Emoji","updated":"1970-01-01T00:00:00Z"}],"type":"Service","url":"https://soc.hyena.network/users/HyNET"}"#;
#[test]
fn parse_hynet() {
let actor = serde_json::from_str(HYNET_ADMIN).unwrap();
to_contact(actor).unwrap();
}
}

View file

@ -1,30 +1,17 @@
use crate::{
error::Error,
future::BoxFuture,
jobs::{debug_object, JobState},
requests::BreakerStrategy,
};
use activitystreams::iri_string::types::IriString;
use background_jobs::{Backoff, Job};
use crate::{error::MyError, jobs::JobState};
use activitystreams_new::primitives::XsdAnyUri;
use anyhow::Error;
use background_jobs::{ActixJob, Backoff};
use std::{future::Future, pin::Pin};
#[derive(Clone, serde::Deserialize, serde::Serialize)]
pub(crate) struct Deliver {
to: IriString,
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct Deliver {
to: XsdAnyUri,
data: serde_json::Value,
}
impl std::fmt::Debug for Deliver {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Deliver")
.field("to", &self.to.to_string())
.field("activity", &self.data["type"])
.field("object", debug_object(&self.data))
.finish()
}
}
impl Deliver {
pub(crate) fn new<T>(to: IriString, data: T) -> Result<Self, Error>
pub fn new<T>(to: XsdAnyUri, data: T) -> Result<Self, MyError>
where
T: serde::ser::Serialize,
{
@ -33,39 +20,20 @@ impl Deliver {
data: serde_json::to_value(data)?,
})
}
#[tracing::instrument(name = "Deliver", skip(state))]
async fn perform(self, state: JobState) -> Result<(), Error> {
if let Err(e) = state
.state
.requests
.deliver(&self.to, &self.data, BreakerStrategy::Allow401AndBelow)
.await
{
if e.is_breaker() {
tracing::debug!("Not trying due to failed breaker");
return Ok(());
}
if e.is_bad_request() {
tracing::debug!("Server didn't understand the activity");
return Ok(());
}
return Err(e);
}
Ok(())
}
}
impl Job for Deliver {
impl ActixJob for Deliver {
type State = JobState;
type Error = Error;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
type Future = Pin<Box<dyn Future<Output = Result<(), Error>>>>;
const NAME: &'static str = "relay::jobs::Deliver";
const QUEUE: &'static str = "deliver";
const BACKOFF: Backoff = Backoff::Exponential(8);
fn run(self, state: Self::State) -> Self::Future {
Box::pin(self.perform(state))
Box::pin(async move {
state.requests.deliver(self.to, &self.data).await?;
Ok(())
})
}
}
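Deliveries retry with an exponential backoff on failure, so a briefly unreachable inbox does not drop the activity. A hedged sketch of queueing one delivery (the deliver_to wrapper is hypothetical):

    // Sketch: queue a single signed delivery to one inbox.
    fn deliver_to(
        job_server: &JobServer,
        inbox: XsdAnyUri,
        activity: serde_json::Value,
    ) -> Result<(), MyError> {
        job_server.queue(Deliver::new(inbox, activity)?)
    }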

View file

@ -1,28 +1,20 @@
use crate::{
error::Error,
future::BoxFuture,
jobs::{debug_object, Deliver, JobState},
error::MyError,
jobs::{Deliver, JobState},
};
use activitystreams::iri_string::types::IriString;
use background_jobs::Job;
use activitystreams_new::primitives::XsdAnyUri;
use anyhow::Error;
use background_jobs::ActixJob;
use futures::future::{ready, Ready};
#[derive(Clone, serde::Deserialize, serde::Serialize)]
pub(crate) struct DeliverMany {
to: Vec<IriString>,
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub struct DeliverMany {
to: Vec<XsdAnyUri>,
data: serde_json::Value,
}
impl std::fmt::Debug for DeliverMany {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("DeliverMany")
.field("activity", &self.data["type"])
.field("object", debug_object(&self.data))
.finish()
}
}
impl DeliverMany {
pub(crate) fn new<T>(to: Vec<IriString>, data: T) -> Result<Self, Error>
pub fn new<T>(to: Vec<XsdAnyUri>, data: T) -> Result<Self, MyError>
where
T: serde::ser::Serialize,
{
@ -32,28 +24,24 @@ impl DeliverMany {
})
}
#[tracing::instrument(name = "Deliver many", skip(state))]
async fn perform(self, state: JobState) -> Result<(), Error> {
fn perform(self, state: JobState) -> Result<(), Error> {
for inbox in self.to {
state
.job_server
.queue(Deliver::new(inbox, self.data.clone())?)
.await?;
.queue(Deliver::new(inbox, self.data.clone())?)?;
}
Ok(())
}
}
impl Job for DeliverMany {
impl ActixJob for DeliverMany {
type State = JobState;
type Error = Error;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
type Future = Ready<Result<(), Error>>;
const NAME: &'static str = "relay::jobs::DeliverMany";
const QUEUE: &'static str = "deliver";
fn run(self, state: Self::State) -> Self::Future {
Box::pin(self.perform(state))
ready(self.perform(state))
}
}

File diff suppressed because one or more lines are too long

121
src/jobs/mod.rs Normal file
View file

@ -0,0 +1,121 @@
pub mod apub;
mod cache_media;
mod deliver;
mod deliver_many;
mod instance;
mod nodeinfo;
mod process_listeners;
mod storage;
pub use self::{
cache_media::CacheMedia, deliver::Deliver, deliver_many::DeliverMany, instance::QueryInstance,
nodeinfo::QueryNodeinfo,
};
use crate::{
config::Config,
data::{ActorCache, Media, NodeCache, State},
db::Db,
error::MyError,
jobs::{process_listeners::Listeners, storage::Storage},
requests::Requests,
};
use background_jobs::{Job, QueueHandle, WorkerConfig};
use std::time::Duration;
pub fn create_server(db: Db) -> JobServer {
let shared = background_jobs::create_server(Storage::new(db));
shared.every(Duration::from_secs(60 * 5), Listeners);
JobServer::new(shared)
}
pub fn create_workers(
db: Db,
state: State,
actors: ActorCache,
job_server: JobServer,
media: Media,
config: Config,
) {
let remote_handle = job_server.remote.clone();
WorkerConfig::new(move || {
JobState::new(
db.clone(),
state.clone(),
actors.clone(),
job_server.clone(),
media.clone(),
config.clone(),
)
})
.register::<Deliver>()
.register::<DeliverMany>()
.register::<QueryNodeinfo>()
.register::<QueryInstance>()
.register::<Listeners>()
.register::<CacheMedia>()
.register::<apub::Announce>()
.register::<apub::Follow>()
.register::<apub::Forward>()
.register::<apub::Reject>()
.register::<apub::Undo>()
.set_worker_count("default", 4)
.start(remote_handle);
}
#[derive(Clone)]
pub struct JobState {
db: Db,
requests: Requests,
state: State,
actors: ActorCache,
config: Config,
media: Media,
node_cache: NodeCache,
job_server: JobServer,
}
#[derive(Clone)]
pub struct JobServer {
remote: QueueHandle,
}
impl JobState {
fn new(
db: Db,
state: State,
actors: ActorCache,
job_server: JobServer,
media: Media,
config: Config,
) -> Self {
JobState {
requests: state.requests(),
node_cache: state.node_cache(),
db,
actors,
config,
media,
state,
job_server,
}
}
}
impl JobServer {
fn new(remote_handle: QueueHandle) -> Self {
JobServer {
remote: remote_handle,
}
}
pub fn queue<J>(&self, job: J) -> Result<(), MyError>
where
J: Job,
{
self.remote.queue(job).map_err(MyError::Queue)
}
}

File diff suppressed because one or more lines are too long

View file

@ -1,35 +1,29 @@
use crate::{
error::Error,
future::BoxFuture,
jobs::{instance::QueryInstance, nodeinfo::QueryNodeinfo, JobState},
};
use background_jobs::Job;
use crate::jobs::{instance::QueryInstance, nodeinfo::QueryNodeinfo, JobState};
use anyhow::Error;
use background_jobs::ActixJob;
use std::{future::Future, pin::Pin};
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub(crate) struct Listeners;
pub struct Listeners;
impl Listeners {
#[tracing::instrument(name = "Spawn query instances", skip(state))]
async fn perform(self, state: JobState) -> Result<(), Error> {
for actor_id in state.state.db.connected_ids().await? {
for listener in state.state.listeners().await {
state
.job_server
.queue(QueryInstance::new(actor_id.clone()))
.await?;
state.job_server.queue(QueryNodeinfo::new(actor_id)).await?;
.queue(QueryInstance::new(listener.clone()))?;
state.job_server.queue(QueryNodeinfo::new(listener))?;
}
Ok(())
}
}
impl Job for Listeners {
impl ActixJob for Listeners {
type State = JobState;
type Error = Error;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
type Future = Pin<Box<dyn Future<Output = Result<(), Error>>>>;
const NAME: &'static str = "relay::jobs::Listeners";
const QUEUE: &'static str = "maintenance";
fn run(self, state: Self::State) -> Self::Future {
Box::pin(self.perform(state))

View file

@ -1,28 +0,0 @@
use crate::{error::Error, future::BoxFuture, jobs::JobState};
use background_jobs::{Backoff, Job};
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
pub(crate) struct RecordLastOnline;
impl RecordLastOnline {
#[tracing::instrument(skip(state))]
async fn perform(self, state: JobState) -> Result<(), Error> {
let nodes = state.state.last_online.take();
state.state.db.mark_last_seen(nodes).await
}
}
impl Job for RecordLastOnline {
type State = JobState;
type Error = Error;
type Future = BoxFuture<'static, Result<(), Self::Error>>;
const NAME: &'static str = "relay::jobs::RecordLastOnline";
const QUEUE: &'static str = "maintenance";
const BACKOFF: Backoff = Backoff::Linear(1);
fn run(self, state: Self::State) -> Self::Future {
Box::pin(self.perform(state))
}
}

171
src/jobs/storage.rs Normal file
View file

@ -0,0 +1,171 @@
use crate::{db::Db, error::MyError};
use background_jobs::{dev::JobInfo, Stats};
use log::debug;
use tokio_postgres::types::Json;
use uuid::Uuid;
#[derive(Clone)]
pub struct Storage {
db: Db,
}
impl Storage {
pub fn new(db: Db) -> Self {
Storage { db }
}
}
#[async_trait::async_trait]
impl background_jobs::dev::Storage for Storage {
type Error = MyError;
async fn generate_id(&self) -> Result<Uuid, MyError> {
// TODO: Ensure unique job id
Ok(Uuid::new_v4())
}
async fn save_job(&self, job: JobInfo) -> Result<(), MyError> {
debug!(
"Inserting job {} status {} for queue {}",
job.id(),
job.status(),
job.queue()
);
self.db.pool().get().await?.execute(
"INSERT INTO jobs
(job_id, job_queue, job_timeout, job_updated, job_status, job_next_run, job_value, created_at)
VALUES
($1::UUID, $2::TEXT, $3::BIGINT, $4::TIMESTAMP, $5::TEXT, $6::TIMESTAMP, $7::JSONB, 'now')
ON CONFLICT (job_id)
DO UPDATE SET
job_updated = $4::TIMESTAMP,
job_status = $5::TEXT,
job_next_run = $6::TIMESTAMP,
job_value = $7::JSONB;",
&[&job.id(), &job.queue(), &job.timeout(), &job.updated_at().naive_utc(), &job.status().to_string(), &job.next_queue().map(|q| q.naive_utc()), &Json(&job)],
)
.await?;
Ok(())
}
async fn fetch_job(&self, id: Uuid) -> Result<Option<JobInfo>, MyError> {
debug!(
"SELECT job_value FROM jobs WHERE job_id = $1::UUID LIMIT 1; [{}]",
id
);
let row_opt = self
.db
.pool()
.get()
.await?
.query_opt(
"SELECT job_value
FROM jobs
WHERE job_id = $1::UUID
LIMIT 1;",
&[&id],
)
.await?;
let row = if let Some(row) = row_opt {
row
} else {
return Ok(None);
};
let value: Json<JobInfo> = row.try_get(0)?;
Ok(Some(value.0))
}
async fn fetch_job_from_queue(&self, queue: &str) -> Result<Option<JobInfo>, MyError> {
let row_opt = self
.db
.pool()
.get()
.await?
.query_opt(
"UPDATE jobs
SET
job_status = 'Running',
job_updated = 'now'
WHERE
job_id = (
SELECT job_id
FROM jobs
WHERE
job_queue = $1::TEXT
AND
(
job_next_run IS NULL
OR
job_next_run < now()
)
AND
(
job_status = 'Pending'
OR
(
job_status = 'Running'
AND
NOW() > (INTERVAL '1 millisecond' * job_timeout + job_updated)
)
)
LIMIT 1
FOR UPDATE SKIP LOCKED
)
RETURNING job_value;",
&[&queue],
)
.await?;
let row = if let Some(row) = row_opt {
row
} else {
return Ok(None);
};
let value: Json<JobInfo> = row.try_get(0)?;
let job = value.0;
debug!("Found job {} in queue {}", job.id(), queue);
Ok(Some(job))
}
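The claim query above leans on FOR UPDATE SKIP LOCKED: the winning transaction locks the row it selects, and any concurrent fetch skips locked rows instead of blocking, so two workers can never claim the same job. A hedged sketch of that property (assuming a storage: Storage built on the same pool):

    // Sketch: concurrent fetches from the same queue return different
    // jobs; with a single pending job, one gets Some(..) and the other
    // Ok(None), because the locked row is skipped rather than waited on.
    async fn claim_twice(storage: &Storage) -> Result<(), MyError> {
        let (a, b) = futures::future::join(
            storage.fetch_job_from_queue("default"),
            storage.fetch_job_from_queue("default"),
        )
        .await;
        debug!("claimed: {} / {}", a?.is_some(), b?.is_some());
        Ok(())
    }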
async fn queue_job(&self, _queue: &str, _id: Uuid) -> Result<(), MyError> {
// Queue Job is a no-op, since jobs are always in their queue
Ok(())
}
async fn run_job(&self, _id: Uuid, _runner_id: Uuid) -> Result<(), MyError> {
// Run Job is a no-op, since jobs are marked running at fetch
Ok(())
}
async fn delete_job(&self, id: Uuid) -> Result<(), MyError> {
debug!("Deleting job {}", id);
self.db
.pool()
.get()
.await?
.execute("DELETE FROM jobs WHERE job_id = $1::UUID;", &[&id])
.await?;
Ok(())
}
async fn get_stats(&self) -> Result<Stats, MyError> {
// TODO: Stats are unimplemented
Ok(Stats::default())
}
async fn update_stats<F>(&self, _f: F) -> Result<(), MyError>
where
F: Fn(Stats) -> Stats + Send,
{
// TODO: Stats are unimplemented
Ok(())
}
}

View file

@ -1,376 +1,139 @@
// need this for ructe
#![allow(clippy::needless_borrow)]
use actix_rt::Arbiter;
use actix_web::{
middleware::{Compress, Logger},
web, App, HttpServer,
};
use std::time::Duration;
use activitystreams::iri_string::types::IriString;
use actix_web::{middleware::Compress, web, App, HttpServer};
use collector::MemoryCollector;
#[cfg(feature = "console")]
use console_subscriber::ConsoleLayer;
use error::Error;
use http_signature_normalization_actix::middleware::VerifySignature;
use metrics_exporter_prometheus::PrometheusBuilder;
use metrics_util::layers::FanoutBuilder;
use opentelemetry::{trace::TracerProvider, KeyValue};
use opentelemetry_otlp::WithExportConfig;
use opentelemetry_sdk::Resource;
use reqwest_middleware::ClientWithMiddleware;
use rustls::ServerConfig;
use tokio::task::JoinHandle;
use tracing_actix_web::TracingLogger;
use tracing_error::ErrorLayer;
use tracing_log::LogTracer;
use tracing_subscriber::{filter::Targets, layer::SubscriberExt, Layer};
mod admin;
mod apub;
mod args;
mod collector;
mod config;
mod data;
mod db;
mod error;
mod extractors;
mod future;
mod http1;
mod jobs;
mod middleware;
mod notify;
mod requests;
mod routes;
mod spawner;
mod stream;
mod telegram;
use crate::config::UrlKind;
use self::{
args::Args,
config::Config,
data::{ActorCache, MediaCache, State},
data::{ActorCache, Media, State},
db::Db,
jobs::create_workers,
middleware::{DebugPayload, MyVerify, RelayResolver, Timings},
routes::{actor, healthz, inbox, index, nodeinfo, nodeinfo_meta, statics},
spawner::Spawner,
jobs::{create_server, create_workers},
middleware::RelayResolver,
routes::{actor, inbox, index, nodeinfo, nodeinfo_meta, statics},
};
fn init_subscriber(
software_name: &'static str,
opentelemetry_url: Option<&IriString>,
) -> color_eyre::Result<()> {
LogTracer::init()?;
color_eyre::install()?;
let targets: Targets = std::env::var("RUST_LOG")
.unwrap_or_else(|_| "info".into())
.parse()?;
let format_layer = tracing_subscriber::fmt::layer().with_filter(targets.clone());
#[cfg(feature = "console")]
let console_layer = ConsoleLayer::builder()
.with_default_env()
.server_addr(([0, 0, 0, 0], 6669))
.event_buffer_capacity(1024 * 1024)
.spawn();
let subscriber = tracing_subscriber::Registry::default()
.with(format_layer)
.with(ErrorLayer::default());
#[cfg(feature = "console")]
let subscriber = subscriber.with(console_layer);
if let Some(url) = opentelemetry_url {
let exporter = opentelemetry_otlp::SpanExporter::builder()
.with_tonic()
.with_endpoint(url.as_str())
.build()?;
let tracer_provider = opentelemetry_sdk::trace::TracerProvider::builder()
.with_resource(Resource::new(vec![KeyValue::new(
"service.name",
software_name,
)]))
.with_batch_exporter(exporter, opentelemetry_sdk::runtime::Tokio)
.build();
let otel_layer = tracing_opentelemetry::layer()
.with_tracer(tracer_provider.tracer(software_name))
.with_filter(targets);
let subscriber = subscriber.with(otel_layer);
tracing::subscriber::set_global_default(subscriber)?;
} else {
tracing::subscriber::set_global_default(subscriber)?;
}
Ok(())
}
fn build_client(
user_agent: &str,
timeout_seconds: u64,
proxy: Option<(&IriString, Option<(&str, &str)>)>,
) -> Result<ClientWithMiddleware, Error> {
let builder = reqwest::Client::builder().user_agent(user_agent.to_string());
let builder = if let Some((url, auth)) = proxy {
let proxy = reqwest::Proxy::all(url.as_str())?;
let proxy = if let Some((username, password)) = auth {
proxy.basic_auth(username, password)
} else {
proxy
};
builder.proxy(proxy)
} else {
builder
};
let client = builder
.timeout(Duration::from_secs(timeout_seconds))
.build()?;
let client_with_middleware = reqwest_middleware::ClientBuilder::new(client)
.with(reqwest_tracing::TracingMiddleware::default())
.build();
Ok(client_with_middleware)
}
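A hedged usage sketch (the agent string, 30-second timeout, and absence of a proxy are illustrative):

    // Sketch: build the tracing-instrumented reqwest client.
    fn example_client() -> Result<ClientWithMiddleware, Error> {
        build_client("example-relay/0.1 (+https://relay.example)", 30, None)
    }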
#[tokio::main]
async fn main() -> color_eyre::Result<()> {
#[actix_rt::main]
async fn main() -> Result<(), anyhow::Error> {
dotenv::dotenv().ok();
let config = Config::build()?;
init_subscriber(Config::software_name(), config.opentelemetry_url())?;
if config.debug() {
std::env::set_var(
"RUST_LOG",
"debug,tokio_postgres=info,h2=info,trust_dns_resolver=info,trust_dns_proto=info,rustls=info,html5ever=info",
)
} else {
std::env::set_var("RUST_LOG", "info")
}
if config.pretty_log() {
pretty_env_logger::init();
} else {
env_logger::init();
}
let db = Db::build(&config)?;
let args = Args::new();
if args.any() {
client_main(config, args).await??;
if args.jobs_only() && args.no_jobs() {
return Err(anyhow::Error::msg(
"Either the server or the jobs must be run",
));
}
if !args.blocks().is_empty() || !args.whitelists().is_empty() {
if args.undo() {
db.remove_blocks(args.blocks()).await?;
db.remove_whitelists(args.whitelists()).await?;
} else {
db.add_blocks(args.blocks()).await?;
db.add_whitelists(args.whitelists()).await?;
}
return Ok(());
}
let collector = MemoryCollector::new();
if let Some(bind_addr) = config.prometheus_bind_address() {
let (recorder, exporter) = PrometheusBuilder::new()
.with_http_listener(bind_addr)
.build()?;
tokio::spawn(exporter);
let recorder = FanoutBuilder::default()
.add_recorder(recorder)
.add_recorder(collector.clone())
.build();
metrics::set_global_recorder(recorder).map_err(|e| color_eyre::eyre::eyre!("{e}"))?;
} else {
collector.install()?;
}
tracing::info!("Opening DB");
let db = Db::build(&config)?;
tracing::info!("Building caches");
let media = Media::new(db.clone());
let state = State::hydrate(config.clone(), &db).await?;
let actors = ActorCache::new(db.clone());
let media = MediaCache::new(db.clone());
let job_server = create_server(db.clone());
server_main(db, actors, media, collector, config).await?;
notify::Notifier::new(config.database_url().parse()?)
.register(notify::NewBlocks(state.clone()))
.register(notify::NewWhitelists(state.clone()))
.register(notify::NewListeners(state.clone(), job_server.clone()))
.register(notify::NewActors(actors.clone()))
.register(notify::NewNodes(state.node_cache()))
.register(notify::RmBlocks(state.clone()))
.register(notify::RmWhitelists(state.clone()))
.register(notify::RmListeners(state.clone()))
.register(notify::RmActors(actors.clone()))
.register(notify::RmNodes(state.node_cache()))
.start();
tracing::info!("Application exit");
if args.jobs_only() {
for _ in 0..num_cpus::get() {
let state = state.clone();
let actors = actors.clone();
let job_server = job_server.clone();
let media = media.clone();
let config = config.clone();
let db = db.clone();
Ok(())
}
fn client_main(config: Config, args: Args) -> JoinHandle<color_eyre::Result<()>> {
tokio::spawn(do_client_main(config, args))
}
async fn do_client_main(config: Config, args: Args) -> color_eyre::Result<()> {
let client = build_client(
&config.user_agent(),
config.client_timeout(),
config.proxy_config(),
)?;
if !args.blocks().is_empty() || !args.allowed().is_empty() {
if args.undo() {
admin::client::unblock(&client, &config, args.blocks().to_vec()).await?;
admin::client::disallow(&client, &config, args.allowed().to_vec()).await?;
} else {
admin::client::block(&client, &config, args.blocks().to_vec()).await?;
admin::client::allow(&client, &config, args.allowed().to_vec()).await?;
Arbiter::new().exec_fn(move || {
create_workers(db, state, actors, job_server, media, config);
});
}
println!("Updated lists");
actix_rt::signal::ctrl_c().await?;
return Ok(());
}
if args.contacted() {
let last_seen = admin::client::last_seen(&client, &config).await?;
let mut report = String::from("Contacted:");
if !last_seen.never.is_empty() {
report += "\nNever seen:\n";
}
for domain in last_seen.never {
report += "\t";
report += &domain;
report += "\n";
}
if !last_seen.last_seen.is_empty() {
report += "\nSeen:\n";
}
for (datetime, domains) in last_seen.last_seen {
for domain in domains {
report += "\t";
report += &datetime.to_string();
report += " - ";
report += &domain;
report += "\n";
}
}
report += "\n";
println!("{report}");
}
if args.list() {
let (blocked, allowed, connected) = tokio::try_join!(
admin::client::blocked(&client, &config),
admin::client::allowed(&client, &config),
admin::client::connected(&client, &config)
)?;
let mut report = String::from("Report:\n");
if !allowed.allowed_domains.is_empty() {
report += "\nAllowed\n\t";
report += &allowed.allowed_domains.join("\n\t");
}
if !blocked.blocked_domains.is_empty() {
report += "\n\nBlocked\n\t";
report += &blocked.blocked_domains.join("\n\t");
}
if !connected.connected_actors.is_empty() {
report += "\n\nConnected\n\t";
report += &connected.connected_actors.join("\n\t");
}
report += "\n";
println!("{report}");
}
if args.stats() {
let stats = admin::client::stats(&client, &config).await?;
stats.present();
}
Ok(())
}
const VERIFY_RATIO: usize = 7;
async fn server_main(
db: Db,
actors: ActorCache,
media: MediaCache,
collector: MemoryCollector,
config: Config,
) -> color_eyre::Result<()> {
let client = build_client(
&config.user_agent(),
config.client_timeout(),
config.proxy_config(),
)?;
tracing::info!("Creating state");
let (signature_threads, verify_threads) = match config.signature_threads() {
0 | 1 => (1, 1),
n if n <= VERIFY_RATIO => (n, 1),
n => {
let verify_threads = (n / VERIFY_RATIO).max(1);
let signature_threads = n.saturating_sub(verify_threads).max(VERIFY_RATIO);
(signature_threads, verify_threads)
}
};
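// Worked example of the split above (assuming VERIFY_RATIO = 7): with 16
// configured threads, verify_threads = (16 / 7).max(1) = 2 and
// signature_threads = (16 - 2).max(7) = 14; with 4 threads the middle arm
// yields (4, 1); with 0 or 1, both spawners get a single thread.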
let verify_spawner = Spawner::build("verify-cpu", verify_threads.try_into()?)?;
let sign_spawner = Spawner::build("sign-cpu", signature_threads.try_into()?)?;
let key_id = config.generate_url(UrlKind::MainKey).to_string();
let state = State::build(db.clone(), key_id, sign_spawner.clone(), client).await?;
if let Some((token, admin_handle)) = config.telegram_info() {
tracing::info!("Creating telegram handler");
telegram::start(admin_handle.to_owned(), db.clone(), token);
}
let cert_resolver = config
.open_keys()
.await?
.map(rustls_channel_resolver::channel::<32>);
let no_jobs = args.no_jobs();
let bind_address = config.bind_address();
let sign_spawner2 = sign_spawner.clone();
let verify_spawner2 = verify_spawner.clone();
let config2 = config.clone();
let job_store = jobs::build_storage();
let server = HttpServer::new(move || {
let job_server = create_workers(
job_store.clone(),
state.clone(),
actors.clone(),
media.clone(),
config.clone(),
)
.expect("Failed to create job server");
HttpServer::new(move || {
if !no_jobs {
create_workers(
db.clone(),
state.clone(),
actors.clone(),
job_server.clone(),
media.clone(),
config.clone(),
);
}
let app = App::new()
.app_data(web::Data::new(db.clone()))
.app_data(web::Data::new(state.clone()))
.app_data(web::Data::new(
state.requests.clone().spawner(verify_spawner.clone()),
))
.app_data(web::Data::new(actors.clone()))
.app_data(web::Data::new(config.clone()))
.app_data(web::Data::new(job_server))
.app_data(web::Data::new(media.clone()))
.app_data(web::Data::new(collector.clone()))
.app_data(web::Data::new(verify_spawner.clone()));
let app = if let Some(data) = config.admin_config() {
app.app_data(data)
} else {
app
};
app.wrap(Compress::default())
.wrap(TracingLogger::default())
.wrap(Timings)
.route("/healthz", web::get().to(healthz))
App::new()
.wrap(Compress::default())
.wrap(Logger::default())
.data(db.clone())
.data(state.clone())
.data(state.requests())
.data(actors.clone())
.data(config.clone())
.data(job_server.clone())
.data(media.clone())
.service(web::resource("/").route(web::get().to(index)))
.service(web::resource("/media/{path}").route(web::get().to(routes::media)))
.service(
web::resource("/inbox")
.wrap(config.digest_middleware().spawner(verify_spawner.clone()))
.wrap(VerifySignature::new(
MyVerify(
state.requests.clone().spawner(verify_spawner.clone()),
actors.clone(),
state.clone(),
verify_spawner.clone(),
),
http_signature_normalization_actix::Config::new(),
))
.wrap(DebugPayload(config.debug()))
.wrap(config.digest_middleware())
.wrap(config.signature_middleware(state.requests(), actors.clone()))
.route(web::post().to(inbox)),
)
.service(web::resource("/actor").route(web::get().to(actor)))
@@ -381,59 +144,10 @@ async fn server_main
.service(web::resource("/nodeinfo").route(web::get().to(nodeinfo_meta))),
)
.service(web::resource("/static/{filename}").route(web::get().to(statics)))
.service(
web::scope("/api/v1").service(
web::scope("/admin")
.route("/allow", web::post().to(admin::routes::allow))
.route("/disallow", web::post().to(admin::routes::disallow))
.route("/block", web::post().to(admin::routes::block))
.route("/unblock", web::post().to(admin::routes::unblock))
.route("/allowed", web::get().to(admin::routes::allowed))
.route("/blocked", web::get().to(admin::routes::blocked))
.route("/connected", web::get().to(admin::routes::connected))
.route("/stats", web::get().to(admin::routes::stats))
.route("/last_seen", web::get().to(admin::routes::last_seen)),
),
)
});
if let Some((cert_tx, cert_rx)) = cert_resolver {
let handle = tokio::spawn(async move {
let mut interval = tokio::time::interval(Duration::from_secs(30));
interval.tick().await;
loop {
interval.tick().await;
match config2.open_keys().await {
Ok(Some(key)) => cert_tx.update(key),
Ok(None) => tracing::warn!("Missing TLS keys"),
Err(e) => tracing::error!("Failed to read TLS keys {e}"),
}
}
});
tracing::info!("Binding to {}:{} with TLS", bind_address.0, bind_address.1);
let server_config = ServerConfig::builder()
.with_no_client_auth()
.with_cert_resolver(cert_rx);
server
.bind_rustls_0_23(bind_address, server_config)?
.run()
.await?;
handle.abort();
let _ = handle.await;
} else {
tracing::info!("Binding to {}:{}", bind_address.0, bind_address.1);
server.bind(bind_address)?.run().await?;
}
sign_spawner2.close().await;
verify_spawner2.close().await;
tracing::info!("Server closed");
})
.bind(bind_address)?
.run()
.await?;
Ok(())
}


@@ -1,9 +0,0 @@
mod payload;
mod timings;
mod verifier;
mod webfinger;
pub(crate) use payload::DebugPayload;
pub(crate) use timings::Timings;
pub(crate) use verifier::MyVerify;
pub(crate) use webfinger::RelayResolver;

src/middleware/mod.rs Normal file

@@ -0,0 +1,5 @@
mod verifier;
mod webfinger;
pub use verifier::MyVerify;
pub use webfinger::RelayResolver;


@@ -1,77 +0,0 @@
use actix_web::{
dev::{Payload, Service, ServiceRequest, Transform},
http::Method,
web::BytesMut,
HttpMessage,
};
use std::{
future::{ready, Ready},
task::{Context, Poll},
};
use streem::IntoStreamer;
#[derive(Clone, Debug)]
pub(crate) struct DebugPayload(pub bool);
#[doc(hidden)]
#[derive(Clone, Debug)]
pub(crate) struct DebugPayloadMiddleware<S>(bool, S);
impl<S> Transform<S, ServiceRequest> for DebugPayload
where
S: Service<ServiceRequest, Error = actix_web::Error>,
S::Future: 'static,
S::Error: 'static,
{
type Response = S::Response;
type Error = S::Error;
type InitError = ();
type Transform = DebugPayloadMiddleware<S>;
type Future = Ready<Result<Self::Transform, Self::InitError>>;
fn new_transform(&self, service: S) -> Self::Future {
ready(Ok(DebugPayloadMiddleware(self.0, service)))
}
}
impl<S> Service<ServiceRequest> for DebugPayloadMiddleware<S>
where
S: Service<ServiceRequest, Error = actix_web::Error>,
S::Future: 'static,
S::Error: 'static,
{
type Response = S::Response;
type Error = S::Error;
type Future = S::Future;
fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.1.poll_ready(cx)
}
fn call(&self, mut req: ServiceRequest) -> Self::Future {
if self.0 && req.method() == Method::POST {
let mut pl = req.take_payload().into_streamer();
req.set_payload(Payload::Stream {
payload: Box::pin(streem::try_from_fn(|yielder| async move {
let mut buf = BytesMut::new();
while let Some(bytes) = pl.try_next().await? {
buf.extend(bytes);
}
let bytes = buf.freeze();
tracing::info!("{}", String::from_utf8_lossy(&bytes));
yielder.yield_ok(bytes).await;
Ok(())
})),
});
self.1.call(req)
} else {
self.1.call(req)
}
}
}
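// Behavior note with a hypothetical payload (not from the diff): constructed
// as DebugPayload(true), a POST body such as {"type":"Follow"} is buffered in
// full, logged via tracing::info!, then re-emitted as a one-shot stream so
// downstream extractors still receive the original bytes unchanged.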


@@ -1,143 +0,0 @@
use actix_web::{
body::MessageBody,
dev::{Service, ServiceRequest, ServiceResponse, Transform},
http::StatusCode,
};
use std::{
future::{ready, Future, Ready},
time::Instant,
};
pub(crate) struct Timings;
pub(crate) struct TimingsMiddleware<S>(S);
struct LogOnDrop {
begin: Instant,
path: String,
method: String,
arm: bool,
}
pin_project_lite::pin_project! {
pub(crate) struct TimingsFuture<F> {
#[pin]
future: F,
log_on_drop: Option<LogOnDrop>,
}
}
pin_project_lite::pin_project! {
pub(crate) struct TimingsBody<B> {
#[pin]
body: B,
log_on_drop: LogOnDrop,
}
}
impl Drop for LogOnDrop {
fn drop(&mut self) {
if self.arm {
let duration = self.begin.elapsed();
metrics::histogram!("relay.request.complete", "path" => self.path.clone(), "method" => self.method.clone()).record(duration);
}
}
}
impl<S, B> Transform<S, ServiceRequest> for Timings
where
S: Service<ServiceRequest, Response = ServiceResponse<B>, Error = actix_web::Error>,
S::Future: 'static,
{
type Response = ServiceResponse<TimingsBody<B>>;
type Error = S::Error;
type InitError = ();
type Transform = TimingsMiddleware<S>;
type Future = Ready<Result<Self::Transform, Self::InitError>>;
fn new_transform(&self, service: S) -> Self::Future {
ready(Ok(TimingsMiddleware(service)))
}
}
impl<S, B> Service<ServiceRequest> for TimingsMiddleware<S>
where
S: Service<ServiceRequest, Response = ServiceResponse<B>, Error = actix_web::Error>,
S::Future: 'static,
{
type Response = ServiceResponse<TimingsBody<B>>;
type Error = S::Error;
type Future = TimingsFuture<S::Future>;
fn poll_ready(
&self,
ctx: &mut core::task::Context<'_>,
) -> std::task::Poll<Result<(), Self::Error>> {
self.0.poll_ready(ctx)
}
fn call(&self, req: ServiceRequest) -> Self::Future {
let log_on_drop = LogOnDrop {
begin: Instant::now(),
path: format!("{:?}", req.match_pattern()),
method: req.method().to_string(),
arm: false,
};
let future = self.0.call(req);
TimingsFuture {
future,
log_on_drop: Some(log_on_drop),
}
}
}
impl<F, B> Future for TimingsFuture<F>
where
F: Future<Output = Result<ServiceResponse<B>, actix_web::Error>>,
{
type Output = Result<ServiceResponse<TimingsBody<B>>, actix_web::Error>;
fn poll(
self: std::pin::Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
) -> std::task::Poll<Self::Output> {
let this = self.project();
let res = std::task::ready!(this.future.poll(cx));
let mut log_on_drop = this
.log_on_drop
.take()
.expect("TimingsFuture polled after completion");
let status = match &res {
Ok(res) => res.status(),
Err(e) => e.as_response_error().status_code(),
};
log_on_drop.arm =
status != StatusCode::NOT_FOUND && status != StatusCode::METHOD_NOT_ALLOWED;
let res = res.map(|r| r.map_body(|_, body| TimingsBody { body, log_on_drop }));
std::task::Poll::Ready(res)
}
}
impl<B: MessageBody> MessageBody for TimingsBody<B> {
type Error = B::Error;
fn size(&self) -> actix_web::body::BodySize {
self.body.size()
}
fn poll_next(
self: std::pin::Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
) -> std::task::Poll<Option<Result<actix_web::web::Bytes, Self::Error>>> {
self.project().body.poll_next(cx)
}
}
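// Why the metric is tied to body drop (a reading of the code above): the
// LogOnDrop guard travels inside TimingsBody, so the "relay.request.complete"
// histogram covers handler time plus response streaming, and arm stays false
// for 404 and 405 responses so unmatched routes don't pollute per-path metrics.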


@@ -1,162 +1,87 @@
use crate::{
apub::AcceptedActors,
data::{ActorCache, State},
error::{Error, ErrorKind},
requests::{BreakerStrategy, Requests},
spawner::Spawner,
};
use activitystreams::{base::BaseExt, iri, iri_string::types::IriString};
use base64::{engine::general_purpose::STANDARD, Engine};
use http_signature_normalization_actix::{prelude::*, verify::DeprecatedAlgorithm, Spawn};
use rsa::{pkcs1::EncodeRsaPublicKey, pkcs8::DecodePublicKey, RsaPublicKey};
use crate::{data::ActorCache, error::MyError, requests::Requests};
use activitystreams_new::uri;
use actix_web::web;
use http_signature_normalization_actix::{prelude::*, verify::DeprecatedAlgorithm};
use log::error;
use rsa::{hash::Hashes, padding::PaddingScheme, PublicKey, RSAPublicKey};
use rsa_pem::KeyExt;
use sha2::{Digest, Sha256};
use std::{future::Future, pin::Pin};
#[derive(Clone, Debug)]
pub(crate) struct MyVerify(pub Requests, pub ActorCache, pub State, pub Spawner);
#[derive(Clone)]
pub struct MyVerify(pub Requests, pub ActorCache);
impl MyVerify {
#[tracing::instrument("Verify request", skip(self, signature, signing_string))]
async fn verify(
&self,
algorithm: Option<Algorithm>,
key_id: String,
signature: String,
signing_string: String,
) -> Result<bool, Error> {
let public_key_id = iri!(key_id);
) -> Result<bool, MyError> {
let mut uri = uri!(key_id);
uri.as_url_mut().set_fragment(None);
let actor = self.1.get(&uri, &self.0).await?;
let was_cached = actor.is_cached();
let actor = actor.into_inner();
// receiving an activity from a domain indicates it is probably online
self.0.reset_breaker(&public_key_id);
let actor_id = if let Some(mut actor_id) = self
.2
.db
.actor_id_from_public_key_id(public_key_id.clone())
.await?
{
if !self.2.db.is_allowed(actor_id.clone()).await? {
return Err(ErrorKind::NotAllowed(key_id).into());
match algorithm {
Some(Algorithm::Hs2019) => (),
Some(Algorithm::Deprecated(DeprecatedAlgorithm::RsaSha256)) => (),
Some(other) => {
return Err(MyError::Algorithm(other.to_string()));
}
actor_id.set_fragment(None);
let actor = self.1.get(&actor_id, &self.0).await?;
let was_cached = actor.is_cached();
let actor = actor.into_inner();
match algorithm {
Some(Algorithm::Hs2019) => (),
Some(Algorithm::Deprecated(DeprecatedAlgorithm::RsaSha256)) => (),
Some(other) => {
return Err(ErrorKind::Algorithm(other.to_string()).into());
}
None => (),
};
let res = do_verify(
&self.3,
&actor.public_key,
signature.clone(),
signing_string.clone(),
)
.await;
if let Err(e) = res {
if !was_cached {
return Err(e);
}
} else {
return Ok(true);
}
actor_id
} else {
match self
.0
.fetch::<PublicKeyResponse>(&public_key_id, BreakerStrategy::Require2XX)
.await
{
Ok(res) => res.actor_id().ok_or(ErrorKind::MissingId),
Err(e) => {
if e.is_gone() {
tracing::warn!("Actor gone: {public_key_id}");
return Ok(false);
} else {
return Err(e);
}
}
}?
None => (),
};
let res = do_verify(&actor.public_key, signature.clone(), signing_string.clone()).await;
if let Err(e) = res {
if !was_cached {
return Err(e);
}
} else {
return Ok(true);
}
// Previously we verified the sig from an actor's local cache
//
// Now we make sure we fetch an updated actor
let actor = self.1.get_no_cache(&actor_id, &self.0).await?;
let actor = self.1.get_no_cache(&uri, &self.0).await?;
do_verify(&self.3, &actor.public_key, signature, signing_string).await?;
do_verify(&actor.public_key, signature, signing_string).await?;
Ok(true)
}
}
#[derive(serde::Deserialize)]
#[serde(untagged)]
#[serde(rename_all = "camelCase")]
enum PublicKeyResponse {
PublicKey {
#[allow(dead_code)]
id: IriString,
owner: IriString,
#[allow(dead_code)]
public_key_pem: String,
},
Actor(Box<AcceptedActors>),
}
impl PublicKeyResponse {
fn actor_id(&self) -> Option<IriString> {
match self {
PublicKeyResponse::PublicKey { owner, .. } => Some(owner.clone()),
PublicKeyResponse::Actor(actor) => actor.id_unchecked().cloned(),
}
}
}
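// For illustration (values are hypothetical): the untagged enum above accepts
// either a bare key document,
//
//     {"id": "https://example.com/actor#main-key",
//      "owner": "https://example.com/actor",
//      "publicKeyPem": "-----BEGIN PUBLIC KEY-----..."}
//
// in which case actor_id() yields the "owner" field, or a complete actor
// object, in which case the actor's own id is returned.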
#[tracing::instrument("Verify signature")]
async fn do_verify(
spawner: &Spawner,
public_key: &str,
signature: String,
signing_string: String,
) -> Result<(), Error> {
let public_key = RsaPublicKey::from_public_key_pem(public_key.trim())?;
let public_key_der = public_key
.to_pkcs1_der()
.map_err(|_| ErrorKind::DerEncode)?;
let public_key = ring::signature::UnparsedPublicKey::new(
&ring::signature::RSA_PKCS1_2048_8192_SHA256,
public_key_der,
);
) -> Result<(), MyError> {
let public_key = RSAPublicKey::from_pem_pkcs8(public_key)?;
let span = tracing::Span::current();
spawner
.spawn_blocking(move || {
span.in_scope(|| {
let decoded = STANDARD.decode(signature)?;
web::block(move || {
let decoded = base64::decode(signature)?;
let hashed = Sha256::digest(signing_string.as_bytes());
public_key
.verify(signing_string.as_bytes(), decoded.as_slice())
.map_err(|_| ErrorKind::VerifySignature)?;
public_key.verify(
PaddingScheme::PKCS1v15,
Some(&Hashes::SHA2_256),
&hashed,
&decoded,
)?;
Ok(()) as Result<(), Error>
})
})
.await??;
Ok(()) as Result<(), MyError>
})
.await?;
Ok(())
}
impl SignatureVerify for MyVerify {
type Error = Error;
type Error = MyError;
type Future = Pin<Box<dyn Future<Output = Result<bool, Self::Error>>>>;
fn signature_verify(
@ -171,39 +96,10 @@ impl SignatureVerify for MyVerify {
Box::pin(async move {
this.verify(algorithm, key_id, signature, signing_string)
.await
.map_err(|e| {
error!("Failed to verify, {}", e);
e
})
})
}
}
#[cfg(test)]
mod tests {
use crate::apub::AcceptedActors;
use rsa::{pkcs8::DecodePublicKey, RsaPublicKey};
const ASONIX_DOG_ACTOR: &str = r#"{"@context":["https://www.w3.org/ns/activitystreams","https://w3id.org/security/v1",{"manuallyApprovesFollowers":"as:manuallyApprovesFollowers","toot":"http://joinmastodon.org/ns#","featured":{"@id":"toot:featured","@type":"@id"},"featuredTags":{"@id":"toot:featuredTags","@type":"@id"},"alsoKnownAs":{"@id":"as:alsoKnownAs","@type":"@id"},"movedTo":{"@id":"as:movedTo","@type":"@id"},"schema":"http://schema.org#","PropertyValue":"schema:PropertyValue","value":"schema:value","discoverable":"toot:discoverable","Device":"toot:Device","Ed25519Signature":"toot:Ed25519Signature","Ed25519Key":"toot:Ed25519Key","Curve25519Key":"toot:Curve25519Key","EncryptedMessage":"toot:EncryptedMessage","publicKeyBase64":"toot:publicKeyBase64","deviceId":"toot:deviceId","claim":{"@type":"@id","@id":"toot:claim"},"fingerprintKey":{"@type":"@id","@id":"toot:fingerprintKey"},"identityKey":{"@type":"@id","@id":"toot:identityKey"},"devices":{"@type":"@id","@id":"toot:devices"},"messageFranking":"toot:messageFranking","messageType":"toot:messageType","cipherText":"toot:cipherText","suspended":"toot:suspended"}],"id":"https://masto.asonix.dog/actor","type":"Application","inbox":"https://masto.asonix.dog/actor/inbox","outbox":"https://masto.asonix.dog/actor/outbox","preferredUsername":"masto.asonix.dog","url":"https://masto.asonix.dog/about/more?instance_actor=true","manuallyApprovesFollowers":true,"publicKey":{"id":"https://masto.asonix.dog/actor#main-key","owner":"https://masto.asonix.dog/actor","publicKeyPem":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx8zXS0QNg9YGUBsxAOBH\nJaxIn7i6t+Z4UOpSFDVa2kP0NvQgIJsq3wzRqvaiuncRWpkyFk1fTakiRGD32xnY\nt+juuAaIBlU8eswKyANFqhcLAvFHmT3rA1848M4/YM19djvlL/PR9T53tPNHU+el\nS9MlsG3o6Zsj8YaUJtCI8RgEuJoROLHUb/V9a3oMQ7CfuIoSvF3VEz3/dRT09RW6\n0wQX7yhka9WlKuayWLWmTcB9lAIX6neBk+qKc8VSEsO7mHkzB8mRgVcS2uYZl1eA\nD8/jTT+SlpeFNDZk0Oh35GNFoOxh9qjRw3NGxu7jJCVBehDODzasOv4xDxKAhONa\njQIDAQAB\n-----END PUBLIC KEY-----\n"},"endpoints":{"sharedInbox":"https://masto.asonix.dog/inbox"}}"#;
const KARJALAZET_RELAY: &str = r#"{"@context":["https://www.w3.org/ns/activitystreams","https://pleroma.karjalazet.se/schemas/litepub-0.1.jsonld",{"@language":"und"}],"alsoKnownAs":[],"attachment":[],"capabilities":{},"discoverable":false,"endpoints":{"oauthAuthorizationEndpoint":"https://pleroma.karjalazet.se/oauth/authorize","oauthRegistrationEndpoint":"https://pleroma.karjalazet.se/api/v1/apps","oauthTokenEndpoint":"https://pleroma.karjalazet.se/oauth/token","sharedInbox":"https://pleroma.karjalazet.se/inbox","uploadMedia":"https://pleroma.karjalazet.se/api/ap/upload_media"},"featured":"https://pleroma.karjalazet.se/relay/collections/featured","followers":"https://pleroma.karjalazet.se/relay/followers","following":"https://pleroma.karjalazet.se/relay/following","id":"https://pleroma.karjalazet.se/relay","inbox":"https://pleroma.karjalazet.se/relay/inbox","manuallyApprovesFollowers":false,"name":null,"outbox":"https://pleroma.karjalazet.se/relay/outbox","preferredUsername":"relay","publicKey":{"id":"https://pleroma.karjalazet.se/relay#main-key","owner":"https://pleroma.karjalazet.se/relay","publicKeyPem":"-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAucoyCht6QpEzUPdQWP/J\nJYxObSH3MCcXBnG4d0OX78QshloeAHhl78EZ5c8I0ePmIjDg2NFK3/pG0EvSrHe2\nIZHnHaN5emgCb2ifNya5W572yfQXo1tUQy+ZXtbTUA7BWbr4LuCvd+HUavMwbx72\neraSZTiQj//ObwpbXFoZO5I/+e5avGmVnfmr/y2cG95hqFDtI3438RgZyBjY5kJM\nY1MLWoY9itGSfYmBtxRj3umlC2bPuBB+hHUJi6TvP7NO6zuUZ66m4ETyuBDi8iP6\ngnUp3Q4+1/I3nDUmhjt7OXckUcX3r5M4UHD3VVUFG0aZw6WWMEAxlyFf/07fCkhR\nBwIDAQAB\n-----END PUBLIC KEY-----\n\n"},"summary":"","tag":[],"type":"Person","url":"https://pleroma.karjalazet.se/relay"}"#;
const ASONIX_DOG_KEY: &str = "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx8zXS0QNg9YGUBsxAOBH\nJaxIn7i6t+Z4UOpSFDVa2kP0NvQgIJsq3wzRqvaiuncRWpkyFk1fTakiRGD32xnY\nt+juuAaIBlU8eswKyANFqhcLAvFHmT3rA1848M4/YM19djvlL/PR9T53tPNHU+el\nS9MlsG3o6Zsj8YaUJtCI8RgEuJoROLHUb/V9a3oMQ7CfuIoSvF3VEz3/dRT09RW6\n0wQX7yhka9WlKuayWLWmTcB9lAIX6neBk+qKc8VSEsO7mHkzB8mRgVcS2uYZl1eA\nD8/jTT+SlpeFNDZk0Oh35GNFoOxh9qjRw3NGxu7jJCVBehDODzasOv4xDxKAhONa\njQIDAQAB\n-----END PUBLIC KEY-----\n";
const KARJALAZET_KEY: &str = "-----BEGIN PUBLIC KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAucoyCht6QpEzUPdQWP/J\nJYxObSH3MCcXBnG4d0OX78QshloeAHhl78EZ5c8I0ePmIjDg2NFK3/pG0EvSrHe2\nIZHnHaN5emgCb2ifNya5W572yfQXo1tUQy+ZXtbTUA7BWbr4LuCvd+HUavMwbx72\neraSZTiQj//ObwpbXFoZO5I/+e5avGmVnfmr/y2cG95hqFDtI3438RgZyBjY5kJM\nY1MLWoY9itGSfYmBtxRj3umlC2bPuBB+hHUJi6TvP7NO6zuUZ66m4ETyuBDi8iP6\ngnUp3Q4+1/I3nDUmhjt7OXckUcX3r5M4UHD3VVUFG0aZw6WWMEAxlyFf/07fCkhR\nBwIDAQAB\n-----END PUBLIC KEY-----\n\n";
#[test]
fn handles_masto_keys() {
println!("{ASONIX_DOG_KEY}");
let _ = RsaPublicKey::from_public_key_pem(ASONIX_DOG_KEY.trim()).unwrap();
}
#[test]
fn handles_pleromo_keys() {
println!("{KARJALAZET_KEY}");
let _ = RsaPublicKey::from_public_key_pem(KARJALAZET_KEY.trim()).unwrap();
}
#[test]
fn handles_pleromo_relay_format() {
let _: AcceptedActors = serde_json::from_str(KARJALAZET_RELAY).unwrap();
}
#[test]
fn handles_masto_relay_format() {
let _: AcceptedActors = serde_json::from_str(ASONIX_DOG_ACTOR).unwrap();
}
}


@@ -1,39 +1,33 @@
use crate::{
config::{Config, UrlKind},
data::State,
future::LocalBoxFuture,
};
use actix_web::web::Data;
use actix_webfinger::{Resolver, Webfinger};
use rsa_magic_public_key::AsMagicPublicKey;
use std::{future::Future, pin::Pin};
pub(crate) struct RelayResolver;
pub struct RelayResolver;
#[derive(Clone, Debug, thiserror::Error)]
#[error("Error resolving webfinger data")]
pub(crate) struct RelayError;
pub struct RelayError;
type FutResult<T, E> = dyn Future<Output = Result<T, E>>;
impl Resolver for RelayResolver {
type State = (Data<State>, Data<Config>);
type Error = RelayError;
fn find(
scheme: Option<&str>,
account: &str,
domain: &str,
(state, config): Self::State,
) -> LocalBoxFuture<'static, Result<Option<Webfinger>, Self::Error>> {
) -> Pin<Box<FutResult<Option<Webfinger>, Self::Error>>> {
let domain = domain.to_owned();
let account = account.to_owned();
let scheme = scheme.map(|scheme| scheme.to_owned());
let fut = async move {
if let Some(scheme) = scheme {
if scheme != "acct:" {
return Ok(None);
}
}
if domain != config.hostname() {
return Ok(None);
}

src/notify.rs Normal file

@@ -0,0 +1,263 @@
use crate::{
data::{ActorCache, NodeCache, State},
db::listen,
jobs::{JobServer, QueryInstance, QueryNodeinfo},
};
use activitystreams_new::primitives::XsdAnyUri;
use actix_rt::{spawn, time::delay_for};
use futures::stream::{poll_fn, StreamExt};
use log::{debug, error, warn};
use std::{collections::HashMap, sync::Arc, time::Duration};
use tokio_postgres::{tls::NoTls, AsyncMessage, Config};
use uuid::Uuid;
pub trait Listener {
fn key(&self) -> &str;
fn execute(&self, payload: &str);
}
pub struct Notifier {
config: Config,
listeners: HashMap<String, Vec<Box<dyn Listener + Send + Sync + 'static>>>,
}
impl Notifier {
pub fn new(config: Config) -> Self {
Notifier {
config,
listeners: HashMap::new(),
}
}
pub fn register<L>(mut self, l: L) -> Self
where
L: Listener + Send + Sync + 'static,
{
let v = self
.listeners
.entry(l.key().to_owned())
.or_insert_with(Vec::new);
v.push(Box::new(l));
self
}
pub fn start(self) {
spawn(async move {
let Notifier { config, listeners } = self;
loop {
let (new_client, mut conn) = match config.connect(NoTls).await {
Ok((client, conn)) => (client, conn),
Err(e) => {
error!("Error establishing DB Connection, {}", e);
delay_for(Duration::new(5, 0)).await;
continue;
}
};
let client = Arc::new(new_client);
let new_client = client.clone();
spawn(async move {
if let Err(e) = listen(&new_client).await {
error!("Error listening for updates, {}", e);
}
});
let mut stream = poll_fn(move |cx| conn.poll_message(cx));
loop {
match stream.next().await {
Some(Ok(AsyncMessage::Notification(n))) => {
debug!("Handling Notification, {:?}", n);
if let Some(v) = listeners.get(n.channel()) {
for l in v {
l.execute(n.payload());
}
}
}
Some(Ok(AsyncMessage::Notice(e))) => {
debug!("Handling Notice, {:?}", e);
}
Some(Ok(_)) => {
debug!("Handling rest");
}
Some(Err(e)) => {
debug!("Breaking loop due to error Error, {:?}", e);
break;
}
None => {
debug!("End of stream, breaking loop");
break;
}
}
}
drop(client);
warn!("Restarting listener task");
}
});
}
}
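// For context (a hedged sketch, not part of the diff): each Listener below is
// keyed to a Postgres NOTIFY channel, so a statement such as
//
//     NOTIFY new_blocks, 'example.com';
//
// issued by a trigger or by application code hands the payload 'example.com'
// to every listener registered under "new_blocks".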
pub struct NewBlocks(pub State);
pub struct NewWhitelists(pub State);
pub struct NewListeners(pub State, pub JobServer);
pub struct NewActors(pub ActorCache);
pub struct NewNodes(pub NodeCache);
pub struct RmBlocks(pub State);
pub struct RmWhitelists(pub State);
pub struct RmListeners(pub State);
pub struct RmActors(pub ActorCache);
pub struct RmNodes(pub NodeCache);
impl Listener for NewBlocks {
fn key(&self) -> &str {
"new_blocks"
}
fn execute(&self, payload: &str) {
debug!("Caching block of {}", payload);
let state = self.0.clone();
let payload = payload.to_owned();
spawn(async move { state.cache_block(payload).await });
}
}
impl Listener for NewWhitelists {
fn key(&self) -> &str {
"new_whitelists"
}
fn execute(&self, payload: &str) {
debug!("Caching whitelist of {}", payload);
let state = self.0.clone();
let payload = payload.to_owned();
spawn(async move { state.cache_whitelist(payload.to_owned()).await });
}
}
impl Listener for NewListeners {
fn key(&self) -> &str {
"new_listeners"
}
fn execute(&self, payload: &str) {
if let Ok(uri) = payload.parse::<XsdAnyUri>() {
debug!("Caching listener {}", uri);
let state = self.0.clone();
let _ = self.1.queue(QueryInstance::new(uri.clone()));
let _ = self.1.queue(QueryNodeinfo::new(uri.clone()));
spawn(async move { state.cache_listener(uri).await });
} else {
warn!("Not caching listener {}, parse error", payload);
}
}
}
impl Listener for NewActors {
fn key(&self) -> &str {
"new_actors"
}
fn execute(&self, payload: &str) {
if let Ok(uri) = payload.parse::<XsdAnyUri>() {
debug!("Caching actor {}", uri);
let actors = self.0.clone();
spawn(async move { actors.cache_follower(uri).await });
} else {
warn!("Not caching actor {}, parse error", payload);
}
}
}
impl Listener for NewNodes {
fn key(&self) -> &str {
"new_nodes"
}
fn execute(&self, payload: &str) {
if let Ok(uuid) = payload.parse::<Uuid>() {
debug!("Caching node {}", uuid);
let nodes = self.0.clone();
spawn(async move { nodes.cache_by_id(uuid).await });
} else {
warn!("Not caching node {}, parse error", payload);
}
}
}
impl Listener for RmBlocks {
fn key(&self) -> &str {
"rm_blocks"
}
fn execute(&self, payload: &str) {
debug!("Busting block cache for {}", payload);
let state = self.0.clone();
let payload = payload.to_owned();
spawn(async move { state.bust_block(&payload).await });
}
}
impl Listener for RmWhitelists {
fn key(&self) -> &str {
"rm_whitelists"
}
fn execute(&self, payload: &str) {
debug!("Busting whitelist cache for {}", payload);
let state = self.0.clone();
let payload = payload.to_owned();
spawn(async move { state.bust_whitelist(&payload).await });
}
}
impl Listener for RmListeners {
fn key(&self) -> &str {
"rm_listeners"
}
fn execute(&self, payload: &str) {
if let Ok(uri) = payload.parse::<XsdAnyUri>() {
debug!("Busting listener cache for {}", uri);
let state = self.0.clone();
spawn(async move { state.bust_listener(&uri).await });
} else {
warn!("Not busting listener cache for {}", payload);
}
}
}
impl Listener for RmActors {
fn key(&self) -> &str {
"rm_actors"
}
fn execute(&self, payload: &str) {
if let Ok(uri) = payload.parse::<XsdAnyUri>() {
debug!("Busting actor cache for {}", uri);
let actors = self.0.clone();
spawn(async move { actors.bust_follower(&uri).await });
} else {
warn!("Not busting actor cache for {}", payload);
}
}
}
impl Listener for RmNodes {
fn key(&self) -> &str {
"rm_nodes"
}
fn execute(&self, payload: &str) {
if let Ok(uuid) = payload.parse::<Uuid>() {
debug!("Caching node {}", uuid);
let nodes = self.0.clone();
spawn(async move { nodes.bust_by_id(uuid).await });
} else {
warn!("Not caching node {}, parse error", payload);
}
}
}


@@ -1,458 +1,227 @@
use crate::{
data::LastOnline,
error::{Error, ErrorKind},
spawner::Spawner,
stream::{aggregate, limit_stream},
};
use activitystreams::iri_string::types::IriString;
use actix_web::http::header::Date;
use base64::{engine::general_purpose::STANDARD, Engine};
use dashmap::DashMap;
use http_signature_normalization_reqwest::{digest::ring::Sha256, prelude::*};
use reqwest_middleware::ClientWithMiddleware;
use ring::{
rand::SystemRandom,
signature::{RsaKeyPair, RSA_PKCS1_SHA256},
};
use rsa::{pkcs1::EncodeRsaPrivateKey, RsaPrivateKey};
use crate::error::MyError;
use activitystreams_new::primitives::XsdAnyUri;
use actix_web::{client::Client, http::header::Date};
use bytes::Bytes;
use http_signature_normalization_actix::prelude::*;
use log::{debug, info, warn};
use rsa::{hash::Hashes, padding::PaddingScheme, RSAPrivateKey};
use sha2::{Digest, Sha256};
use std::{
sync::Arc,
time::{Duration, SystemTime},
cell::RefCell,
rc::Rc,
sync::atomic::{AtomicUsize, Ordering},
time::SystemTime,
};
const ONE_SECOND: u64 = 1;
const ONE_MINUTE: u64 = 60 * ONE_SECOND;
const ONE_HOUR: u64 = 60 * ONE_MINUTE;
const ONE_DAY: u64 = 24 * ONE_HOUR;
// 20 KB
const JSON_SIZE_LIMIT: usize = 20 * 1024;
#[derive(Debug)]
pub(crate) enum BreakerStrategy {
// Requires a successful response
Require2XX,
// Allows HTTP 2xx-401
Allow401AndBelow,
// Allows HTTP 2xx-404
Allow404AndBelow,
}
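// Illustrative reading, not an exhaustive mapping: Require2XX suits fetches
// that must succeed outright (actors, keys), while the laxer variants keep
// expected 401 or 404 responses, e.g. from servers that simply don't expose
// an optional endpoint, from counting as failures against a domain's breaker.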
#[derive(Clone)]
pub(crate) struct Breakers {
inner: Arc<DashMap<String, Breaker>>,
}
impl std::fmt::Debug for Breakers {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Breakers").finish()
}
}
impl Breakers {
pub(crate) fn should_try(&self, url: &IriString) -> bool {
if let Some(authority) = url.authority_str() {
if let Some(breaker) = self.inner.get(authority) {
breaker.should_try()
} else {
true
}
} else {
false
}
}
fn fail(&self, url: &IriString) {
if let Some(authority) = url.authority_str() {
let should_write = {
if let Some(mut breaker) = self.inner.get_mut(authority) {
breaker.fail();
if !breaker.should_try() {
tracing::warn!("Failed breaker for {authority}");
}
false
} else {
true
}
};
if should_write {
let mut breaker = self.inner.entry(authority.to_owned()).or_default();
breaker.fail();
}
}
}
fn succeed(&self, url: &IriString) {
if let Some(authority) = url.authority_str() {
let should_write = {
if let Some(mut breaker) = self.inner.get_mut(authority) {
breaker.succeed();
false
} else {
true
}
};
if should_write {
let mut breaker = self.inner.entry(authority.to_owned()).or_default();
breaker.succeed();
}
}
}
}
impl Default for Breakers {
fn default() -> Self {
Breakers {
inner: Arc::new(DashMap::new()),
}
}
}
#[derive(Debug)]
struct Breaker {
failures: usize,
last_attempt: SystemTime,
last_success: SystemTime,
}
impl Breaker {
const FAILURE_WAIT: Duration = Duration::from_secs(ONE_DAY);
const FAILURE_THRESHOLD: usize = 10;
fn should_try(&self) -> bool {
self.failures < Self::FAILURE_THRESHOLD
|| self.last_attempt + Self::FAILURE_WAIT < SystemTime::now()
}
fn fail(&mut self) {
self.failures += 1;
self.last_attempt = SystemTime::now();
}
fn succeed(&mut self) {
self.failures = 0;
self.last_attempt = SystemTime::now();
self.last_success = SystemTime::now();
}
}
impl Default for Breaker {
fn default() -> Self {
let now = SystemTime::now();
Breaker {
failures: 0,
last_attempt: now,
last_success: now,
}
}
}
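// A minimal sketch of the breaker lifecycle (hypothetical same-module test,
// not present in either side of this diff): FAILURE_THRESHOLD consecutive
// failures trip the breaker until FAILURE_WAIT elapses; one success resets it.
#[test]
fn breaker_trips_after_threshold_and_resets() {
    let mut breaker = Breaker::default();
    assert!(breaker.should_try());
    for _ in 0..Breaker::FAILURE_THRESHOLD {
        breaker.fail();
    }
    // Tripped: within the FAILURE_WAIT window no further attempts are allowed.
    assert!(!breaker.should_try());
    breaker.succeed();
    assert!(breaker.should_try());
}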
#[derive(Clone)]
pub(crate) struct Requests {
client: ClientWithMiddleware,
pub struct Requests {
client: Rc<RefCell<Client>>,
consecutive_errors: Rc<AtomicUsize>,
error_limit: usize,
key_id: String,
private_key: Arc<RsaKeyPair>,
rng: SystemRandom,
config: Config<Spawner>,
breakers: Breakers,
last_online: Arc<LastOnline>,
}
impl std::fmt::Debug for Requests {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Requests")
.field("key_id", &self.key_id)
.field("config", &self.config)
.field("breakers", &self.breakers)
.finish()
}
user_agent: String,
private_key: RSAPrivateKey,
config: Config,
}
impl Requests {
#[allow(clippy::too_many_arguments)]
pub(crate) fn new(
key_id: String,
private_key: RsaPrivateKey,
breakers: Breakers,
last_online: Arc<LastOnline>,
spawner: Spawner,
client: ClientWithMiddleware,
) -> Self {
let private_key_der = private_key.to_pkcs1_der().expect("Can encode der");
let private_key = ring::signature::RsaKeyPair::from_der(private_key_der.as_bytes())
.expect("Key is valid");
pub fn new(key_id: String, private_key: RSAPrivateKey, user_agent: String) -> Self {
Requests {
client,
client: Rc::new(RefCell::new(
Client::build()
.header("User-Agent", user_agent.clone())
.finish(),
)),
consecutive_errors: Rc::new(AtomicUsize::new(0)),
error_limit: 3,
key_id,
private_key: Arc::new(private_key),
rng: SystemRandom::new(),
config: Config::new_with_spawner(spawner).mastodon_compat(),
breakers,
last_online,
user_agent,
private_key,
config: Config::default().dont_use_created_field(),
}
}
pub(crate) fn spawner(mut self, spawner: Spawner) -> Self {
self.config = self.config.set_spawner(spawner);
self
fn count_err(&self) {
let count = self.consecutive_errors.fetch_add(1, Ordering::Relaxed);
if count + 1 >= self.error_limit {
warn!("{} consecutive errors, rebuilding http client", count);
*self.client.borrow_mut() = Client::build()
.header("User-Agent", self.user_agent.clone())
.finish();
self.reset_err();
}
}
pub(crate) fn reset_breaker(&self, iri: &IriString) {
self.breakers.succeed(iri);
fn reset_err(&self) {
self.consecutive_errors.swap(0, Ordering::Relaxed);
}
async fn check_response(
&self,
parsed_url: &IriString,
strategy: BreakerStrategy,
res: Result<reqwest::Response, reqwest_middleware::Error>,
) -> Result<reqwest::Response, Error> {
pub async fn fetch<T>(&self, url: &str) -> Result<T, MyError>
where
T: serde::de::DeserializeOwned,
{
let signer = self.signer();
let client: Client = self.client.borrow().clone();
let res = client
.get(url)
.header("Accept", "application/activity+json")
.set(Date(SystemTime::now().into()))
.signature(
self.config.clone(),
self.key_id.clone(),
move |signing_string| signer.sign(signing_string),
)
.await?
.send()
.await;
if res.is_err() {
self.breakers.fail(&parsed_url);
self.count_err();
}
let res = res?;
let mut res = res.map_err(|e| MyError::SendRequest(url.to_string(), e.to_string()))?;
let status = res.status();
self.reset_err();
let success = match strategy {
BreakerStrategy::Require2XX => status.is_success(),
BreakerStrategy::Allow401AndBelow => (200..=401).contains(&status.as_u16()),
BreakerStrategy::Allow404AndBelow => (200..=404).contains(&status.as_u16()),
};
if !success {
self.breakers.fail(&parsed_url);
if let Ok(s) = res.text().await {
if !s.is_empty() {
tracing::debug!("Response from {parsed_url}, {s}");
if !res.status().is_success() {
if let Ok(bytes) = res.body().await {
if let Ok(s) = String::from_utf8(bytes.as_ref().to_vec()) {
if !s.is_empty() {
debug!("Response from {}, {}", url, s);
}
}
}
return Err(ErrorKind::Status(
parsed_url.to_string(),
crate::http1::status_to_http02(status),
)
.into());
return Err(MyError::Status(res.status()));
}
// only actually succeed a breaker on 2xx response
if status.is_success() {
self.last_online.mark_seen(&parsed_url);
self.breakers.succeed(&parsed_url);
}
Ok(res)
}
#[tracing::instrument(name = "Fetch Json", skip(self), fields(signing_string))]
pub(crate) async fn fetch_json<T>(
&self,
url: &IriString,
strategy: BreakerStrategy,
) -> Result<T, Error>
where
T: serde::de::DeserializeOwned,
{
self.do_fetch(url, "application/json", strategy).await
}
#[tracing::instrument(name = "Fetch Json", skip(self), fields(signing_string))]
pub(crate) async fn fetch_json_msky<T>(
&self,
url: &IriString,
strategy: BreakerStrategy,
) -> Result<T, Error>
where
T: serde::de::DeserializeOwned,
{
let stream = self
.do_deliver(
url,
&serde_json::json!({}),
"application/json",
"application/json",
strategy,
)
.await?
.bytes_stream();
let body = aggregate(limit_stream(stream, JSON_SIZE_LIMIT)).await?;
Ok(serde_json::from_slice(&body)?)
}
#[tracing::instrument(name = "Fetch Activity+Json", skip(self), fields(signing_string))]
pub(crate) async fn fetch<T>(
&self,
url: &IriString,
strategy: BreakerStrategy,
) -> Result<T, Error>
where
T: serde::de::DeserializeOwned,
{
self.do_fetch(url, "application/activity+json", strategy)
res.json()
.await
.map_err(|e| MyError::ReceiveResponse(url.to_string(), e.to_string()))
}
async fn do_fetch<T>(
&self,
url: &IriString,
accept: &str,
strategy: BreakerStrategy,
) -> Result<T, Error>
where
T: serde::de::DeserializeOwned,
{
let stream = self
.do_fetch_response(url, accept, strategy)
pub async fn fetch_bytes(&self, url: &str) -> Result<(String, Bytes), MyError> {
info!("Fetching bytes for {}", url);
let signer = self.signer();
let client: Client = self.client.borrow().clone();
let res = client
.get(url)
.header("Accept", "*/*")
.set(Date(SystemTime::now().into()))
.signature(
self.config.clone(),
self.key_id.clone(),
move |signing_string| signer.sign(signing_string),
)
.await?
.bytes_stream();
.send()
.await;
let body = aggregate(limit_stream(stream, JSON_SIZE_LIMIT)).await?;
Ok(serde_json::from_slice(&body)?)
}
#[tracing::instrument(name = "Fetch response", skip(self), fields(signing_string))]
pub(crate) async fn fetch_response(
&self,
url: &IriString,
strategy: BreakerStrategy,
) -> Result<reqwest::Response, Error> {
self.do_fetch_response(url, "*/*", strategy).await
}
pub(crate) async fn do_fetch_response(
&self,
url: &IriString,
accept: &str,
strategy: BreakerStrategy,
) -> Result<reqwest::Response, Error> {
if !self.breakers.should_try(url) {
return Err(ErrorKind::Breaker.into());
if res.is_err() {
self.count_err();
}
let signer = self.signer();
let span = tracing::Span::current();
let mut res = res.map_err(|e| MyError::SendRequest(url.to_string(), e.to_string()))?;
let request = self
.client
.get(url.as_str())
.header("Accept", accept)
.header("Date", Date(SystemTime::now().into()).to_string())
.signature(&self.config, self.key_id.clone(), move |signing_string| {
span.record("signing_string", signing_string);
span.in_scope(|| signer.sign(signing_string))
})
.await?;
self.reset_err();
let res = self.client.execute(request).await;
let content_type = if let Some(content_type) = res.headers().get("content-type") {
if let Ok(s) = content_type.to_str() {
s.to_owned()
} else {
return Err(MyError::ContentType);
}
} else {
return Err(MyError::ContentType);
};
let res = self.check_response(url, strategy, res).await?;
if !res.status().is_success() {
if let Ok(bytes) = res.body().await {
if let Ok(s) = String::from_utf8(bytes.as_ref().to_vec()) {
if !s.is_empty() {
debug!("Response from {}, {}", url, s);
}
}
}
Ok(res)
}
#[tracing::instrument(
"Deliver to Inbox",
skip_all,
fields(inbox = inbox.to_string().as_str(), signing_string)
)]
pub(crate) async fn deliver<T>(
&self,
inbox: &IriString,
item: &T,
strategy: BreakerStrategy,
) -> Result<(), Error>
where
T: serde::ser::Serialize + std::fmt::Debug,
{
self.do_deliver(
inbox,
item,
"application/activity+json",
"application/activity+json",
strategy,
)
.await?;
Ok(())
}
async fn do_deliver<T>(
&self,
inbox: &IriString,
item: &T,
content_type: &str,
accept: &str,
strategy: BreakerStrategy,
) -> Result<reqwest::Response, Error>
where
T: serde::ser::Serialize + std::fmt::Debug,
{
if !self.breakers.should_try(&inbox) {
return Err(ErrorKind::Breaker.into());
return Err(MyError::Status(res.status()));
}
let bytes = match res.body().limit(1024 * 1024 * 4).await {
Err(e) => {
return Err(MyError::ReceiveResponse(url.to_string(), e.to_string()));
}
Ok(bytes) => bytes,
};
Ok((content_type, bytes))
}
pub async fn deliver<T>(&self, inbox: XsdAnyUri, item: &T) -> Result<(), MyError>
where
T: serde::ser::Serialize,
{
let signer = self.signer();
let span = tracing::Span::current();
let item_string = serde_json::to_string(item)?;
let request = self
.client
let client: Client = self.client.borrow().clone();
let res = client
.post(inbox.as_str())
.header("Accept", accept)
.header("Content-Type", content_type)
.header("Date", Date(SystemTime::now().into()).to_string())
.header("Accept", "application/activity+json")
.header("Content-Type", "application/activity+json")
.set(Date(SystemTime::now().into()))
.signature_with_digest(
self.config.clone(),
self.key_id.clone(),
Sha256::new(),
item_string,
move |signing_string| {
span.record("signing_string", signing_string);
span.in_scope(|| signer.sign(signing_string))
},
move |signing_string| signer.sign(signing_string),
)
.await?;
.await?
.send()
.await;
let res = self.client.execute(request).await;
if res.is_err() {
self.count_err();
}
let res = self.check_response(inbox, strategy, res).await?;
let mut res = res.map_err(|e| MyError::SendRequest(inbox.to_string(), e.to_string()))?;
Ok(res)
self.reset_err();
if !res.status().is_success() {
if let Ok(bytes) = res.body().await {
if let Ok(s) = String::from_utf8(bytes.as_ref().to_vec()) {
if !s.is_empty() {
debug!("Response from {}, {}", inbox.as_str(), s);
}
}
}
return Err(MyError::Status(res.status()));
}
Ok(())
}
fn signer(&self) -> Signer {
Signer {
private_key: self.private_key.clone(),
rng: self.rng.clone(),
}
}
}
struct Signer {
private_key: Arc<RsaKeyPair>,
rng: SystemRandom,
private_key: RSAPrivateKey,
}
impl Signer {
fn sign(&self, signing_string: &str) -> Result<String, Error> {
let mut signature = vec![0; self.private_key.public().modulus_len()];
self.private_key
.sign(
&RSA_PKCS1_SHA256,
&self.rng,
signing_string.as_bytes(),
&mut signature,
)
.map_err(|_| ErrorKind::SignRequest)?;
Ok(STANDARD.encode(&signature))
fn sign(&self, signing_string: &str) -> Result<String, MyError> {
let hashed = Sha256::digest(signing_string.as_bytes());
let bytes =
self.private_key
.sign(PaddingScheme::PKCS1v15, Some(&Hashes::SHA2_256), &hashed)?;
Ok(base64::encode(bytes))
}
}
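// Compatibility note (hedged): both versions of Signer::sign above produce
// RSASSA-PKCS1-v1_5 signatures over SHA-256. ring's RSA_PKCS1_SHA256 hashes
// internally, while the older rsa-crate path hashes explicitly with
// Sha256::digest before padding, so the resulting signatures should be
// interchangeable on the wire.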


@@ -2,33 +2,30 @@ use crate::{
apub::{PublicKey, PublicKeyInner},
config::{Config, UrlKind},
data::State,
error::Error,
error::MyError,
routes::ok,
};
use activitystreams::{
use activitystreams_ext::Ext1;
use activitystreams_new::{
actor::{ApActor, Application, Endpoints},
context,
prelude::*,
security,
};
use activitystreams_ext::Ext1;
use actix_web::{web, Responder};
use rsa::pkcs8::EncodePublicKey;
use rsa_pem::KeyExt;
#[tracing::instrument(name = "Actor", skip(config, state))]
pub(crate) async fn route(
pub async fn route(
state: web::Data<State>,
config: web::Data<Config>,
) -> Result<impl Responder, Error> {
) -> Result<impl Responder, MyError> {
let mut application = Ext1::new(
ApActor::new(config.generate_url(UrlKind::Inbox), Application::new()),
PublicKey {
public_key: PublicKeyInner {
id: config.generate_url(UrlKind::MainKey),
owner: config.generate_url(UrlKind::Actor),
public_key_pem: state
.public_key
.to_public_key_pem(rsa::pkcs8::LineEnding::default())?,
public_key_pem: state.public_key.to_pem_pkcs8()?,
},
},
);


@@ -1,7 +0,0 @@
use crate::{data::State, error::Error};
use actix_web::{web, HttpResponse};
pub(crate) async fn route(state: web::Data<State>) -> Result<HttpResponse, Error> {
state.db.check_health().await?;
Ok(HttpResponse::Ok().finish())
}


@@ -1,134 +1,124 @@
use crate::{
apub::{AcceptedActivities, AcceptedUndoObjects, UndoTypes, ValidTypes},
config::{Config, UrlKind},
data::{ActorCache, State},
db::Actor,
error::{Error, ErrorKind},
data::{Actor, ActorCache, State},
error::MyError,
jobs::apub::{Announce, Follow, Forward, Reject, Undo},
jobs::JobServer,
requests::Requests,
routes::accepted,
};
use activitystreams::{
activity, base::AnyBase, iri_string::types::IriString, prelude::*, primitives::OneOrMany,
use activitystreams_new::{
activity,
base::AnyBase,
prelude::*,
primitives::{OneOrMany, XsdAnyUri},
public,
};
use actix_web::{web, HttpResponse};
use futures::join;
use http_signature_normalization_actix::prelude::{DigestVerified, SignatureVerified};
use log::error;
#[tracing::instrument(name = "Inbox", skip_all, fields(id = tracing::field::debug(&input.id_unchecked()), kind = tracing::field::debug(&input.kind())))]
#[allow(clippy::too_many_arguments)]
pub(crate) async fn route(
pub async fn route(
state: web::Data<State>,
actors: web::Data<ActorCache>,
config: web::Data<Config>,
client: web::Data<Requests>,
jobs: web::Data<JobServer>,
input: web::Json<AcceptedActivities>,
digest_verified: Option<DigestVerified>,
signature_verified: Option<SignatureVerified>,
) -> Result<HttpResponse, Error> {
verified: Option<(SignatureVerified, DigestVerified)>,
) -> Result<HttpResponse, MyError> {
let input = input.into_inner();
let kind = input.kind().ok_or(ErrorKind::MissingKind)?;
let actor = actors
.get(
input.actor().as_single_id().ok_or(MyError::MissingId)?,
&client,
)
.await?
.into_inner();
if digest_verified.is_some() && signature_verified.is_none() && *kind == ValidTypes::Delete {
return Ok(accepted(serde_json::json!({})));
} else if config.validate_signatures()
&& (digest_verified.is_none() || signature_verified.is_none())
{
return Err(ErrorKind::NoSignature(None).into());
let (is_blocked, is_whitelisted, is_listener) = join!(
state.is_blocked(&actor.id),
state.is_whitelisted(&actor.id),
state.is_listener(&actor.inbox)
);
if is_blocked {
return Err(MyError::Blocked(actor.id.to_string()));
}
let actor_id = if input.id_unchecked().is_some() {
input.actor()?.as_single_id().ok_or(ErrorKind::MissingId)?
} else {
input
.actor_unchecked()
.as_single_id()
.ok_or(ErrorKind::MissingId)?
};
if !is_whitelisted {
return Err(MyError::Whitelist(actor.id.to_string()));
}
let actor = actors.get(actor_id, &client).await?.into_inner();
if !is_listener && !valid_without_listener(&input)? {
return Err(MyError::NotSubscribed(actor.inbox.to_string()));
}
if let Some(verified) = signature_verified {
if actor.public_key_id.as_str() != verified.key_id() {
tracing::error!("Actor signed with wrong key");
return Err(ErrorKind::BadActor(
actor.public_key_id.to_string(),
verified.key_id().to_owned(),
)
.into());
}
if config.validate_signatures() && verified.is_none() {
return Err(MyError::NoSignature(actor.public_key_id.to_string()));
} else if config.validate_signatures() {
tracing::error!("This case should never be reachable, since I handle signature checks earlier in the flow. If you see this in a log it means I did it wrong");
return Err(ErrorKind::NoSignature(Some(actor.public_key_id.to_string())).into());
if let Some((verified, _)) = verified {
if actor.public_key_id.as_str() != verified.key_id() {
error!("Bad actor, more info: {:?}", input);
return Err(MyError::BadActor(
actor.public_key_id.to_string(),
verified.key_id().to_owned(),
));
}
}
}
let is_allowed = state.db.is_allowed(actor.id.clone()).await?;
let is_connected = state.db.is_connected(actor.id.clone()).await?;
if !is_allowed {
return Err(ErrorKind::NotAllowed(actor.id.to_string()).into());
}
if !is_connected && !valid_without_listener(&input)? {
return Err(ErrorKind::NotSubscribed(actor.id.to_string()).into());
}
match kind {
match input.kind().ok_or(MyError::MissingKind)? {
ValidTypes::Accept => handle_accept(&config, input).await?,
ValidTypes::Reject => handle_reject(&config, &jobs, input, actor).await?,
ValidTypes::Announce | ValidTypes::Create => {
handle_announce(&state, &jobs, input, actor).await?
}
ValidTypes::Follow => handle_follow(&config, &jobs, input, actor).await?,
ValidTypes::Add | ValidTypes::Delete | ValidTypes::Remove | ValidTypes::Update => {
handle_forward(&jobs, input, actor).await?
}
ValidTypes::Undo => handle_undo(&config, &jobs, input, actor, is_connected).await?,
ValidTypes::Follow => handle_follow(&config, &jobs, input, actor, is_listener).await?,
ValidTypes::Delete | ValidTypes::Update => handle_forward(&jobs, input, actor).await?,
ValidTypes::Undo => handle_undo(&config, &jobs, input, actor, is_listener).await?,
};
Ok(accepted(serde_json::json!({})))
}
fn valid_without_listener(input: &AcceptedActivities) -> Result<bool, Error> {
fn valid_without_listener(input: &AcceptedActivities) -> Result<bool, MyError> {
match input.kind() {
Some(ValidTypes::Follow) => Ok(true),
Some(ValidTypes::Undo) => Ok(single_object(input.object_unchecked())?.is_kind("Follow")),
Some(ValidTypes::Undo) => Ok(single_object(input.object())?.is_kind("Follow")),
_ => Ok(false),
}
}
fn kind_str(base: &AnyBase) -> Result<&str, Error> {
base.kind_str()
.ok_or(ErrorKind::MissingKind)
.map_err(Into::into)
fn kind_str(base: &AnyBase) -> Result<&str, MyError> {
base.kind_str().ok_or(MyError::MissingKind)
}
fn id_string(id: Option<&IriString>) -> Result<String, Error> {
id.map(|s| s.to_string())
.ok_or(ErrorKind::MissingId)
.map_err(Into::into)
fn id_string(id: Option<&XsdAnyUri>) -> Result<String, MyError> {
id.map(|s| s.to_string()).ok_or(MyError::MissingId)
}
fn single_object(o: &OneOrMany<AnyBase>) -> Result<&AnyBase, Error> {
o.as_one().ok_or(ErrorKind::ObjectCount).map_err(Into::into)
fn single_object(o: &OneOrMany<AnyBase>) -> Result<&AnyBase, MyError> {
o.as_one().ok_or(MyError::ObjectCount)
}
async fn handle_accept(config: &Config, input: AcceptedActivities) -> Result<(), Error> {
let base = single_object(input.object_unchecked())?.clone();
async fn handle_accept(config: &Config, input: AcceptedActivities) -> Result<(), MyError> {
let base = single_object(input.object())?.clone();
let follow = if let Some(follow) = activity::Follow::from_any_base(base)? {
follow
} else {
return Err(ErrorKind::Kind(
kind_str(single_object(input.object_unchecked())?)?.to_owned(),
)
.into());
return Err(MyError::Kind(
kind_str(single_object(input.object())?)?.to_owned(),
));
};
if !follow.actor_is(&config.generate_url(UrlKind::Actor)) {
return Err(ErrorKind::WrongActor(id_string(follow.actor()?.as_single_id())?).into());
return Err(MyError::WrongActor(id_string(
follow.actor().as_single_id(),
)?));
}
Ok(())
@@ -139,22 +129,23 @@ async fn handle_reject(
jobs: &JobServer,
input: AcceptedActivities,
actor: Actor,
) -> Result<(), Error> {
let base = single_object(input.object_unchecked())?.clone();
) -> Result<(), MyError> {
let base = single_object(input.object())?.clone();
let follow = if let Some(follow) = activity::Follow::from_any_base(base)? {
follow
} else {
return Err(ErrorKind::Kind(
kind_str(single_object(input.object_unchecked())?)?.to_owned(),
)
.into());
return Err(MyError::Kind(
kind_str(single_object(input.object())?)?.to_owned(),
));
};
if !follow.actor_is(&config.generate_url(UrlKind::Actor)) {
return Err(ErrorKind::WrongActor(id_string(follow.actor()?.as_single_id())?).into());
return Err(MyError::WrongActor(id_string(
follow.actor().as_single_id(),
)?));
}
jobs.queue(Reject(actor)).await?;
jobs.queue(Reject(actor))?;
Ok(())
}
@@ -165,34 +156,33 @@ async fn handle_undo(
input: AcceptedActivities,
actor: Actor,
is_listener: bool,
) -> Result<(), Error> {
let any_base = single_object(input.object_unchecked())?.clone();
) -> Result<(), MyError> {
let any_base = single_object(input.object())?.clone();
let undone_object =
AcceptedUndoObjects::from_any_base(any_base)?.ok_or(ErrorKind::ObjectFormat)?;
AcceptedUndoObjects::from_any_base(any_base)?.ok_or(MyError::ObjectFormat)?;
if !undone_object.is_kind(&UndoTypes::Follow) {
if is_listener {
jobs.queue(Forward::new(input, actor)).await?;
jobs.queue(Forward::new(input, actor))?;
return Ok(());
} else {
return Err(ErrorKind::NotSubscribed(actor.id.to_string()).into());
return Err(MyError::NotSubscribed(actor.inbox.to_string()));
}
}
let my_id: IriString = config.generate_url(UrlKind::Actor);
let my_id: XsdAnyUri = config.generate_url(UrlKind::Actor);
if !undone_object.object_is(&my_id) && !undone_object.object_is(&public()) {
return Err(ErrorKind::WrongActor(id_string(
undone_object.object_unchecked().as_single_id(),
)?)
.into());
return Err(MyError::WrongActor(id_string(
undone_object.object().as_single_id(),
)?));
}
if !is_listener {
return Ok(());
}
jobs.queue(Undo::new(input, actor)).await?;
jobs.queue(Undo::new(input, actor))?;
Ok(())
}
@@ -200,8 +190,8 @@ async fn handle_forward(
jobs: &JobServer,
input: AcceptedActivities,
actor: Actor,
) -> Result<(), Error> {
jobs.queue(Forward::new(input, actor)).await?;
) -> Result<(), MyError> {
jobs.queue(Forward::new(input, actor))?;
Ok(())
}
@@ -211,18 +201,14 @@ async fn handle_announce(
jobs: &JobServer,
input: AcceptedActivities,
actor: Actor,
) -> Result<(), Error> {
let object_id = input
.object_unchecked()
.as_single_id()
.ok_or(ErrorKind::MissingId)?;
) -> Result<(), MyError> {
let object_id = input.object().as_single_id().ok_or(MyError::MissingId)?;
if state.is_cached(object_id) {
return Err(ErrorKind::Duplicate.into());
if state.is_cached(object_id).await {
return Err(MyError::Duplicate);
}
jobs.queue(Announce::new(object_id.to_owned(), actor))
.await?;
jobs.queue(Announce::new(object_id.to_owned(), actor))?;
Ok(())
}
@@ -232,16 +218,17 @@ async fn handle_follow(
jobs: &JobServer,
input: AcceptedActivities,
actor: Actor,
) -> Result<(), Error> {
let my_id: IriString = config.generate_url(UrlKind::Actor);
is_listener: bool,
) -> Result<(), MyError> {
let my_id: XsdAnyUri = config.generate_url(UrlKind::Actor);
if !input.object_is(&my_id) && !input.object_is(&public()) {
return Err(
ErrorKind::WrongActor(id_string(input.object_unchecked().as_single_id())?).into(),
);
return Err(MyError::WrongActor(id_string(
input.object().as_single_id(),
)?));
}
jobs.queue(Follow::new(input, actor)).await?;
jobs.queue(Follow::new(is_listener, input, actor))?;
Ok(())
}

View file

@ -1,91 +1,22 @@
use crate::{
config::Config,
data::{Node, State},
error::{Error, ErrorKind},
};
use crate::{config::Config, data::State, error::MyError};
use actix_web::{web, HttpResponse};
use log::error;
use rand::{seq::SliceRandom, thread_rng};
use std::io::BufWriter;
const MINIFY_CONFIG: minify_html::Cfg = minify_html::Cfg {
do_not_minify_doctype: true,
ensure_spec_compliant_unquoted_attribute_values: true,
keep_closing_tags: true,
keep_html_and_head_opening_tags: false,
keep_spaces_between_attributes: true,
keep_comments: false,
keep_input_type_text_attr: true,
keep_ssi_comments: false,
preserve_brace_template_syntax: false,
preserve_chevron_percent_template_syntax: false,
minify_css: true,
minify_js: true,
remove_bangs: true,
remove_processing_instructions: true,
};
fn open_reg(node: &Node) -> bool {
node.instance
.as_ref()
.map(|i| i.reg)
.or_else(|| node.info.as_ref().map(|i| i.reg))
.unwrap_or(false)
}
#[tracing::instrument(name = "Index", skip(config, state))]
pub(crate) async fn route(
pub async fn route(
state: web::Data<State>,
config: web::Data<Config>,
) -> Result<HttpResponse, Error> {
let all_nodes = state.node_cache.nodes().await?;
let mut nodes = Vec::new();
let mut local = Vec::new();
for node in all_nodes {
if !state.is_connected(&node.base) {
continue;
}
if node
.base
.authority_str()
.map(|authority| {
config
.local_domains()
.iter()
.any(|domain| domain.as_str() == authority)
})
.unwrap_or(false)
{
local.push(node);
} else {
nodes.push(node);
}
}
nodes.sort_by(|lhs, rhs| match (open_reg(lhs), open_reg(rhs)) {
(true, true) | (false, false) => std::cmp::Ordering::Equal,
(true, false) => std::cmp::Ordering::Less,
(false, true) => std::cmp::Ordering::Greater,
});
if let Some((i, _)) = nodes.iter().enumerate().find(|(_, node)| !open_reg(node)) {
nodes[..i].shuffle(&mut thread_rng());
nodes[i..].shuffle(&mut thread_rng());
} else {
nodes.shuffle(&mut thread_rng());
}
) -> Result<HttpResponse, MyError> {
let mut nodes = state.node_cache().nodes().await;
nodes.shuffle(&mut thread_rng());
let mut buf = BufWriter::new(Vec::new());
crate::templates::index_html(&mut buf, &local, &nodes, &config)?;
let html = buf.into_inner().map_err(|e| {
tracing::error!("Error rendering template, {}", e.error());
ErrorKind::FlushBuffer
crate::templates::index(&mut buf, &nodes, &config)?;
let buf = buf.into_inner().map_err(|e| {
error!("Error rendering template, {}", e.error());
MyError::FlushBuffer
})?;
let html = minify_html::minify(&html, &MINIFY_CONFIG);
Ok(HttpResponse::Ok().content_type("text/html").body(html))
Ok(HttpResponse::Ok().content_type("text/html").body(buf))
}
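The main-side index handler does more than the old shuffle: it drops disconnected nodes, splits out instances on the configured local domains, sorts so that open-registration instances come first, and then shuffles within the open and closed groups separately, so ordering stays random but closed instances never outrank open ones. The grouped-shuffle trick in isolation, as a self-contained sketch (true stands in for "open registrations"):

    use rand::{seq::SliceRandom, thread_rng};

    fn group_shuffle(nodes: &mut Vec<(&str, bool)>) {
        // Stable-partition open-registration nodes to the front.
        nodes.sort_by_key(|(_, open)| std::cmp::Reverse(*open));

        // Shuffle each group independently so closed instances never
        // appear above open ones.
        if let Some(i) = nodes.iter().position(|(_, open)| !*open) {
            nodes[..i].shuffle(&mut thread_rng());
            nodes[i..].shuffle(&mut thread_rng());
        } else {
            nodes.shuffle(&mut thread_rng());
        }
    }

    fn main() {
        let mut nodes = vec![("a.example", true), ("b.example", false), ("c.example", true)];
        group_shuffle(&mut nodes);
        assert!(nodes[0].1 && nodes[1].1); // the two open nodes stay in front
    }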

View file

@@ -1,42 +1,42 @@
-use crate::{
-    data::MediaCache,
-    error::Error,
-    requests::{BreakerStrategy, Requests},
-    stream::limit_stream,
+use crate::{data::Media, error::MyError, requests::Requests};
+use actix_web::{
+    http::header::{CacheControl, CacheDirective},
+    web, HttpResponse,
 };
-use actix_web::{body::BodyStream, web, HttpResponse};
+use bytes::Bytes;
 use uuid::Uuid;
 
-// 16 MB
-const IMAGE_SIZE_LIMIT: usize = 16 * 1024 * 1024;
-
-#[tracing::instrument(name = "Media", skip(media, requests))]
-pub(crate) async fn route(
-    media: web::Data<MediaCache>,
+pub async fn route(
+    media: web::Data<Media>,
     requests: web::Data<Requests>,
     uuid: web::Path<Uuid>,
-) -> Result<HttpResponse, Error> {
+) -> Result<HttpResponse, MyError> {
     let uuid = uuid.into_inner();
 
+    if let Some((content_type, bytes)) = media.get_bytes(uuid).await {
+        return Ok(cached(content_type, bytes));
+    }
+
     if let Some(url) = media.get_url(uuid).await? {
-        let res = requests
-            .fetch_response(&url, BreakerStrategy::Allow404AndBelow)
-            .await?;
+        let (content_type, bytes) = requests.fetch_bytes(url.as_str()).await?;
 
-        let mut response = HttpResponse::build(crate::http1::status_to_http02(res.status()));
+        media
+            .store_bytes(uuid, content_type.clone(), bytes.clone())
+            .await;
 
-        for (name, value) in res.headers().iter().filter(|(h, _)| *h != "connection") {
-            response.insert_header((
-                crate::http1::name_to_http02(name),
-                crate::http1::value_to_http02(value),
-            ));
-        }
-
-        return Ok(response.body(BodyStream::new(limit_stream(
-            res.bytes_stream(),
-            IMAGE_SIZE_LIMIT,
-        ))));
+        return Ok(cached(content_type, bytes));
     }
 
     Ok(HttpResponse::NotFound().finish())
 }
+
+fn cached(content_type: String, bytes: Bytes) -> HttpResponse {
+    HttpResponse::Ok()
+        .set(CacheControl(vec![
+            CacheDirective::Public,
+            CacheDirective::MaxAge(60 * 60 * 24),
+            CacheDirective::Extension("immutable".to_owned(), None),
+        ]))
+        .content_type(content_type)
+        .body(bytes)
+}
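On the main side the media route no longer buffers or caches whole upstream bodies: it streams the response through, capped at IMAGE_SIZE_LIMIT by limit_stream. The repo's implementation lives in crate::stream and isn't shown in this diff; a minimal stand-in that errors once a byte stream exceeds a budget might look like this (a sketch, not the crate's actual code):

    use bytes::Bytes;
    use futures_util::{Stream, StreamExt};
    use std::io::{Error, ErrorKind};

    // Cap a byte stream at `limit` total bytes, yielding an error item
    // once exceeded. A real implementation would also stop polling the
    // inner stream after the error.
    fn limit_stream<S>(stream: S, limit: usize) -> impl Stream<Item = Result<Bytes, Error>>
    where
        S: Stream<Item = Result<Bytes, Error>>,
    {
        stream.scan(0usize, move |seen, item| {
            let mapped = item.and_then(|chunk| {
                *seen += chunk.len();
                if *seen > limit {
                    Err(Error::new(ErrorKind::InvalidData, "body exceeded size limit"))
                } else {
                    Ok(chunk)
                }
            });
            std::future::ready(Some(mapped))
        })
    }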

View file

@@ -1,14 +1,12 @@
 mod actor;
-mod healthz;
 mod inbox;
 mod index;
 mod media;
 mod nodeinfo;
 mod statics;
 
-pub(crate) use self::{
+pub use self::{
     actor::route as actor,
-    healthz::route as healthz,
     inbox::route as inbox,
     index::route as index,
     media::route as media,
@@ -25,7 +23,7 @@ fn ok<T>(item: T) -> HttpResponse
 where
     T: Serialize,
 {
-    HttpResponse::Ok().content_type(CONTENT_TYPE).json(&item)
+    HttpResponse::Ok().content_type(CONTENT_TYPE).json(item)
 }
 
 fn accepted<T>(item: T) -> HttpResponse
@@ -34,5 +32,5 @@ where
 {
     HttpResponse::Accepted()
         .content_type(CONTENT_TYPE)
-        .json(&item)
+        .json(item)
 }

View file

@@ -5,8 +5,7 @@ use crate::{
 use actix_web::{web, Responder};
 use actix_webfinger::Link;
 
-#[tracing::instrument(name = "Well Known NodeInfo", skip(config))]
-pub(crate) async fn well_known(config: web::Data<Config>) -> impl Responder {
+pub async fn well_known(config: web::Data<Config>) -> impl Responder {
     web::Json(Links {
         links: vec![Link {
             rel: "http://nodeinfo.diaspora.software/ns/schema/2.0".to_owned(),
@@ -15,8 +14,7 @@ pub(crate) async fn well_known(config: web::Data<Config>) -> impl Responder {
             kind: None,
         }],
     })
-    .customize()
-    .insert_header(("Content-Type", "application/jrd+json"))
+    .with_header("Content-Type", "application/jrd+json")
 }
 
 #[derive(serde::Serialize)]
@@ -24,40 +22,19 @@ struct Links {
     links: Vec<Link>,
 }
 
-#[tracing::instrument(name = "NodeInfo", skip_all)]
-pub(crate) async fn route(
-    config: web::Data<Config>,
-    state: web::Data<State>,
-) -> web::Json<NodeInfo> {
-    let inboxes = state.db.inboxes().await;
-
-    let blocks = if config.publish_blocks() {
-        Some(state.db.blocks().await.unwrap_or_default())
-    } else {
-        None
-    };
-
-    let peers = inboxes
-        .unwrap_or_default()
-        .iter()
-        .filter_map(|listener| listener.authority_str())
-        .map(|s| s.to_owned())
-        .collect();
-
-    let open_registrations = !config.restricted_mode();
-
+pub async fn route(config: web::Data<Config>, state: web::Data<State>) -> web::Json<NodeInfo> {
     web::Json(NodeInfo {
         version: NodeInfoVersion,
         software: Software {
-            name: Config::software_name().to_lowercase(),
-            version: Config::software_version(),
+            name: config.software_name().to_lowercase(),
+            version: config.software_version(),
         },
         protocols: vec![Protocol::ActivityPub],
         services: Services {
             inbound: vec![],
             outbound: vec![],
         },
-        open_registrations,
+        open_registrations: false,
         usage: Usage {
             users: Users {
                 total: 1,
@@ -67,7 +44,20 @@ pub(crate) async fn route(
             local_posts: 0,
             local_comments: 0,
         },
-        metadata: Metadata { peers, blocks },
+        metadata: Metadata {
+            peers: state
+                .listeners()
+                .await
+                .iter()
+                .filter_map(|listener| listener.as_url().domain())
+                .map(|s| s.to_owned())
+                .collect(),
+            blocks: if config.publish_blocks() {
+                Some(state.blocks().await)
+            } else {
+                None
+            },
+        },
     })
 }
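One detail worth noting in the metadata block: on both sides, blocks is only Some(..) when publish_blocks is enabled, which lets the blocks key be omitted from the JSON entirely when the blocklist is private. A hedged sketch of how the Metadata struct presumably achieves that with serde (field names match the diff; the attribute is an assumption):

    use serde::Serialize;

    // Sketch only: `skip_serializing_if` is assumed, not shown in the diff.
    #[derive(Serialize)]
    struct Metadata {
        peers: Vec<String>,
        #[serde(skip_serializing_if = "Option::is_none")]
        blocks: Option<Vec<String>>,
    }

    fn main() {
        let meta = Metadata { peers: vec!["masto.asonix.dog".into()], blocks: None };
        // Prints {"peers":["masto.asonix.dog"]} -- no "blocks" key at all.
        println!("{}", serde_json::to_string(&meta).unwrap());
    }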

Some files were not shown because too many files have changed in this diff.