Compare commits


No commits in common. "main" and "v0.3.108" have entirely different histories.

29 changed files with 1407 additions and 2169 deletions

.drone.yml (new file, +421)

@@ -0,0 +1,421 @@
kind: pipeline
type: docker
name: clippy
platform:
arch: amd64
clone:
disable: true
steps:
- name: clone
image: alpine/git:latest
user: root
commands:
- git clone $DRONE_GIT_HTTP_URL .
- git checkout $DRONE_COMMIT
- chown -R 991:991 .
- name: clippy
image: asonix/rust-builder:latest-linux-amd64
pull: always
commands:
- rustup component add clippy
- cargo clippy --no-deps -- -D warnings
trigger:
event:
- push
- pull_request
- tag
---
kind: pipeline
type: docker
name: tests
platform:
arch: amd64
clone:
disable: true
steps:
- name: clone
image: alpine/git:latest
user: root
commands:
- git clone $DRONE_GIT_HTTP_URL .
- git checkout $DRONE_COMMIT
- chown -R 991:991 .
- name: tests
image: asonix/rust-builder:latest-linux-amd64
pull: always
commands:
- cargo test
trigger:
event:
- push
- pull_request
- tag
---
kind: pipeline
type: docker
name: check-amd64
platform:
arch: amd64
clone:
disable: true
steps:
- name: clone
image: alpine/git:latest
user: root
commands:
- git clone $DRONE_GIT_HTTP_URL .
- git checkout $DRONE_COMMIT
- chown -R 991:991 .
- name: check
image: asonix/rust-builder:latest-linux-amd64
pull: always
commands:
- cargo check --target=$TARGET
trigger:
event:
- push
- pull_request
---
kind: pipeline
type: docker
name: build-amd64
platform:
arch: amd64
clone:
disable: true
steps:
- name: clone
image: alpine/git:latest
user: root
commands:
- git clone $DRONE_GIT_HTTP_URL .
- git checkout $DRONE_COMMIT
- chown -R 991:991 .
- name: build
image: asonix/rust-builder:latest-linux-amd64
pull: always
commands:
- cargo build --target=$TARGET --release
- $TOOL-strip target/$TARGET/release/relay
- cp target/$TARGET/release/relay .
- cp relay relay-linux-amd64
- name: push
image: plugins/docker:20
settings:
username: asonix
password:
from_secret: dockerhub_token
repo: asonix/relay
dockerfile: docker/drone/Dockerfile
auto_tag: true
auto_tag_suffix: linux-amd64
build_args:
- REPO_ARCH=amd64
- name: publish
image: plugins/gitea-release:1
settings:
api_key:
from_secret: gitea_token
base_url: https://git.asonix.dog
files:
- relay-linux-amd64
depends_on:
- clippy
- tests
trigger:
event:
- tag
---
kind: pipeline
type: docker
name: check-arm64v8
platform:
arch: amd64
clone:
disable: true
steps:
- name: clone
image: alpine/git:latest
user: root
commands:
- git clone $DRONE_GIT_HTTP_URL .
- git checkout $DRONE_COMMIT
- chown -R 991:991 .
- name: check
image: asonix/rust-builder:latest-linux-arm64v8
pull: always
commands:
- cargo check --target=$TARGET
trigger:
event:
- push
- pull_request
---
kind: pipeline
type: docker
name: build-arm64v8
platform:
arch: amd64
clone:
disable: true
steps:
- name: clone
image: alpine/git:latest
user: root
commands:
- git clone $DRONE_GIT_HTTP_URL .
- git checkout $DRONE_COMMIT
- chown -R 991:991 .
- name: build
image: asonix/rust-builder:latest-linux-arm64v8
pull: always
commands:
- cargo build --target=$TARGET --release
- $TOOL-strip target/$TARGET/release/relay
- cp target/$TARGET/release/relay .
- cp relay relay-linux-arm64v8
- name: push
image: plugins/docker:20
settings:
username: asonix
password:
from_secret: dockerhub_token
repo: asonix/relay
dockerfile: docker/drone/Dockerfile
auto_tag: true
auto_tag_suffix: linux-arm64v8
build_args:
- REPO_ARCH=arm64v8
- name: publish
image: plugins/gitea-release:1
settings:
api_key:
from_secret: gitea_token
base_url: https://git.asonix.dog
files:
- relay-linux-arm64v8
depends_on:
- clippy
- tests
trigger:
event:
- tag
---
kind: pipeline
type: docker
name: check-arm32v7
platform:
arch: amd64
clone:
disable: true
steps:
- name: clone
image: alpine/git:latest
user: root
commands:
- git clone $DRONE_GIT_HTTP_URL .
- git checkout $DRONE_COMMIT
- chown -R 991:991 .
- name: check
image: asonix/rust-builder:latest-linux-arm32v7
pull: always
commands:
- cargo check --target=$TARGET
trigger:
event:
- push
- pull_request
---
kind: pipeline
type: docker
name: build-arm32v7
platform:
arch: amd64
clone:
disable: true
steps:
- name: clone
image: alpine/git:latest
user: root
commands:
- git clone $DRONE_GIT_HTTP_URL .
- git checkout $DRONE_COMMIT
- chown -R 991:991 .
- name: build
image: asonix/rust-builder:latest-linux-arm32v7
pull: always
commands:
- cargo build --target=$TARGET --release
- $TOOL-strip target/$TARGET/release/relay
- cp target/$TARGET/release/relay .
- cp relay relay-linux-arm32v7
- name: push
image: plugins/docker:20
settings:
username: asonix
password:
from_secret: dockerhub_token
repo: asonix/relay
dockerfile: docker/drone/Dockerfile
auto_tag: true
auto_tag_suffix: linux-arm32v7
build_args:
- REPO_ARCH=arm32v7
- name: publish
image: plugins/gitea-release:1
settings:
api_key:
from_secret: gitea_token
base_url: https://git.asonix.dog
files:
- relay-linux-arm32v7
depends_on:
- clippy
- tests
trigger:
event:
- tag
---
kind: pipeline
type: docker
name: manifest
platform:
arch: amd64
clone:
disable: true
steps:
- name: clone
image: alpine/git:latest
user: root
commands:
- git clone $DRONE_GIT_HTTP_URL .
- git checkout $DRONE_COMMIT
- chown -R 991:991 .
- name: manifest
image: plugins/manifest:1
settings:
username: asonix
password:
from_secret: dockerhub_token
dump: true
auto_tag: true
ignore_missing: true
spec: docker/drone/manifest.tmpl
depends_on:
- build-amd64
- build-arm64v8
- build-arm32v7
trigger:
event:
- tag
---
kind: pipeline
type: docker
name: publish-crate
platform:
arch: amd64
clone:
disable: true
steps:
- name: clone
image: alpine/git:latest
user: root
commands:
- git clone $DRONE_GIT_HTTP_URL .
- git checkout $DRONE_COMMIT
- chown -R 991:991 .
- name: publish
image: asonix/rust-builder:latest-linux-amd64
pull: always
environment:
CRATES_IO_TOKEN:
from_secret: crates_io_token
commands:
- cargo publish --token $CRATES_IO_TOKEN
depends_on:
- build-amd64
- build-arm64v8
- build-arm32v7
trigger:
event:
- tag

(deleted file: CI workflow)

@@ -1,61 +0,0 @@
on:
push:
branches:
- '*'
pull_request:
branches:
- main
jobs:
clippy:
runs-on: docker
container:
image: docker.io/asonix/actions-base-image:0.1
steps:
-
name: Checkout relay
uses: https://github.com/actions/checkout@v4
-
name: Cargo Cache
uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
-
name: Clippy
run: |
cargo clippy --no-default-features -- -D warnings
tests:
runs-on: docker
container:
image: docker.io/asonix/actions-base-image:0.1
steps:
-
name: Checkout relay
uses: https://github.com/actions/checkout@v4
-
name: Cargo Cache
uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
-
name: Test
run: cargo test
check:
strategy:
fail-fast: false
matrix:
target:
- x86_64-unknown-linux-musl
- armv7-unknown-linux-musleabihf
- aarch64-unknown-linux-musl
runs-on: docker
container:
image: docker.io/asonix/actions-base-image:0.1
steps:
-
name: Checkout relay
uses: https://github.com/actions/checkout@v4
-
name: Cargo Cache
uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
-
name: Debug builds
run: cargo zigbuild --target ${{ matrix.target }}

(deleted file: release workflow)

@@ -1,226 +0,0 @@
on:
push:
tags:
- 'v*.*.*'
env:
REGISTRY_IMAGE: asonix/relay
jobs:
clippy:
runs-on: base-image
container:
image: docker.io/asonix/actions-base-image:0.1
steps:
-
name: Checkout relay
uses: https://github.com/actions/checkout@v4
-
name: Cargo Cache
uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
-
name: Clippy
run: |
# cargo clippy --no-default-features -- -D warnings
cargo clippy --no-default-features
tests:
runs-on: docker
container:
image: docker.io/asonix/actions-base-image:0.1
steps:
-
name: Checkout relay
uses: https://github.com/actions/checkout@v4
-
name: Cargo Cache
uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
-
name: Test
run: cargo test
build:
needs:
- clippy
- tests
runs-on: docker
container:
image: docker.io/asonix/actions-base-image:0.1
strategy:
fail-fast: false
matrix:
info:
- target: x86_64-unknown-linux-musl
artifact: linux-amd64
platform: linux/amd64
- target: armv7-unknown-linux-musleabihf
artifact: linux-arm32v7
platform: linux/arm/v7
- target: aarch64-unknown-linux-musl
artifact: linux-arm64v8
platform: linux/arm64
steps:
-
name: Checkout relay
uses: https://github.com/actions/checkout@v4
-
name: Cargo Cache
uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
-
name: Prepare Platform
run: |
platform=${{ matrix.info.platform }}
echo "PLATFORM_PAIR=${platform//\//-}" >> $GITHUB_ENV
shell: bash
-
name: Docker meta
id: meta
uses: https://github.com/docker/metadata-action@v5
with:
images: ${{ env.REGISTRY_IMAGE }}
flavor: |
latest=auto
suffix=-${{ matrix.info.artifact }}
tags: |
type=raw,value=latest,enable={{ is_default_branch }}
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=semver,pattern={{major}}
-
name: Set up QEMU
uses: https://github.com/docker/setup-qemu-action@v3
-
name: Set up Docker Buildx
uses: https://github.com/docker/setup-buildx-action@v3
-
name: Docker login
uses: https://github.com/docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
-
name: Compile relay
run: cargo zigbuild --target ${{ matrix.info.target }} --release
-
name: Prepare artifacts
run: |
mkdir artifacts
cp target/${{ matrix.info.target }}/release/relay artifacts/relay-${{ matrix.info.artifact }}
-
uses: https://github.com/actions/upload-artifact@v3
with:
name: binaries
path: artifacts/
-
name: Prepare binary
run: |
cp target/${{ matrix.info.target }}/release/relay docker/forgejo/relay
-
name: Build and push ${{ matrix.info.platform }} docker image
id: build
uses: docker/build-push-action@v5
with:
context: ./docker/forgejo
platforms: ${{ matrix.info.platform }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
outputs: type=image,name=${{ env.REGISTRY_IMAGE }},name-canonical=true,push=true
-
name: Export digest
run: |
mkdir -p /tmp/digests
digest="${{ steps.build.outputs.digest }}"
touch "/tmp/digests/${digest#sha256:}"
echo "Created /tmp/digests/${digest#sha256:}"
shell: bash
-
name: Upload ${{ matrix.info.platform }} digest
uses: https://github.com/actions/upload-artifact@v3
with:
name: digests
path: /tmp/digests/*
if-no-files-found: error
retention-days: 1
publish-docker:
runs-on: docker
container:
image: docker.io/asonix/actions-base-image:0.1
needs: [build]
steps:
-
name: Download digests
uses: https://github.com/actions/download-artifact@v3
with:
name: digests
path: /tmp/digests
pattern: digests-*
merge-multiple: true
-
name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
-
name: Docker login
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
-
name: Docker meta
id: meta
uses: https://github.com/docker/metadata-action@v5
with:
images: ${{ env.REGISTRY_IMAGE }}
flavor: |
latest=auto
tags: |
type=raw,value=latest,enable={{ is_default_branch }}
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=semver,pattern={{major}}
-
name: Create manifest list and push
working-directory: /tmp/digests
run: |
tags=$(jq -cr '.tags | map("-t " + .) | join(" ")' <<< "${DOCKER_METADATA_OUTPUT_JSON}")
images=$(printf "${{ env.REGISTRY_IMAGE }}@sha256:%s " *)
echo "Running 'docker buildx imagetools create ${tags[@]} ${images[@]}'"
docker buildx imagetools create ${tags[@]} ${images[@]}
shell: bash
-
name: Inspect Image
run: |
docker buildx imagetools inspect ${{ env.REGISTRY_IMAGE }}:${{ steps.meta.outputs.version }}
publish-forgejo:
needs: [build]
runs-on: docker
container:
image: docker.io/asonix/actions-base-image:0.1
steps:
- uses: https://github.com/actions/download-artifact@v3
with:
name: binaries
path: artifacts/
merge-multiple: true
- uses: actions/forgejo-release@v1
with:
direction: upload
token: ${{ secrets.GITHUB_TOKEN }}
release-dir: artifacts/
publish-crate:
needs: [build]
runs-on: docker
container:
image: docker.io/asonix/actions-base-image:0.1
steps:
-
name: Checkout relay
uses: https://github.com/actions/checkout@v4
-
name: Cargo Cache
uses: https://git.asonix.dog/asonix/actions/cache-rust-dependencies@main
-
name: Publish Crate
run: cargo publish --token ${{ secrets.CRATES_IO_TOKEN }}

Cargo.lock (generated, 2344 lines changed; diff suppressed because it is too large)

Cargo.toml

@@ -1,7 +1,7 @@
[package]
name = "ap-relay"
description = "A simple activitypub relay"
version = "0.3.116"
version = "0.3.108"
authors = ["asonix <asonix@asonix.dog>"]
license = "AGPL-3.0"
readme = "README.md"
@@ -14,9 +14,6 @@ build = "src/build.rs"
name = "relay"
path = "src/main.rs"
[profile.release]
strip = true
[features]
console = ["dep:console-subscriber"]
default = []
@@ -24,60 +21,60 @@ default = []
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
actix-web = { version = "4.4.0", default-features = false, features = ["compress-brotli", "compress-gzip", "rustls-0_23"] }
actix-web = { version = "4.4.0", default-features = false, features = ["compress-brotli", "compress-gzip", "rustls-0_22"] }
actix-webfinger = { version = "0.5.0", default-features = false }
activitystreams = "0.7.0-alpha.25"
activitystreams-ext = "0.1.0-alpha.3"
ammonia = "4.0.0"
async-cpupool = "0.3.0"
bcrypt = "0.16"
base64 = "0.22"
ammonia = "3.1.0"
async-cpupool = "0.2.0"
bcrypt = "0.15"
base64 = "0.21"
clap = { version = "4.0.0", features = ["derive"] }
color-eyre = "0.6.2"
config = { version = "0.14.0", default-features = false, features = ["toml", "json", "yaml"] }
console-subscriber = { version = "0.4", optional = true }
dashmap = "6.0.1"
console-subscriber = { version = "0.2", optional = true }
dashmap = "5.1.0"
dotenv = "0.15.0"
futures-core = "0.3.30"
flume = "0.11.0"
lru = "0.12.0"
metrics = "0.23.0"
metrics-exporter-prometheus = { version = "0.15.0", default-features = false, features = [
metrics = "0.22.0"
metrics-exporter-prometheus = { version = "0.13.0", default-features = false, features = [
"http-listener",
] }
metrics-util = "0.17.0"
metrics-util = "0.16.0"
mime = "0.3.16"
minify-html = "0.15.0"
opentelemetry = "0.27.1"
opentelemetry_sdk = { version = "0.27", features = ["rt-tokio"] }
opentelemetry-otlp = { version = "0.27", features = ["grpc-tonic"] }
opentelemetry = "0.21"
opentelemetry_sdk = { version = "0.21", features = ["rt-tokio"] }
opentelemetry-otlp = "0.14"
pin-project-lite = "0.2.9"
# pinned to metrics-util
quanta = "0.12.0"
rand = "0.8"
reqwest = { version = "0.12", default-features = false, features = ["rustls-tls", "stream"]}
reqwest-middleware = { version = "0.4", default-features = false, features = ["json"] }
reqwest-tracing = "0.5.0"
reqwest = { version = "0.11", default-features = false, features = ["rustls-tls", "stream"]}
reqwest-middleware = "0.2"
reqwest-tracing = "0.4.5"
ring = "0.17.5"
rsa = "0.9"
rsa = { version = "0.9" }
rsa-magic-public-key = "0.8.0"
rustls = { version = "0.23.0", default-features = false, features = ["ring", "logging", "std", "tls12"] }
rustls-channel-resolver = "0.3.0"
rustls = "0.22.0"
rustls-channel-resolver = "0.2.0"
rustls-pemfile = "2"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
sled = "0.34.7"
streem = "0.2.0"
teloxide = { version = "0.13.0", default-features = false, features = [
teloxide = { version = "0.12.0", default-features = false, features = [
"ctrlc_handler",
"macros",
"rustls",
] }
thiserror = "2.0"
thiserror = "1.0"
time = { version = "0.3.17", features = ["serde"] }
tracing = "0.1"
tracing-error = "0.2"
tracing-log = "0.2"
tracing-opentelemetry = "0.28"
tracing-opentelemetry = "0.22"
tracing-subscriber = { version = "0.3", features = [
"ansi",
"env-filter",
@@ -87,17 +84,17 @@ tokio = { version = "1", features = ["full", "tracing"] }
uuid = { version = "1", features = ["v4", "serde"] }
[dependencies.background-jobs]
version = "0.19.0"
version = "0.18.0"
default-features = false
features = ["error-logging", "metrics", "tokio"]
[dependencies.http-signature-normalization-actix]
version = "0.11.1"
version = "0.11.0"
default-features = false
features = ["server", "ring"]
[dependencies.http-signature-normalization-reqwest]
version = "0.13.0"
version = "0.11.0"
default-features = false
features = ["middleware", "ring"]

docker/drone/Dockerfile (new file, +11)

@@ -0,0 +1,11 @@
ARG REPO_ARCH
FROM asonix/rust-runner:latest-linux-$REPO_ARCH
COPY relay /usr/local/bin/relay
USER app
EXPOSE 8080
VOLUME /mnt
ENTRYPOINT ["/sbin/tini", "--"]
CMD ["/usr/local/bin/relay"]

docker/drone/manifest.tmpl (new file, +25)

@@ -0,0 +1,25 @@
image: asonix/relay:{{#if build.tag}}{{trimPrefix "v" build.tag}}{{else}}latest{{/if}}
{{#if build.tags}}
tags:
{{#each build.tags}}
- {{this}}
{{/each}}
{{/if}}
manifests:
-
image: asonix/relay:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}linux-amd64
platform:
architecture: amd64
os: linux
-
image: asonix/relay:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}linux-arm64v8
platform:
architecture: arm64
os: linux
variant: v8
-
image: asonix/relay:{{#if build.tag}}{{trimPrefix "v" build.tag}}-{{/if}}linux-arm32v7
platform:
architecture: arm
os: linux
variant: v7

(deleted file: Dockerfile)

@@ -1,24 +0,0 @@
FROM alpine:3.19
ARG UID=991
ARG GID=991
ENV \
UID=${UID} \
GID=${GID}
USER root
RUN \
addgroup -g "${GID}" app && \
adduser -D -G app -u "${UID}" -g "" -h /opt/app app && \
apk add tini && \
chown -R app:app /mnt
COPY relay /usr/local/bin/relay
USER app
EXPOSE 6669
EXPOSE 8080
VOLUME /mnt
ENTRYPOINT ["/sbin/tini", "--"]
CMD ["/usr/local/bin/relay"]

(docker-compose file)

@@ -2,7 +2,7 @@ version: '3.3'
services:
relay:
image: asonix/relay:0.3.115
image: asonix/relay:0.3.85
ports:
- "8079:8079"
restart: always

flake.lock (generated, 14 lines changed)

@@ -5,11 +5,11 @@
"systems": "systems"
},
"locked": {
"lastModified": 1710146030,
"narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
"lastModified": 1701680307,
"narHash": "sha256-kAuep2h5ajznlPMD9rnQyffWG8EM/C73lejGofXvdM8=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
"rev": "4022d587cbbfd70fe950c1e2083a02621806a725",
"type": "github"
},
"original": {
@@ -20,16 +20,16 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1733550349,
"narHash": "sha256-NcGumB4Lr6KSDq+nIqXtNA8QwAQKDSZT7N9OTGWbTrs=",
"lastModified": 1705133751,
"narHash": "sha256-rCIsyE80jgiOU78gCWN3A0wE0tR2GI5nH6MlS+HaaSQ=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "e2605d0744c2417b09f8bf850dfca42fcf537d34",
"rev": "9b19f5e77dd906cb52dade0b7bd280339d2a1f3d",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-24.11",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}

flake.nix

@@ -2,7 +2,7 @@
description = "relay";
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixos-24.11";
nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
flake-utils.url = "github:numtide/flake-utils";
};

(Nix package definition)

@@ -5,7 +5,7 @@
rustPlatform.buildRustPackage {
pname = "relay";
version = "0.3.116";
version = "0.3.108";
src = ./.;
cargoLock.lockFile = ./Cargo.lock;

(Rust source: admin client)

@@ -5,6 +5,7 @@ use crate::{
error::{Error, ErrorKind},
extractors::XApiToken,
};
use actix_web::http::header::Header;
use reqwest_middleware::ClientWithMiddleware;
use serde::de::DeserializeOwned;
@@ -86,17 +87,13 @@
let res = client
.get(iri.as_str())
.header(XApiToken::http1_name(), x_api_token.to_string())
.header(XApiToken::name(), x_api_token.to_string())
.send()
.await
.map_err(|e| ErrorKind::SendRequest(iri.to_string(), e.to_string()))?;
if !res.status().is_success() {
return Err(ErrorKind::Status(
iri.to_string(),
crate::http1::status_to_http02(res.status()),
)
.into());
return Err(ErrorKind::Status(iri.to_string(), res.status()).into());
}
let t = res
@@ -119,7 +116,7 @@
let res = client
.post(iri.as_str())
.header(XApiToken::http1_name(), x_api_token.to_string())
.header(XApiToken::name(), x_api_token.to_string())
.json(&Domains { domains })
.send()
.await

(Rust source: collector)

@@ -15,10 +15,6 @@ const MINUTES: u64 = 60 * SECONDS;
const HOURS: u64 = 60 * MINUTES;
const DAYS: u64 = 24 * HOURS;
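// Wraps a usize length into u32 range (modulo u32::MAX) so collection sizes can
// be recorded on metrics gauges; for realistically sized collections it is the identity.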
pub(crate) fn recordable(len: usize) -> u32 {
((len as u64) % u64::from(u32::MAX)) as u32
}
type DistributionMap = BTreeMap<Vec<(String, String)>, Summary>;
#[derive(Clone)]
@@ -303,14 +299,7 @@ impl Inner {
for sample in samples {
entry.add(*sample);
}
});
let mut total_len = 0;
for dist_map in d.values() {
total_len += dist_map.len();
}
metrics::gauge!("relay.collector.distributions.size").set(recordable(total_len));
})
}
let d = self.distributions.read().unwrap().clone();
@@ -369,7 +358,6 @@ impl MemoryCollector {
) {
let mut d = self.inner.descriptions.write().unwrap();
d.entry(key.as_str().to_owned()).or_insert(description);
metrics::gauge!("relay.collector.descriptions.size").set(recordable(d.len()));
}
pub(crate) fn install(&self) -> Result<(), SetRecorderError<Self>> {

(Rust source: LastOnline)

@@ -9,10 +9,10 @@ pub(crate) struct LastOnline {
impl LastOnline {
pub(crate) fn mark_seen(&self, iri: &IriStr) {
if let Some(authority) = iri.authority_str() {
let mut guard = self.domains.lock().unwrap();
guard.insert(authority.to_string(), OffsetDateTime::now_utc());
metrics::gauge!("relay.last-online.size",)
.set(crate::collector::recordable(guard.len()));
self.domains
.lock()
.unwrap()
.insert(authority.to_string(), OffsetDateTime::now_utc());
}
}
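Note on the main-side change above: binding the `MutexGuard` to a variable lets one lock acquisition cover both the insert and the `len()` read that feeds the gauge. A standalone sketch of that pattern, with a hypothetical map type:

```rust
use std::collections::HashMap;
use std::sync::Mutex;

// Bind the guard once: the insert and the size measurement happen under the same
// lock, so the recorded size cannot race with another writer.
fn insert_and_measure(map: &Mutex<HashMap<String, u64>>, key: String, value: u64) -> usize {
    let mut guard = map.lock().unwrap();
    guard.insert(key, value);
    guard.len()
}
```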

(Rust source: State)

@@ -73,9 +73,7 @@ impl State {
}
pub(crate) fn cache(&self, object_id: IriString, actor_id: IriString) {
let mut guard = self.object_cache.write().unwrap();
guard.put(object_id, actor_id);
metrics::gauge!("relay.object-cache.size").set(crate::collector::recordable(guard.len()));
self.object_cache.write().unwrap().put(object_id, actor_id);
}
pub(crate) fn is_connected(&self, iri: &IriString) -> bool {

src/db.rs (144 lines changed)

@@ -7,7 +7,7 @@ use rsa::{
pkcs8::{DecodePrivateKey, EncodePrivateKey},
RsaPrivateKey,
};
use sled::{transaction::TransactionError, Batch, Transactional, Tree};
use sled::{Batch, Tree};
use std::{
collections::{BTreeMap, HashMap},
sync::{
@@ -283,15 +283,10 @@ impl Db {
pub(crate) async fn check_health(&self) -> Result<(), Error> {
let next = self.inner.healthz_counter.fetch_add(1, Ordering::Relaxed);
self.unblock(move |inner| {
let res = inner
inner
.healthz
.insert("healthz", &next.to_be_bytes()[..])
.map_err(Error::from);
metrics::gauge!("relay.db.healthz.size")
.set(crate::collector::recordable(inner.healthz.len()));
res
.map_err(Error::from)
})
.await?;
self.inner.healthz.flush_async().await?;
@@ -354,9 +349,6 @@
.actor_id_info
.insert(actor_id.as_str().as_bytes(), vec)?;
metrics::gauge!("relay.db.actor-id-info.size")
.set(crate::collector::recordable(inner.actor_id_info.len()));
Ok(())
})
.await
@@ -391,9 +383,6 @@
.actor_id_instance
.insert(actor_id.as_str().as_bytes(), vec)?;
metrics::gauge!("relay.db.actor-id-instance.size")
.set(crate::collector::recordable(inner.actor_id_instance.len()));
Ok(())
})
.await
@@ -428,9 +417,6 @@
.actor_id_contact
.insert(actor_id.as_str().as_bytes(), vec)?;
metrics::gauge!("relay.db.actor-id-contact.size")
.set(crate::collector::recordable(inner.actor_id_contact.len()));
Ok(())
})
.await
@@ -461,12 +447,6 @@
inner
.media_url_media_id
.insert(url.as_str().as_bytes(), id.as_bytes())?;
metrics::gauge!("relay.db.media-id-media-url.size")
.set(crate::collector::recordable(inner.media_id_media_url.len()));
metrics::gauge!("relay.db.media-url-media-id.size")
.set(crate::collector::recordable(inner.media_url_media_id.len()));
Ok(())
})
.await
@@ -558,14 +538,6 @@
inner
.actor_id_actor
.insert(actor.id.as_str().as_bytes(), vec)?;
metrics::gauge!("relay.db.public-key-actor-id.size").set(crate::collector::recordable(
inner.public_key_id_actor_id.len(),
));
metrics::gauge!("relay.db.actor-id-actor.size").set(crate::collector::recordable(
inner.public_key_id_actor_id.len(),
));
Ok(())
})
.await
@@ -578,10 +550,6 @@
.connected_actor_ids
.remove(actor_id.as_str().as_bytes())?;
metrics::gauge!("relay.db.connected-actor-ids.size").set(crate::collector::recordable(
inner.connected_actor_ids.len(),
));
Ok(())
})
.await
@@ -594,10 +562,6 @@
.connected_actor_ids
.insert(actor_id.as_str().as_bytes(), actor_id.as_str().as_bytes())?;
metrics::gauge!("relay.db.connected-actor-ids.size").set(crate::collector::recordable(
inner.connected_actor_ids.len(),
));
Ok(())
})
.await
@@ -605,64 +569,30 @@
pub(crate) async fn add_blocks(&self, domains: Vec<String>) -> Result<(), Error> {
self.unblock(move |inner| {
let connected_by_domain = inner.connected_by_domain(&domains).collect::<Vec<_>>();
let res = (
&inner.connected_actor_ids,
&inner.blocked_domains,
&inner.allowed_domains,
)
.transaction(|(connected, blocked, allowed)| {
let mut connected_batch = Batch::default();
let mut blocked_batch = Batch::default();
let mut allowed_batch = Batch::default();
for connected in &connected_by_domain {
connected_batch.remove(connected.as_str().as_bytes());
}
for authority in &domains {
blocked_batch
.insert(domain_key(authority).as_bytes(), authority.as_bytes());
allowed_batch.remove(domain_key(authority).as_bytes());
}
connected.apply_batch(&connected_batch)?;
blocked.apply_batch(&blocked_batch)?;
allowed.apply_batch(&allowed_batch)?;
Ok(())
});
metrics::gauge!("relay.db.connected-actor-ids.size").set(crate::collector::recordable(
inner.connected_actor_ids.len(),
));
metrics::gauge!("relay.db.blocked-domains.size")
.set(crate::collector::recordable(inner.blocked_domains.len()));
metrics::gauge!("relay.db.allowed-domains.size")
.set(crate::collector::recordable(inner.allowed_domains.len()));
match res {
Ok(()) => Ok(()),
Err(TransactionError::Abort(e) | TransactionError::Storage(e)) => Err(e.into()),
for connected in inner.connected_by_domain(&domains) {
inner
.connected_actor_ids
.remove(connected.as_str().as_bytes())?;
}
for authority in &domains {
inner
.blocked_domains
.insert(domain_key(authority), authority.as_bytes())?;
inner.allowed_domains.remove(domain_key(authority))?;
}
Ok(())
})
.await
}
pub(crate) async fn remove_blocks(&self, domains: Vec<String>) -> Result<(), Error> {
self.unblock(move |inner| {
let mut blocked_batch = Batch::default();
for authority in &domains {
blocked_batch.remove(domain_key(authority).as_bytes());
inner.blocked_domains.remove(domain_key(authority))?;
}
inner.blocked_domains.apply_batch(blocked_batch)?;
metrics::gauge!("relay.db.blocked-domains.size")
.set(crate::collector::recordable(inner.blocked_domains.len()));
Ok(())
})
.await
@@ -670,17 +600,12 @@
pub(crate) async fn add_allows(&self, domains: Vec<String>) -> Result<(), Error> {
self.unblock(move |inner| {
let mut allowed_batch = Batch::default();
for authority in &domains {
allowed_batch.insert(domain_key(authority).as_bytes(), authority.as_bytes());
inner
.allowed_domains
.insert(domain_key(authority), authority.as_bytes())?;
}
inner.allowed_domains.apply_batch(allowed_batch)?;
metrics::gauge!("relay.db.allowed-domains.size")
.set(crate::collector::recordable(inner.allowed_domains.len()));
Ok(())
})
.await
@@ -689,32 +614,17 @@
pub(crate) async fn remove_allows(&self, domains: Vec<String>) -> Result<(), Error> {
self.unblock(move |inner| {
if inner.restricted_mode {
let connected_by_domain = inner.connected_by_domain(&domains).collect::<Vec<_>>();
let mut connected_batch = Batch::default();
for connected in &connected_by_domain {
connected_batch.remove(connected.as_str().as_bytes());
for connected in inner.connected_by_domain(&domains) {
inner
.connected_actor_ids
.remove(connected.as_str().as_bytes())?;
}
inner.connected_actor_ids.apply_batch(connected_batch)?;
metrics::gauge!("relay.db.connected-actor-ids.size").set(
crate::collector::recordable(inner.connected_actor_ids.len()),
);
}
let mut allowed_batch = Batch::default();
for authority in &domains {
allowed_batch.remove(domain_key(authority).as_bytes());
inner.allowed_domains.remove(domain_key(authority))?;
}
inner.allowed_domains.apply_batch(allowed_batch)?;
metrics::gauge!("relay.db.allowed-domains.size")
.set(crate::collector::recordable(inner.allowed_domains.len()));
Ok(())
})
.await
@@ -755,10 +665,6 @@
inner
.settings
.insert("private-key".as_bytes(), pem_pkcs8.as_bytes())?;
metrics::gauge!("relay.db.settings.size")
.set(crate::collector::recordable(inner.settings.len()));
Ok(())
})
.await

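Note on `add_blocks` above: main groups the per-domain writes into `Batch`es and applies them through sled's tuple `Transactional` API, so the connected, blocked, and allowed trees change atomically, while v0.3.108 writes to each tree independently. A minimal sketch of the tuple-transaction pattern, with hypothetical trees and keys:

```rust
use sled::{transaction::TransactionError, Batch, Transactional};

// Atomically move a key between two trees: both batches commit, or neither does.
fn move_key(src: &sled::Tree, dst: &sled::Tree, key: &[u8], value: &[u8]) -> sled::Result<()> {
    let res = (src, dst).transaction(|(src, dst)| {
        let mut remove = Batch::default();
        remove.remove(key);
        let mut insert = Batch::default();
        insert.insert(key, value);
        src.apply_batch(&remove)?;
        dst.apply_batch(&insert)?;
        Ok(())
    });

    match res {
        Ok(()) => Ok(()),
        Err(TransactionError::Abort(e) | TransactionError::Storage(e)) => Err(e),
    }
}
```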
(Rust source: error)

@@ -123,9 +123,6 @@ pub(crate) enum ErrorKind {
#[error("Couldn't sign request")]
SignRequest,
#[error("Response body from server exceeded limits")]
BodyTooLarge,
#[error("Couldn't make request")]
Reqwest(#[from] reqwest::Error),

(Rust source: extractors)

@@ -163,10 +163,6 @@ impl XApiToken {
pub(crate) fn new(token: String) -> Self {
Self(token)
}
pub(crate) const fn http1_name() -> reqwest::header::HeaderName {
reqwest::header::HeaderName::from_static("x-api-token")
}
}
impl Header for XApiToken {

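Note: `http1_name()` exists because main straddles two major versions of the `http` crate. actix-web 4 still uses http 0.2 types, which is what the `Header` impl's `name()` returns, while reqwest 0.12 expects http 1.x types. A hedged sketch of the client-side usage, assuming `XApiToken` implements `Display` as the admin client's `to_string()` calls imply:

```rust
// Hypothetical helper mirroring the admin client earlier in this diff: attach the
// token under the http 1.x header name that reqwest 0.12 expects.
fn with_api_token(req: reqwest::RequestBuilder, token: &XApiToken) -> reqwest::RequestBuilder {
    req.header(XApiToken::http1_name(), token.to_string())
}
```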
(Rust source: http1 conversions)

@@ -1,18 +0,0 @@
pub(crate) fn name_to_http02(
name: &reqwest::header::HeaderName,
) -> actix_web::http::header::HeaderName {
actix_web::http::header::HeaderName::from_bytes(name.as_ref())
.expect("headername conversions always work")
}
pub(crate) fn value_to_http02(
value: &reqwest::header::HeaderValue,
) -> actix_web::http::header::HeaderValue {
actix_web::http::header::HeaderValue::from_bytes(value.as_bytes())
.expect("headervalue conversions always work")
}
pub(crate) fn status_to_http02(status: reqwest::StatusCode) -> actix_web::http::StatusCode {
actix_web::http::StatusCode::from_u16(status.as_u16())
.expect("statuscode conversions always work")
}
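This module, present on main and removed in v0.3.108, bridges the same http 0.2 / http 1.x split in the other direction: response parts from reqwest have to be converted before actix-web will accept them. A sketch of the intended use, matching the media route later in this diff:

```rust
// Copy status and headers from a reqwest (http 1.x) response onto an
// actix-web (http 0.2) response builder using the converters above.
fn mirror_parts(res: &reqwest::Response) -> actix_web::HttpResponseBuilder {
    let mut builder = actix_web::HttpResponse::build(status_to_http02(res.status()));
    for (name, value) in res.headers() {
        builder.insert_header((name_to_http02(name), value_to_http02(value)));
    }
    builder
}
```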

(Rust source: jobs)

@@ -40,12 +40,7 @@ fn debug_object(activity: &serde_json::Value) -> &serde_json::Value {
object
}
pub(crate) fn build_storage() -> MetricsStorage<Storage<TokioTimer>> {
MetricsStorage::wrap(Storage::new(TokioTimer))
}
pub(crate) fn create_workers(
storage: MetricsStorage<Storage<TokioTimer>>,
state: State,
actors: ActorCache,
media: MediaCache,
@@ -53,15 +48,18 @@ ) -> std::io::Result<JobServer> {
) -> std::io::Result<JobServer> {
let deliver_concurrency = config.deliver_concurrency();
let queue_handle = WorkerConfig::new(storage, move |queue_handle| {
JobState::new(
state.clone(),
actors.clone(),
JobServer::new(queue_handle),
media.clone(),
config.clone(),
)
})
let queue_handle = WorkerConfig::new(
MetricsStorage::wrap(Storage::new(TokioTimer)),
move |queue_handle| {
JobState::new(
state.clone(),
actors.clone(),
JobServer::new(queue_handle),
media.clone(),
config.clone(),
)
},
)
.register::<Deliver>()
.register::<DeliverMany>()
.register::<QueryNodeinfo>()

(Rust source: nodeinfo)

@@ -156,7 +156,7 @@ struct Link {
#[serde(untagged)]
enum MaybeSupported<T> {
Supported(T),
Unsupported(#[allow(unused)] String),
Unsupported(String),
}
impl<T> MaybeSupported<T> {
@@ -165,8 +165,8 @@ impl<T> MaybeSupported<T> {
}
}
struct SupportedVersion(#[allow(unused)] String);
struct SupportedNodeinfo(#[allow(unused)] String);
struct SupportedVersion(String);
struct SupportedNodeinfo(String);
static SUPPORTED_VERSIONS: &str = "2.";
static SUPPORTED_NODEINFO: &str = "http://nodeinfo.diaspora.software/ns/schema/2.";
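The `#[allow(unused)]` additions on main silence dead-code warnings on fields that exist only so serde's untagged deserialization has somewhere to put unrecognized values. A standalone sketch of the untagged-fallback pattern, with a hypothetical type:

```rust
use serde::Deserialize;

// serde tries the variants top to bottom: values that parse as u32 become
// Supported, and any other string lands in Unsupported instead of failing.
#[derive(Deserialize)]
#[serde(untagged)]
enum MaybeVersion {
    Supported(u32),
    Unsupported(#[allow(unused)] String),
}
```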

(Rust source: main)

@@ -12,7 +12,7 @@ use error::Error;
use http_signature_normalization_actix::middleware::VerifySignature;
use metrics_exporter_prometheus::PrometheusBuilder;
use metrics_util::layers::FanoutBuilder;
use opentelemetry::{trace::TracerProvider, KeyValue};
use opentelemetry::KeyValue;
use opentelemetry_otlp::WithExportConfig;
use opentelemetry_sdk::Resource;
use reqwest_middleware::ClientWithMiddleware;
@@ -33,13 +31,11 @@
mod error;
mod extractors;
mod future;
mod http1;
mod jobs;
mod middleware;
mod requests;
mod routes;
mod spawner;
mod stream;
mod telegram;
use crate::config::UrlKind;
@@ -83,21 +81,22 @@ fn init_subscriber(
let subscriber = subscriber.with(console_layer);
if let Some(url) = opentelemetry_url {
let exporter = opentelemetry_otlp::SpanExporter::builder()
.with_tonic()
.with_endpoint(url.as_str())
.build()?;
let tracer_provider = opentelemetry_sdk::trace::TracerProvider::builder()
.with_resource(Resource::new(vec![KeyValue::new(
"service.name",
software_name,
)]))
.with_batch_exporter(exporter, opentelemetry_sdk::runtime::Tokio)
.build();
let tracer = opentelemetry_otlp::new_pipeline()
.tracing()
.with_trace_config(
opentelemetry_sdk::trace::config().with_resource(Resource::new(vec![
KeyValue::new("service.name", software_name),
])),
)
.with_exporter(
opentelemetry_otlp::new_exporter()
.tonic()
.with_endpoint(url.as_str()),
)
.install_batch(opentelemetry_sdk::runtime::Tokio)?;
let otel_layer = tracing_opentelemetry::layer()
.with_tracer(tracer_provider.tracer(software_name))
.with_tracer(tracer)
.with_filter(targets);
let subscriber = subscriber.with(otel_layer);
@@ -322,16 +321,10 @@ async fn server_main(
let sign_spawner2 = sign_spawner.clone();
let verify_spawner2 = verify_spawner.clone();
let config2 = config.clone();
let job_store = jobs::build_storage();
let server = HttpServer::new(move || {
let job_server = create_workers(
job_store.clone(),
state.clone(),
actors.clone(),
media.clone(),
config.clone(),
)
.expect("Failed to create job server");
let job_server =
create_workers(state.clone(), actors.clone(), media.clone(), config.clone())
.expect("Failed to create job server");
let app = App::new()
.app_data(web::Data::new(db.clone()))
@@ -418,7 +411,7 @@
.with_no_client_auth()
.with_cert_resolver(cert_rx);
server
.bind_rustls_0_23(bind_address, server_config)?
.bind_rustls_0_22(bind_address, server_config)?
.run()
.await?;

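The `job_store` change above matters because `HttpServer::new` runs its factory closure once per worker thread: state built outside the closure and cloned in is shared by every worker, while state built inside is per-worker. A minimal sketch of that pattern, with a hypothetical counter standing in for the job storage:

```rust
use actix_web::{web, App, HttpServer};
use std::sync::Mutex;

#[actix_web::main]
async fn main() -> std::io::Result<()> {
    // Built once, outside the factory: every worker clones a handle to the same
    // Mutex, just as main builds job_store once and clones it into each worker.
    let shared = web::Data::new(Mutex::new(0u64));

    HttpServer::new(move || App::new().app_data(shared.clone()))
        .bind(("127.0.0.1", 8080))?
        .run()
        .await
}
```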
(Rust source: middleware)

@@ -80,7 +80,7 @@ where
fn call(&self, req: ServiceRequest) -> Self::Future {
let log_on_drop = LogOnDrop {
begin: Instant::now(),
path: format!("{:?}", req.match_pattern()),
path: req.path().to_string(),
method: req.method().to_string(),
arm: false,
};
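On main the timing middleware records `req.match_pattern()`, the route template, where v0.3.108 recorded the raw path. Since this string ends up as a metric label, the template keeps label cardinality bounded no matter what URLs clients request. A sketch of the difference, with hypothetical values:

```rust
use actix_web::dev::ServiceRequest;

// For a request to /media/3fa85f64 matched by a route registered at /media/{path}:
//   req.path()          == "/media/3fa85f64"      -> unbounded set of label values
//   req.match_pattern() == Some("/media/{path}")  -> one label value per route
fn metric_path(req: &ServiceRequest) -> String {
    format!("{:?}", req.match_pattern())
}
```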

(Rust source: requests)

@@ -2,7 +2,6 @@ use crate::{
data::LastOnline,
error::{Error, ErrorKind},
spawner::Spawner,
stream::{aggregate, limit_stream},
};
use activitystreams::iri_string::types::IriString;
use actix_web::http::header::Date;
@@ -25,9 +24,6 @@ const ONE_MINUTE: u64 = 60 * ONE_SECOND;
const ONE_HOUR: u64 = 60 * ONE_MINUTE;
const ONE_DAY: u64 = 24 * ONE_HOUR;
// 20 KB
const JSON_SIZE_LIMIT: usize = 20 * 1024;
#[derive(Debug)]
pub(crate) enum BreakerStrategy {
// Requires a successful response
@@ -233,11 +229,7 @@ impl Requests {
}
}
return Err(ErrorKind::Status(
parsed_url.to_string(),
crate::http1::status_to_http02(status),
)
.into());
return Err(ErrorKind::Status(parsed_url.to_string(), status).into());
}
// only actually succeed a breaker on 2xx response
@@ -270,7 +262,7 @@
where
T: serde::de::DeserializeOwned,
{
let stream = self
let body = self
.do_deliver(
url,
&serde_json::json!({}),
@@ -279,9 +271,8 @@
strategy,
)
.await?
.bytes_stream();
let body = aggregate(limit_stream(stream, JSON_SIZE_LIMIT)).await?;
.bytes()
.await?;
Ok(serde_json::from_slice(&body)?)
}
@@ -308,12 +299,11 @@
where
T: serde::de::DeserializeOwned,
{
let stream = self
let body = self
.do_fetch_response(url, accept, strategy)
.await?
.bytes_stream();
let body = aggregate(limit_stream(stream, JSON_SIZE_LIMIT)).await?;
.bytes()
.await?;
Ok(serde_json::from_slice(&body)?)
}

(Rust source: media route)

@@ -2,14 +2,10 @@ use crate::{
data::MediaCache,
error::Error,
requests::{BreakerStrategy, Requests},
stream::limit_stream,
};
use actix_web::{body::BodyStream, web, HttpResponse};
use uuid::Uuid;
// 16 MB
const IMAGE_SIZE_LIMIT: usize = 16 * 1024 * 1024;
#[tracing::instrument(name = "Media", skip(media, requests))]
pub(crate) async fn route(
media: web::Data<MediaCache>,
@@ -23,19 +19,13 @@ pub(crate) async fn route(
.fetch_response(&url, BreakerStrategy::Allow404AndBelow)
.await?;
let mut response = HttpResponse::build(crate::http1::status_to_http02(res.status()));
let mut response = HttpResponse::build(res.status());
for (name, value) in res.headers().iter().filter(|(h, _)| *h != "connection") {
response.insert_header((
crate::http1::name_to_http02(name),
crate::http1::value_to_http02(value),
));
response.insert_header((name.clone(), value.clone()));
}
return Ok(response.body(BodyStream::new(limit_stream(
res.bytes_stream(),
IMAGE_SIZE_LIMIT,
))));
return Ok(response.body(BodyStream::new(res.bytes_stream())));
}
Ok(HttpResponse::NotFound().finish())

(Rust source: stream helpers)

@@ -1,59 +0,0 @@
use crate::error::{Error, ErrorKind};
use actix_web::web::{Bytes, BytesMut};
use futures_core::Stream;
use streem::IntoStreamer;
pub(crate) fn limit_stream<'a, S>(
input: S,
limit: usize,
) -> impl Stream<Item = Result<Bytes, Error>> + Send + 'a
where
S: Stream<Item = reqwest::Result<Bytes>> + Send + 'a,
{
streem::try_from_fn(move |yielder| async move {
let stream = std::pin::pin!(input);
let mut stream = stream.into_streamer();
let mut count = 0;
while let Some(bytes) = stream.try_next().await? {
count += bytes.len();
if count > limit {
return Err(ErrorKind::BodyTooLarge.into());
}
yielder.yield_ok(bytes).await;
}
Ok(())
})
}
pub(crate) async fn aggregate<S>(input: S) -> Result<Bytes, Error>
where
S: Stream<Item = Result<Bytes, Error>>,
{
let stream = std::pin::pin!(input);
let mut streamer = stream.into_streamer();
let mut buf = Vec::new();
while let Some(bytes) = streamer.try_next().await? {
buf.push(bytes);
}
if buf.len() == 1 {
return Ok(buf.pop().expect("buf has exactly one element"));
}
let total_size: usize = buf.iter().map(|b| b.len()).sum();
let mut bytes_mut = BytesMut::with_capacity(total_size);
for bytes in &buf {
bytes_mut.extend_from_slice(&bytes);
}
Ok(bytes_mut.freeze())
}

(Rust source: telegram)

@@ -75,8 +75,7 @@ pub(crate) fn start(admin_handle: String, db: Db, token: &str) {
fn is_admin(admin_handle: &str, message: &Message) -> bool {
message
.from
.as_ref()
.from()
.and_then(|user| user.username.as_deref())
.map(|username| username == admin_handle)
.unwrap_or(false)
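The last hunk reflects a teloxide API difference: per this diff, 0.12 exposed a `from()` accessor, while the 0.13 used on main makes `from` a public `Option<User>` field, hence the added `.as_ref()`. A sketch of the main-side chain, mirroring `is_admin` above:

```rust
// Borrow the optional sender, then its optional username, and compare.
fn sender_is(admin_handle: &str, message: &teloxide::types::Message) -> bool {
    message
        .from
        .as_ref()
        .and_then(|user| user.username.as_deref())
        .map(|username| username == admin_handle)
        .unwrap_or(false)
}
```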