diff --git a/.github/dependabot.yml b/.github/dependabot.yaml similarity index 64% rename from .github/dependabot.yml rename to .github/dependabot.yaml index 8f36cb692..becfbc1df 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yaml @@ -5,9 +5,15 @@ updates: schedule: interval: daily target-branch: "develop" + labels: + - "Continuous Integration" + - "Dependencies" - package-ecosystem: cargo directory: / schedule: interval: daily target-branch: "develop" + labels: + - "Build | Project System" + - "Dependencies" diff --git a/.github/labels.json b/.github/labels.json new file mode 100644 index 000000000..778c0c892 --- /dev/null +++ b/.github/labels.json @@ -0,0 +1,260 @@ +[ + { + "name": "- Admin -", + "color": "FFFFFF", + "description": "Enjoyable to Install and Setup our Software", + "aliases": [] + }, + { + "name": "- Contributor -", + "color": "FFFFFF", + "description": "Nice to support Torrust", + "aliases": [] + }, + { + "name": "- Developer -", + "color": "FFFFFF", + "description": "Torrust Improvement Experience", + "aliases": [] + }, + { + "name": "- User -", + "color": "FFFFFF", + "description": "Enjoyable to Use our Software", + "aliases": [] + }, + { + "name": "Blocked", + "color": "000000", + "description": "Has Unsatisfied Dependency", + "aliases": [] + }, + { + "name": "Bug", + "color": "a80506", + "description": "Incorrect Behavior", + "aliases": [] + }, + { + "name": "Build | Project System", + "color": "99AAAA", + "description": "Compiling and Packaging", + "aliases": ["Rust"] + }, + { + "name": "Cannot Reproduce", + "color": "D3D3D3", + "description": "Inconsistent Observations", + "aliases": [] + }, + { + "name": "Code Cleanup / Refactoring", + "color": "055a8b", + "description": "Tidying and Making Neat", + "aliases": [] + }, + { + "name": "Continuous Integration", + "color": "41c6b3", + "description": "Workflows and Automation", + "aliases": [] + }, + { + "name": "Dependencies", + "color": "d4f8f6", + "description": "Related to 
Dependencies", + "aliases": [] + }, + { + "name": "Documentation", + "color": "3d2133", + "description": "Improves Instructions, Guides, and Notices", + "aliases": [] + }, + { + "name": "Duplicate", + "color": "cfd3d7", + "description": "Not Unique", + "aliases": [] + }, + { + "name": "Easy", + "color": "f0cff0", + "description": "Good for Newcomers", + "aliases": [] + }, + { + "name": "Enhancement / Feature Request", + "color": "c9ecbf", + "description": "Something New", + "aliases": [] + }, + { + "name": "External Tools", + "color": "a6006b", + "description": "3rd Party Systems", + "aliases": [] + }, + { + "name": "First Time Contribution", + "color": "f1e0e6", + "description": "Welcome to Torrust", + "aliases": [] + }, + { + "name": "Fixed", + "color": "8e4c42", + "description": "Not a Concern Anymore", + "aliases": [] + }, + { + "name": "Hard", + "color": "2c2c2c", + "description": "Non-Trivial", + "aliases": [] + }, + { + "name": "Help Wanted", + "color": "00896b", + "description": "More Contributions are Appreciated", + "aliases": [] + }, + { + "name": "High Priority", + "color": "ba3fbc", + "description": "Focus Required", + "aliases": [] + }, + { + "name": "Hold Merge", + "color": "9aafbe", + "description": "We are not Ready Yet", + "aliases": [] + }, + { + "name": "Installer | Package", + "color": "ed8b24", + "description": "Distribution to Users", + "aliases": [] + }, + { + "name": "Invalid", + "color": "c1c1c1", + "description": "This doesn't seem right", + "aliases": [] + }, + { + "name": "Legal", + "color": "463e60", + "description": "Licenses and other Official Documents", + "aliases": [] + }, + { + "name": "Low Priority", + "color": "43536b", + "description": "Not our Focus Now", + "aliases": [] + }, + { + "name": "Needs Feedback", + "color": "d6946c", + "description": "What does the Community Think?", + "aliases": [] + }, + { + "name": "Needs Rebase", + "color": "FBC002", + "description": "Base Branch has Incompatibilities", + "aliases": [] + }, + { 
+ "name": "Needs Research", + "color": "4bc021", + "description": "We Need to Know More About This", + "aliases": [] + }, + { + "name": "Optimization", + "color": "faeba8", + "description": "Make it Faster", + "aliases": [] + }, + { + "name": "Portability", + "color": "95de82", + "description": "Distribution to More Places", + "aliases": [] + }, + { + "name": "Postponed", + "color": "dadada", + "description": "For Later", + "aliases": [] + }, + { + "name": "Quality & Assurance", + "color": "eea2e8", + "description": "Relates to QA, Testing, and CI", + "aliases": [] + }, + { + "name": "Question / Discussion", + "color": "f89d00", + "description": "Community Feedback", + "aliases": [] + }, + { + "name": "Regression", + "color": "d10588", + "description": "It does not work anymore", + "aliases": [] + }, + { + "name": "Reviewed", + "color": "f4f4ea", + "description": "This Looks Good", + "aliases": [] + }, + { + "name": "Security", + "color": "650606", + "description": "Publicly Connected to Security", + "aliases": [] + }, + { + "name": "Testing", + "color": "c5def5", + "description": "Checking Torrust", + "aliases": [] + }, + { + "name": "Translations", + "color": "0c86af", + "description": "Localization and Cultural Adaptations", + "aliases": [] + }, + { + "name": "Trivial", + "color": "5f9685", + "description": "Something Easy", + "aliases": [] + }, + { + "name": "Won't Fix", + "color": "070003", + "description": "Something Not Relevant", + "aliases": [] + }, + { + "name": "Workaround Possible", + "color": "eae3e7", + "description": "You can still do it another way", + "aliases": [] + }, + { + "name": "good first issue", + "color": "b0fc38", + "description": "Feel free to seek assistance when needed", + "aliases": [] + } +] diff --git a/.github/workflows/container.yaml b/.github/workflows/container.yaml index 884a15843..9f51f3124 100644 --- a/.github/workflows/container.yaml +++ b/.github/workflows/container.yaml @@ -30,7 +30,7 @@ jobs: - id: build name: Build - uses: 
docker/build-push-action@v5 + uses: docker/build-push-action@v6 with: file: ./Containerfile push: false @@ -127,7 +127,7 @@ jobs: uses: docker/setup-buildx-action@v3 - name: Build and push - uses: docker/build-push-action@v5 + uses: docker/build-push-action@v6 with: file: ./Containerfile push: true @@ -168,7 +168,7 @@ jobs: uses: docker/setup-buildx-action@v3 - name: Build and push - uses: docker/build-push-action@v5 + uses: docker/build-push-action@v6 with: file: ./Containerfile push: true diff --git a/.github/workflows/contract.yaml b/.github/workflows/contract.yaml new file mode 100644 index 000000000..2777417e3 --- /dev/null +++ b/.github/workflows/contract.yaml @@ -0,0 +1,58 @@ +name: Contract + +on: + push: + pull_request: + +env: + CARGO_TERM_COLOR: always + +jobs: + contract: + name: Contract + runs-on: ubuntu-latest + + strategy: + matrix: + toolchain: [nightly, stable] + + steps: + - id: checkout + name: Checkout Repository + uses: actions/checkout@v4 + + - id: setup + name: Setup Toolchain + uses: dtolnay/rust-toolchain@stable + with: + toolchain: ${{ matrix.toolchain }} + components: llvm-tools-preview + + - id: cache + name: Enable Job Cache + uses: Swatinem/rust-cache@v2 + + - id: tools + name: Install Tools + uses: taiki-e/install-action@v2 + with: + tool: cargo-llvm-cov, cargo-nextest + + - id: pretty-test + name: Install pretty-test + run: cargo install cargo-pretty-test + + - id: contract + name: Run contract + run: | + cargo test --lib --bins + cargo pretty-test --lib --bins + + - id: summary + name: Generate contract Summary + run: | + echo "### Tracker Living Contract! 
:rocket:" >> $GITHUB_STEP_SUMMARY + OUTPUT=$(cargo pretty-test --lib --bins --color=never) + echo '```console' >> $GITHUB_STEP_SUMMARY + echo "$OUTPUT" >> $GITHUB_STEP_SUMMARY + echo '```' >> $GITHUB_STEP_SUMMARY diff --git a/.github/workflows/coverage.yaml b/.github/workflows/coverage.yaml index 1e7dace66..28c1be6d0 100644 --- a/.github/workflows/coverage.yaml +++ b/.github/workflows/coverage.yaml @@ -18,8 +18,8 @@ jobs: runs-on: ubuntu-latest env: CARGO_INCREMENTAL: "0" - RUSTFLAGS: "-Z profile -C codegen-units=1 -C inline-threshold=0 -C link-dead-code -C overflow-checks=off -C panic=abort -Z panic_abort_tests" - RUSTDOCFLAGS: "-Z profile -C codegen-units=1 -C inline-threshold=0 -C link-dead-code -C overflow-checks=off -C panic=abort -Z panic_abort_tests" + RUSTFLAGS: "-Z profile -C codegen-units=1 -C opt-level=0 -C link-dead-code -C overflow-checks=off -Z panic_abort_tests -C panic=abort" + RUSTDOCFLAGS: "-Z profile -C codegen-units=1 -C opt-level=0 -C link-dead-code -C overflow-checks=off -Z panic_abort_tests -C panic=abort" steps: - id: checkout_push @@ -36,7 +36,7 @@ jobs: - id: setup name: Setup Toolchain - uses: dtolnay/rust-toolchain@stable + uses: dtolnay/rust-toolchain@nightly with: toolchain: nightly components: llvm-tools-preview @@ -53,11 +53,23 @@ jobs: - id: check name: Run Build Checks - run: cargo check --workspace --all-targets --all-features + run: cargo check --tests --benches --examples --workspace --all-targets --all-features + + - id: clean + name: Clean Build Directory + run: cargo clean + + - id: build + name: Pre-build Main Project + run: cargo build --workspace --all-targets --all-features --jobs 2 + + - id: build_tests + name: Pre-build Tests + run: cargo build --workspace --all-targets --all-features --tests --jobs 2 - id: test name: Run Unit Tests - run: cargo test --workspace --all-targets --all-features + run: cargo test --tests --workspace --all-targets --all-features - id: coverage name: Generate Coverage Report diff 
--git a/.github/workflows/deployment.yaml b/.github/workflows/deployment.yaml index 5df50a4b0..6aa66e985 100644 --- a/.github/workflows/deployment.yaml +++ b/.github/workflows/deployment.yaml @@ -12,7 +12,7 @@ jobs: strategy: matrix: - toolchain: [stable, nightly] + toolchain: [nightly, stable] steps: - id: checkout @@ -44,7 +44,7 @@ name: Setup Toolchain uses: dtolnay/rust-toolchain@stable with: - toolchain: stable + toolchain: ${{ matrix.toolchain }} - id: publish name: Publish Crates run: | cargo publish -p torrust-tracker-contrib-bencode cargo publish -p torrust-tracker-located-error cargo publish -p torrust-tracker-primitives + cargo publish -p torrust-tracker-clock cargo publish -p torrust-tracker-configuration + cargo publish -p torrust-tracker-torrent-repository cargo publish -p torrust-tracker-test-helpers cargo publish -p torrust-tracker diff --git a/.github/workflows/labels.yaml b/.github/workflows/labels.yaml new file mode 100644 index 000000000..bb8283f30 --- /dev/null +++ b/.github/workflows/labels.yaml @@ -0,0 +1,36 @@ +name: Labels +on: + workflow_dispatch: + push: + branches: + - develop + paths: + - ".github/labels.json" + +jobs: + export: + name: Export Existing Labels + runs-on: ubuntu-latest + + steps: + - id: backup + name: Export to Workflow Artifact + uses: EndBug/export-label-config@v1 + + sync: + name: Synchronize Labels from Repo + needs: export + runs-on: ubuntu-latest + + steps: + - id: checkout + name: Checkout Repository + uses: actions/checkout@v4 + + - id: sync + name: Apply Labels from File + uses: EndBug/label-sync@v2 + with: + config-file: .github/labels.json + delete-other-labels: true + token: ${{ secrets.UPDATE_LABELS }} diff --git a/.github/workflows/testing.yaml b/.github/workflows/testing.yaml index 21c47665f..abe6f0a60 100644 --- a/.github/workflows/testing.yaml +++ b/.github/workflows/testing.yaml @@ -39,7 +39,7 @@ jobs: strategy: matrix: - toolchain: [stable, nightly] + toolchain: [nightly, 
stable] steps: - id: checkout @@ -57,6 +57,12 @@ jobs: name: Enable Workflow Cache uses: Swatinem/rust-cache@v2 + - id: tools + name: Install Tools + uses: taiki-e/install-action@v2 + with: + tool: cargo-machete + - id: check name: Run Build Checks run: cargo check --tests --benches --examples --workspace --all-targets --all-features @@ -65,9 +71,20 @@ jobs: name: Run Lint Checks run: cargo clippy --tests --benches --examples --workspace --all-targets --all-features -- -D clippy::correctness -D clippy::suspicious -D clippy::complexity -D clippy::perf -D clippy::style -D clippy::pedantic - - id: doc - name: Run Documentation Checks - run: cargo test --doc + - id: docs + name: Lint Documentation + env: + RUSTDOCFLAGS: "-D warnings" + run: cargo doc --no-deps --bins --examples --workspace --all-features + + - id: clean + name: Clean Build Directory + run: cargo clean + + - id: deps + name: Check Unused Dependencies + run: cargo machete + unit: name: Units @@ -76,7 +93,7 @@ jobs: strategy: matrix: - toolchain: [stable, nightly] + toolchain: [nightly, stable] steps: - id: checkout @@ -100,11 +117,39 @@ jobs: with: tool: cargo-llvm-cov, cargo-nextest + - id: test-docs + name: Run Documentation Tests + run: cargo test --doc + - id: test name: Run Unit Tests run: cargo test --tests --benches --examples --workspace --all-targets --all-features - # Temporary Disable https://github.com/time-rs/time/issues/618 - # - id: coverage - # name: Generate Coverage Report - # run: cargo llvm-cov nextest --tests --benches --examples --workspace --all-targets --all-features + e2e: + name: E2E + runs-on: ubuntu-latest + needs: unit + + strategy: + matrix: + toolchain: [nightly, stable] + + steps: + - id: setup + name: Setup Toolchain + uses: dtolnay/rust-toolchain@stable + with: + toolchain: ${{ matrix.toolchain }} + components: llvm-tools-preview + + - id: cache + name: Enable Job Cache + uses: Swatinem/rust-cache@v2 + + - id: checkout + name: Checkout Repository + uses: 
actions/checkout@v4 + + - id: test + name: Run E2E Tests + run: cargo run --bin e2e_tests_runner -- --config-toml-path "./share/default/config/tracker.e2e.container.sqlite3.toml" diff --git a/.gitignore b/.gitignore index 2d8d0b8bd..b60b28991 100644 --- a/.gitignore +++ b/.gitignore @@ -3,10 +3,14 @@ /.coverage/ /.idea/ /.vscode/launch.json -/tracker.toml /data.db /database.db /database.json.bz2 +/flamegraph.svg /storage/ /target /tracker.* +/tracker.toml +callgrind.out +perf.data* +*.code-workspace \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json index 661243fbe..caa48dd01 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -2,6 +2,12 @@ "[rust]": { "editor.formatOnSave": true }, + "[ignore]": { "rust-analyzer.cargo.extraEnv" : { + "RUSTFLAGS": "-Z profile -C codegen-units=1 -C inline-threshold=0 -C link-dead-code -C overflow-checks=off -C panic=abort -Z panic_abort_tests", + "RUSTDOCFLAGS": "-Z profile -C codegen-units=1 -C inline-threshold=0 -C link-dead-code -C overflow-checks=off -C panic=abort -Z panic_abort_tests", + "CARGO_INCREMENTAL": "0", + "RUST_BACKTRACE": "1" + }}, "rust-analyzer.checkOnSave": true, "rust-analyzer.check.command": "clippy", "rust-analyzer.check.allTargets": true, @@ -18,11 +24,12 @@ "-W", "clippy::style", "-W", - "clippy::pedantic", + "clippy::pedantic" ], "evenBetterToml.formatter.allowedBlankLines": 1, "evenBetterToml.formatter.columnWidth": 130, "evenBetterToml.formatter.trailingNewline": true, "evenBetterToml.formatter.reorderKeys": true, "evenBetterToml.formatter.reorderArrays": true, + } \ No newline at end of file diff --git a/COPYRIGHT b/COPYRIGHT deleted file mode 100644 index 6eef820ec..000000000 --- a/COPYRIGHT +++ /dev/null @@ -1,11 +0,0 @@ -Copyright 2023 in the Torrust-Tracker project are retained by their contributors. No -copyright assignment is required to contribute to the Torrust-Tracker project. 
- -Some files include explicit copyright notices and/or license notices. - -Except as otherwise noted (below and/or in individual files), Torrust-Tracker is -licensed under the GNU Affero General Public License, Version 3.0 . This license applies to all files in the Torrust-Tracker project, except as noted below. - -Except as otherwise noted (below and/or in individual files), Torrust-Tracker is licensed under the MIT-0 license for all commits made after 5 years of merging. This license applies to the version of the files merged into the Torrust-Tracker project at the time of merging, and does not apply to subsequent updates or revisions to those files. - -The contributors to the Torrust-Tracker project disclaim all liability for any damages or losses that may arise from the use of the project. diff --git a/Cargo.lock b/Cargo.lock index 6e4f51bd0..ddee311ea 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,9 +4,9 @@ version = 3 [[package]] name = "addr2line" -version = "0.21.0" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" +checksum = "6e4503c46a5c0c7844e948c9a4d6acd9f50cccb4de1c48eb9e291ea17470c678" dependencies = [ "gimli", ] @@ -19,9 +19,9 @@ checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "ahash" -version = "0.7.6" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" +checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9" dependencies = [ "getrandom", "once_cell", @@ -30,20 +30,21 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.3" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" +checksum = 
"e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", "once_cell", "version_check", + "zerocopy", ] [[package]] name = "aho-corasick" -version = "1.0.5" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c378d78423fdad8089616f827526ee33c19f2fddbd5de1629152c9593ba4783" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" dependencies = [ "memchr", ] @@ -65,9 +66,9 @@ dependencies = [ [[package]] name = "allocator-api2" -version = "0.2.16" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" +checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" [[package]] name = "android-tzdata" @@ -90,27 +91,92 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b46cbb362ab8752921c97e041f5e366ee6297bd428a31275b9fcf1e380f7299" +[[package]] +name = "anstream" +version = "0.6.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64e15c1ab1f89faffbf04a634d5e1962e9074f2741eef6d97f3c4e322426d526" +dependencies = [ + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "is_terminal_polyfill", + "utf8parse", +] + [[package]] name = "anstyle" -version = "1.0.3" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b84bf0a05bbb2a83e5eb6fa36bb6e87baa08193c35ff52bbf6b38d8af2890e46" +checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" + +[[package]] +name = "anstyle-parse" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb47de1e80c2b463c735db5b217a0ddc39d612e7ac9e2e96a5aed1f57616c1cb" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.1.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d36fc52c7f6c869915e99412912f22093507da8d9e942ceaf66fe4b7c14422a" +dependencies = [ + "windows-sys 0.52.0", +] + +[[package]] +name = "anstyle-wincon" +version = "3.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5bf74e1b6e971609db8ca7a9ce79fd5768ab6ae46441c572e46cf596f59e57f8" +dependencies = [ + "anstyle", + "windows-sys 0.52.0", +] + +[[package]] +name = "anyhow" +version = "1.0.86" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" + +[[package]] +name = "aquatic_peer_id" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0732a73df221dcb25713849c6ebaf57b85355f669716652a7466f688cc06f25" +dependencies = [ + "compact_str", + "hex", + "quickcheck", + "regex", + "serde", + "zerocopy", +] [[package]] name = "aquatic_udp_protocol" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2919b480121f7d20d247524da62bad1b6b7928bc3f50898f624b5c592727341" +checksum = "0af90e5162f5fcbde33524128f08dc52a779f32512d5f8692eadd4b55c89389e" dependencies = [ + "aquatic_peer_id", "byteorder", "either", + "zerocopy", ] [[package]] name = "arc-swap" -version = "1.6.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" +checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" [[package]] name = "arrayvec" @@ -118,11 +184,44 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" +[[package]] +name = "async-attributes" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"a3203e79f4dd9bdda415ed03cf14dae5a2bf775c683a00f94e9cd1faf0f596e5" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "async-channel" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" +dependencies = [ + "concurrent-queue", + "event-listener 2.5.3", + "futures-core", +] + +[[package]] +name = "async-channel" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89b47800b0be77592da0afd425cc03468052844aff33b84e33cc696f64e77b6a" +dependencies = [ + "concurrent-queue", + "event-listener-strategy", + "futures-core", + "pin-project-lite", +] + [[package]] name = "async-compression" -version = "0.4.3" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb42b2197bf15ccb092b62c74515dbd8b86d0effd934795f6687c93b6e679a2c" +checksum = "fec134f64e2bc57411226dfc4e52dec859ddfc7e711fc5e07b612584f000e4aa" dependencies = [ "brotli", "flate2", @@ -134,37 +233,202 @@ dependencies = [ "zstd-safe", ] +[[package]] +name = "async-executor" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7ebdfa2ebdab6b1760375fa7d6f382b9f486eac35fc994625a00e89280bdbb7" +dependencies = [ + "async-task", + "concurrent-queue", + "fastrand 2.1.0", + "futures-lite 2.3.0", + "slab", +] + +[[package]] +name = "async-global-executor" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" +dependencies = [ + "async-channel 2.3.1", + "async-executor", + "async-io 2.3.4", + "async-lock 3.4.0", + "blocking", + "futures-lite 2.3.0", + "once_cell", + "tokio", +] + +[[package]] +name = "async-io" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0fc5b45d93ef0529756f812ca52e44c221b35341892d3dcc34132ac02f3dd2af" +dependencies = [ + "async-lock 2.8.0", + "autocfg", + "cfg-if", + "concurrent-queue", + "futures-lite 1.13.0", + "log", + "parking", + "polling 2.8.0", + "rustix 0.37.27", + "slab", + "socket2 0.4.10", + "waker-fn", +] + +[[package]] +name = "async-io" +version = "2.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "444b0228950ee6501b3568d3c93bf1176a1fdbc3b758dcd9475046d30f4dc7e8" +dependencies = [ + "async-lock 3.4.0", + "cfg-if", + "concurrent-queue", + "futures-io", + "futures-lite 2.3.0", + "parking", + "polling 3.7.3", + "rustix 0.38.34", + "slab", + "tracing", + "windows-sys 0.59.0", +] + +[[package]] +name = "async-lock" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "287272293e9d8c41773cec55e365490fe034813a2f172f502d6ddcf75b2f582b" +dependencies = [ + "event-listener 2.5.3", +] + +[[package]] +name = "async-lock" +version = "3.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" +dependencies = [ + "event-listener 5.3.1", + "event-listener-strategy", + "pin-project-lite", +] + +[[package]] +name = "async-std" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62565bb4402e926b29953c785397c6dc0391b7b446e45008b0049eb43cec6f5d" +dependencies = [ + "async-attributes", + "async-channel 1.9.0", + "async-global-executor", + "async-io 1.13.0", + "async-lock 2.8.0", + "crossbeam-utils", + "futures-channel", + "futures-core", + "futures-io", + "futures-lite 1.13.0", + "gloo-timers", + "kv-log-macro", + "log", + "memchr", + "once_cell", + "pin-project-lite", + "pin-utils", + "slab", + "wasm-bindgen-futures", +] + +[[package]] +name = "async-task" +version = "4.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" + [[package]] name = "async-trait" -version = "0.1.73" +version = "0.1.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc00ceb34980c03614e35a3a4e218276a0a824e911d07651cd0d858a51e8c0f0" +checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107" dependencies = [ "proc-macro2", "quote", - "syn 2.0.35", + "syn 2.0.74", +] + +[[package]] +name = "atomic" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d818003e740b63afc82337e3160717f4f63078720a810b7b903e70a5d1d2994" +dependencies = [ + "bytemuck", ] +[[package]] +name = "atomic-waker" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + [[package]] name = "autocfg" -version = "1.1.0" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" + +[[package]] +name = "aws-lc-rs" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +checksum = "4ae74d9bd0a7530e8afd1770739ad34b36838829d6ad61818f9230f683f5ad77" +dependencies = [ + "aws-lc-sys", + "mirai-annotations", + "paste", + "zeroize", +] + +[[package]] +name = "aws-lc-sys" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f0e249228c6ad2d240c2dc94b714d711629d52bad946075d8e9b2f5391f0703" +dependencies = [ + "bindgen", + "cc", + "cmake", + "dunce", + "fs_extra", + "libc", + "paste", +] [[package]] name = "axum" -version = "0.6.20" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" +checksum = 
"3a6c9af12842a67734c9a2e355436e5d03b22383ed60cf13cd0c18fbfe3dcbcf" dependencies = [ "async-trait", "axum-core", - "bitflags 1.3.2", + "axum-macros", "bytes", "futures-util", "http", "http-body", + "http-body-util", "hyper", + "hyper-util", "itoa", "matchit", "memchr", @@ -176,18 +440,19 @@ dependencies = [ "serde_json", "serde_path_to_error", "serde_urlencoded", - "sync_wrapper", + "sync_wrapper 1.0.1", "tokio", "tower", "tower-layer", "tower-service", + "tracing", ] [[package]] name = "axum-client-ip" -version = "0.4.2" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ef117890a418b7832678d9ea1e1c08456dd7b2fd1dadb9676cd6f0fe7eb4b21" +checksum = "72188bed20deb981f3a4a9fe674e5980fd9e9c2bd880baa94715ad5d60d64c67" dependencies = [ "axum", "forwarded-header-value", @@ -196,46 +461,89 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.3.4" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" +checksum = "a15c63fd72d41492dc4f497196f5da1fb04fb7529e631d73630d1b491e47a2e3" dependencies = [ "async-trait", "bytes", "futures-util", "http", "http-body", + "http-body-util", "mime", + "pin-project-lite", "rustversion", + "sync_wrapper 0.1.2", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "axum-extra" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0be6ea09c9b96cb5076af0de2e383bd2bc0c18f827cf1967bdd353e0b910d733" +dependencies = [ + "axum", + "axum-core", + "bytes", + "futures-util", + "http", + "http-body", + "http-body-util", + "mime", + "pin-project-lite", + "serde", + "serde_html_form", + "tower", "tower-layer", "tower-service", + "tracing", +] + +[[package]] +name = "axum-macros" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"00c055ee2d014ae5981ce1016374e8213682aa14d9bf40e48ab48b5f3ef20eaa" +dependencies = [ + "heck 0.4.1", + "proc-macro2", + "quote", + "syn 2.0.74", ] [[package]] name = "axum-server" -version = "0.5.1" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "447f28c85900215cc1bea282f32d4a2f22d55c5a300afdfbc661c8d6a632e063" +checksum = "56bac90848f6a9393ac03c63c640925c4b7c8ca21654de40d53f55964667c7d8" dependencies = [ "arc-swap", "bytes", "futures-util", "http", "http-body", + "http-body-util", "hyper", + "hyper-util", "pin-project-lite", "rustls", "rustls-pemfile", + "rustls-pki-types", "tokio", "tokio-rustls", + "tower", "tower-service", ] [[package]] name = "backtrace" -version = "0.3.69" +version = "0.3.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" +checksum = "5cc23269a4f8976d0a4d2e7109211a419fe30e8d88d677cd60b6bc79c5732e0a" dependencies = [ "addr2line", "cc", @@ -248,22 +556,24 @@ dependencies = [ [[package]] name = "base64" -version = "0.13.1" +version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" +checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" [[package]] name = "base64" -version = "0.21.4" +version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ba43ea6f343b788c8764558649e08df62f86c6ef251fdaeb1ffd010a9ae50a2" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "bigdecimal" -version = "0.3.1" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6773ddc0eafc0e509fb60e48dff7f450f8e674a0686ae8605e8d9901bd5eefa" +checksum = "51d712318a27c7150326677b321a5fa91b55f6d9034ffd67f20319e147d40cee" dependencies = [ + "autocfg", + "libm", "num-bigint", "num-integer", 
"num-traits", @@ -277,22 +587,25 @@ checksum = "383d29d513d8764dcdc42ea295d979eb99c3c9f00607b3692cf68a431f7dca72" [[package]] name = "bindgen" -version = "0.68.1" +version = "0.69.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "726e4313eb6ec35d2730258ad4e15b547ee75d6afaa1361a922e78e59b7d8078" +checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.6.0", "cexpr", "clang-sys", + "itertools 0.12.1", "lazy_static", "lazycell", - "peeking_take_while", + "log", + "prettyplease", "proc-macro2", "quote", "regex", "rustc-hash", "shlex", - "syn 2.0.35", + "syn 2.0.74", + "which", ] [[package]] @@ -303,9 +616,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635" +checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" [[package]] name = "bitvec" @@ -329,55 +642,47 @@ dependencies = [ ] [[package]] -name = "borsh" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4114279215a005bc675e386011e594e1d9b800918cea18fcadadcce864a2046b" -dependencies = [ - "borsh-derive", - "hashbrown 0.13.2", -] - -[[package]] -name = "borsh-derive" -version = "0.10.3" +name = "blocking" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0754613691538d51f329cce9af41d7b7ca150bc973056f1156611489475f54f7" +checksum = "703f41c54fc768e63e091340b424302bb1c29ef4aa0c7f10fe849dfb114d29ea" dependencies = [ - "borsh-derive-internal", - "borsh-schema-derive-internal", - "proc-macro-crate 0.1.5", - "proc-macro2", - "syn 1.0.109", + "async-channel 2.3.1", + "async-task", + "futures-io", + "futures-lite 2.3.0", + "piper", ] [[package]] -name = 
"borsh-derive-internal" -version = "0.10.3" +name = "borsh" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afb438156919598d2c7bad7e1c0adf3d26ed3840dbc010db1a882a65583ca2fb" +checksum = "a6362ed55def622cddc70a4746a68554d7b687713770de539e59a739b249f8ed" dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", + "borsh-derive", + "cfg_aliases", ] [[package]] -name = "borsh-schema-derive-internal" -version = "0.10.3" +name = "borsh-derive" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "634205cc43f74a1b9046ef87c4540ebda95696ec0f315024860cad7c5b0f5ccd" +checksum = "c3ef8005764f53cd4dca619f5bf64cafd4664dada50ece25e4d81de54c80cc0b" dependencies = [ + "once_cell", + "proc-macro-crate", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.74", + "syn_derive", ] [[package]] name = "brotli" -version = "3.3.4" +version = "6.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1a0b1dbcc8ae29329621f8d4f0d835787c1c38bb1401979b49d13b0b305ff68" +checksum = "74f7971dbd9326d58187408ab83117d8ac1bb9c17b085fdacd1cf2f598719b6b" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -386,14 +691,23 @@ dependencies = [ [[package]] name = "brotli-decompressor" -version = "2.3.4" +version = "4.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b6561fd3f895a11e8f72af2cb7d22e08366bebc2b6b57f7744c4bda27034744" +checksum = "9a45bd2e4095a8b518033b128020dd4a55aab1c0a381ba4404a472630f4bc362" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", ] +[[package]] +name = "btoi" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dd6407f73a9b8b6162d8a2ef999fe6afd7cc15902ebf42c5cd296addf17e0ad" +dependencies = [ + "num-traits", +] + [[package]] name = "bufstream" version = "0.1.4" @@ -402,15 +716,15 @@ checksum = "40e38929add23cdf8a366df9b0e088953150724bcbe5fc330b0d8eb3b328eec8" [[package]] name 
= "bumpalo" -version = "3.14.0" +version = "3.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" +checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" [[package]] name = "bytecheck" -version = "0.6.11" +version = "0.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b6372023ac861f6e6dc89c8344a8f398fb42aaba2b5dbc649ca0c0e9dbcb627" +checksum = "23cdc57ce23ac53c931e88a43d06d070a6fd142f2617be5855eb75efc9beb1c2" dependencies = [ "bytecheck_derive", "ptr_meta", @@ -419,26 +733,41 @@ dependencies = [ [[package]] name = "bytecheck_derive" -version = "0.6.11" +version = "0.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7ec4c6f261935ad534c0c22dbef2201b45918860eb1c574b972bd213a76af61" +checksum = "3db406d29fbcd95542e92559bed4d8ad92636d1ca8b3b72ede10b4bcc010e659" dependencies = [ "proc-macro2", "quote", "syn 1.0.109", ] +[[package]] +name = "bytemuck" +version = "1.16.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "102087e286b4677862ea56cf8fc58bb2cdfa8725c40ffb80fe3a008eb7f2fc83" + [[package]] name = "byteorder" -version = "1.4.3" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.5.0" +version = "1.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8318a53db07bb3f8dca91a600466bdb3f2eaadeedfdbcf02e1accbad9271ba50" + +[[package]] +name = "camino" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" +checksum = "e0ec6b951b160caa93cc0c7b209e5a3bff7aae9062213451ac99493cd844c239" 
+dependencies = [ + "serde", +] [[package]] name = "cast" @@ -446,11 +775,20 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" +[[package]] +name = "castaway" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0abae9be0aaf9ea96a3b1b8b1b55c602ca751eba1b1500220cea4ecbafe7c0d5" +dependencies = [ + "rustversion", +] + [[package]] name = "cc" -version = "1.0.83" +version = "1.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +checksum = "e9e8aabfac534be767c909e0690571677d49f41bd8465ae876fe043d52ba5292" dependencies = [ "jobserver", "libc", @@ -471,24 +809,30 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + [[package]] name = "chrono" -version = "0.4.31" +version = "0.4.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f2c685bad3eb3d45a01354cedb7d5faa66194d1d58ba6e267a8de788f79db38" +checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" dependencies = [ "android-tzdata", "iana-time-zone", "num-traits", "serde", - "windows-targets", + "windows-targets 0.52.6", ] [[package]] name = "ciborium" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "effd91f6c78e5a4ace8a5d3c0b6bfaec9e2baaef55f3efc00e45fb2e477ee926" +checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" dependencies = [ "ciborium-io", "ciborium-ll", @@ -497,15 +841,15 @@ dependencies = [ [[package]] name = 
"ciborium-io" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdf919175532b369853f5d5e20b26b43112613fd6fe7aee757e35f7a44642656" +checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" [[package]] name = "ciborium-ll" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "defaa24ecc093c77630e6c15e17c51f5e187bf35ee514f4e2d67baaa96dae22b" +checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" dependencies = [ "ciborium-io", "half", @@ -513,9 +857,9 @@ dependencies = [ [[package]] name = "clang-sys" -version = "1.6.1" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c688fc74432808e3eb684cae8830a86be1d66a2bd58e1f248ed0960a590baf6f" +checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" dependencies = [ "glob", "libc", @@ -524,28 +868,43 @@ dependencies = [ [[package]] name = "clap" -version = "4.4.3" +version = "4.5.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84ed82781cea27b43c9b106a979fe450a13a31aab0500595fb3fc06616de08e6" +checksum = "11d8838454fda655dafd3accb2b6e2bea645b9e4078abe84a22ceb947235c5cc" dependencies = [ "clap_builder", + "clap_derive", ] [[package]] name = "clap_builder" -version = "4.4.2" +version = "4.5.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bb9faaa7c2ef94b2743a21f5a29e6f0010dff4caa69ac8e9d6cf8b6fa74da08" +checksum = "216aec2b177652e3846684cbfe25c9964d18ec45234f0f5da5157b207ed1aab6" dependencies = [ + "anstream", "anstyle", "clap_lex", + "strsim", +] + +[[package]] +name = "clap_derive" +version = "4.5.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "501d359d5f3dcaf6ecdeee48833ae73ec6e42723a1e52419c79abf9507eec0a0" +dependencies = [ + "heck 0.5.0", + "proc-macro2", + "quote", + "syn 2.0.74", ] 
[[package]] name = "clap_lex" -version = "0.5.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd7cc57abe963c6d3b9d8be5b06ba7c8957a930305ca90304f24ef040aa6f961" +checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" [[package]] name = "cmake" @@ -557,22 +916,31 @@ dependencies = [ ] [[package]] -name = "config" -version = "0.13.3" +name = "colorchoice" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d379af7f68bfc21714c6c7dea883544201741d2ce8274bb12fa54f89507f52a7" +checksum = "d3fd119d74b830634cea2a0f58bbd0d54540518a14397557951e79340abc28c0" + +[[package]] +name = "compact_str" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f86b9c4c00838774a6d902ef931eff7470720c51d90c2e32cfe15dc304737b3f" dependencies = [ - "async-trait", - "json5", - "lazy_static", - "nom", - "pathdiff", - "ron", - "rust-ini", - "serde", - "serde_json", - "toml 0.5.11", - "yaml-rust", + "castaway", + "cfg-if", + "itoa", + "ryu", + "static_assertions", +] + +[[package]] +name = "concurrent-queue" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" +dependencies = [ + "crossbeam-utils", ] [[package]] @@ -583,9 +951,9 @@ checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" [[package]] name = "core-foundation" -version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" +checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" dependencies = [ "core-foundation-sys", "libc", @@ -593,24 +961,24 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.4" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "e496a50fda8aacccc86d7529e2c1e0892dbd0f898a6b5645b5561b89c3210efa" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "cpufeatures" -version = "0.2.9" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a17b76ff3a4162b0b27f354a0c87015ddad39d35f9c0c36607a3bdd175dde1f1" +checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" dependencies = [ "libc", ] [[package]] name = "crc32fast" -version = "1.3.2" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" +checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" dependencies = [ "cfg-if", ] @@ -626,8 +994,9 @@ dependencies = [ "ciborium", "clap", "criterion-plot", + "futures", "is-terminal", - "itertools", + "itertools 0.10.5", "num-traits", "once_cell", "oorandom", @@ -638,6 +1007,7 @@ dependencies = [ "serde_derive", "serde_json", "tinytemplate", + "tokio", "walkdir", ] @@ -648,16 +1018,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1" dependencies = [ "cast", - "itertools", + "itertools 0.10.5", ] [[package]] name = "crossbeam" -version = "0.8.2" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2801af0d36612ae591caa9568261fddce32ce6e08a7275ea334a06a4ad021a2c" +checksum = "1137cd7e7fc0fb5d3c5a8678be38ec56e819125d8d7907411fe24ccb943faca8" dependencies = [ - "cfg-if", "crossbeam-channel", "crossbeam-deque", "crossbeam-epoch", @@ -667,57 +1036,63 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.8" +version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a33c2bf77f2df06183c3aa30d1e96c0695a313d4f9c453cc3762a6db39f99200" +checksum = 
"33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2" dependencies = [ - "cfg-if", "crossbeam-utils", ] [[package]] name = "crossbeam-deque" -version = "0.8.3" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" +checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" dependencies = [ - "cfg-if", "crossbeam-epoch", "crossbeam-utils", ] [[package]] name = "crossbeam-epoch" -version = "0.9.15" +version = "0.9.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" dependencies = [ - "autocfg", - "cfg-if", "crossbeam-utils", - "memoffset", - "scopeguard", ] [[package]] name = "crossbeam-queue" -version = "0.3.8" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add" +checksum = "df0346b5d5e76ac2fe4e327c5fd1118d6be7c51dfb18f9b7922923f287471e35" dependencies = [ - "cfg-if", "crossbeam-utils", ] [[package]] -name = "crossbeam-utils" -version = "0.8.16" +name = "crossbeam-skiplist" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a22b2d63d4d1dc0b7f1b6b2747dd0088008a9be28b6ddf0b1e7d335e3037294" +checksum = "df29de440c58ca2cc6e587ec3d22347551a32435fbde9d2bff64e78a9ffa151b" dependencies = [ - "cfg-if", + "crossbeam-epoch", + "crossbeam-utils", ] +[[package]] +name = "crossbeam-utils" +version = "0.8.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" + +[[package]] +name = "crunchy" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" + [[package]] name = "crypto-common" version = "0.1.6" @@ -730,9 +1105,9 @@ dependencies = [ [[package]] name = "darling" -version = "0.20.3" +version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0209d94da627ab5605dcccf08bb18afa5009cfbef48d8a8b7d7bdbc79be25c5e" +checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989" dependencies = [ "darling_core", "darling_macro", @@ -740,68 +1115,77 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.20.3" +version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "177e3443818124b357d8e76f53be906d60937f0d3a90773a664fa63fa253e621" +checksum = "95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5" dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", "strsim", - "syn 2.0.35", + "syn 2.0.74", ] [[package]] name = "darling_macro" -version = "0.20.3" +version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "836a9bbc7ad63342d6d6e7b815ccab164bc77a2d95d84bc3117a8c0d5c98e2d5" +checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.35", + "syn 2.0.74", +] + +[[package]] +name = "dashmap" +version = "6.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "804c8821570c3f8b70230c2ba75ffa5c0f9a4189b9a432b6656c536712acae28" +dependencies = [ + "cfg-if", + "crossbeam-utils", + "hashbrown 0.14.5", + "lock_api", + "once_cell", + "parking_lot_core", ] [[package]] name = "deranged" -version = "0.3.8" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2696e8a945f658fd14dc3b87242e6b80cd0f36ff04ea560fa39082368847946" +checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" dependencies = [ + "powerfmt", "serde", ] [[package]] name = 
"derive_more" -version = "0.99.17" +version = "0.99.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" +checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce" dependencies = [ "convert_case", "proc-macro2", "quote", "rustc_version", - "syn 1.0.109", + "syn 2.0.74", ] [[package]] name = "derive_utils" -version = "0.13.2" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9abcad25e9720609ccb3dcdb795d845e37d8ce34183330a9f48b03a1a71c8e21" +checksum = "61bb5a1014ce6dfc2a378578509abe775a5aa06bff584a547555d9efdb81b926" dependencies = [ "proc-macro2", "quote", - "syn 2.0.35", + "syn 2.0.74", ] -[[package]] -name = "difflib" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6184e33543162437515c2e2b48714794e37845ec9851711914eec9d308f6ebe8" - [[package]] name = "digest" version = "0.10.7" @@ -812,33 +1196,43 @@ dependencies = [ "crypto-common", ] -[[package]] -name = "dlv-list" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0688c2a7f92e427f44895cd63841bff7b29f8d7a1648b9e7e07a4a365b2e1257" - [[package]] name = "downcast" version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" +[[package]] +name = "dunce" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" + [[package]] name = "either" -version = "1.9.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" +checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" [[package]] name = "encoding_rs" -version = "0.8.33" +version 
= "0.8.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1" +checksum = "b45de904aa0b010bce2ab45264d0631681847fa7b6f2eaa7dab7619943bc4f59" dependencies = [ "cfg-if", ] +[[package]] +name = "env_logger" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a19187fea3ac7e84da7dacf48de0c45d63c6a76f9490dae389aead16c243fce3" +dependencies = [ + "log", + "regex", +] + [[package]] name = "equivalent" version = "1.0.1" @@ -847,23 +1241,12 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "136526188508e25c6fef639d7927dfb3e0e3084488bf202267829cf7fc23dbdd" -dependencies = [ - "errno-dragonfly", - "libc", - "windows-sys", -] - -[[package]] -name = "errno-dragonfly" -version = "0.1.2" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa68f1b12764fab894d2755d2518754e71b4fd80ecfb822714a1206c2aab39bf" +checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" dependencies = [ - "cc", "libc", + "windows-sys 0.52.0", ] [[package]] @@ -876,11 +1259,38 @@ dependencies = [ "version_check", ] +[[package]] +name = "event-listener" +version = "2.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" + +[[package]] +name = "event-listener" +version = "5.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6032be9bd27023a771701cc49f9f053c751055f71efb2e0ae5c15809093675ba" +dependencies = [ + "concurrent-queue", + "parking", + "pin-project-lite", +] + +[[package]] +name = "event-listener-strategy" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" +dependencies = [ + "event-listener 5.3.1", + "pin-project-lite", +] + [[package]] name = "fallible-iterator" -version = "0.2.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" +checksum = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649" [[package]] name = "fallible-streaming-iterator" @@ -890,39 +1300,46 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" [[package]] name = "fastrand" -version = "2.0.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6999dc1837253364c2ebb0704ba97994bd874e8f195d665c50b7548f6ea92764" +checksum = "e51093e27b0797c359783294ca4f0a911c270184cb10f85783b118614a1501be" +dependencies = [ + "instant", +] + +[[package]] +name = "fastrand" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" [[package]] -name = "fern" -version = "0.6.2" +name = "figment" +version = "0.10.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9f0c14694cbd524c8720dd69b0e3179344f04ebb5f90f2e4a440c6ea3b2f1ee" +checksum = "8cb01cd46b0cf372153850f4c6c272d9cbea2da513e07538405148f95bd789f3" dependencies = [ - "log", + "atomic", + "parking_lot", + "pear", + "serde", + "tempfile", + "toml", + "uncased", + "version_check", ] [[package]] name = "flate2" -version = "1.0.27" +version = "1.0.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6c98ee8095e9d1dcbf2fcc6d95acccb90d1c81db1e44725c6a984b1dbdfb010" +checksum = "7f211bbe8e69bbd0cfdea405084f128ae8b4aaa6b0b522fc8f2b009084797920" dependencies = [ "crc32fast", "libz-sys", "miniz_oxide", ] -[[package]] -name = "float-cmp" -version = "0.9.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "98de4bbd547a563b716d8dfa9aad1cb19bfab00f4fa09a6a4ed21dbcf44ce9c4" -dependencies = [ - "num-traits", -] - [[package]] name = "fnv" version = "1.0.7" @@ -946,9 +1363,9 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" dependencies = [ "percent-encoding", ] @@ -994,7 +1411,7 @@ checksum = "b0fa992f1656e1707946bbba340ad244f0814009ef8c0118eb7b658395f19a2e" dependencies = [ "frunk_proc_macro_helpers", "quote", - "syn 2.0.35", + "syn 2.0.74", ] [[package]] @@ -1006,7 +1423,7 @@ dependencies = [ "frunk_core", "proc-macro2", "quote", - "syn 2.0.35", + "syn 2.0.74", ] [[package]] @@ -1018,9 +1435,15 @@ dependencies = [ "frunk_core", "frunk_proc_macro_helpers", "quote", - "syn 2.0.35", + "syn 2.0.74", ] +[[package]] +name = "fs_extra" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" + [[package]] name = "funty" version = "2.0.0" @@ -1029,9 +1452,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" +checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" dependencies = [ "futures-channel", "futures-core", @@ -1044,9 +1467,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" +checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" dependencies = [ "futures-core", "futures-sink", @@ -1054,15 +1477,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" +checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" [[package]] name = "futures-executor" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" +checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" dependencies = [ "futures-core", "futures-task", @@ -1071,38 +1494,72 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.28" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" + +[[package]] +name = "futures-lite" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" +checksum = "49a9d51ce47660b1e808d3c990b4709f2f415d928835a17dfd16991515c46bce" +dependencies = [ + "fastrand 1.9.0", + "futures-core", + "futures-io", + "memchr", + "parking", + "pin-project-lite", + "waker-fn", +] + +[[package]] +name = "futures-lite" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" +dependencies = [ + "fastrand 2.1.0", + "futures-core", + "futures-io", + "parking", + "pin-project-lite", +] [[package]] name = "futures-macro" -version = "0.3.28" +version = "0.3.30" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" +checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.35", + "syn 2.0.74", ] [[package]] name = "futures-sink" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" +checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" [[package]] name = "futures-task" -version = "0.3.28" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" + +[[package]] +name = "futures-timer" +version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" +checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" [[package]] name = "futures-util" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" +checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" dependencies = [ "futures-channel", "futures-core", @@ -1128,9 +1585,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.10" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" +checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" dependencies = [ "cfg-if", "libc", @@ -1139,9 +1596,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.28.0" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0" +checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" [[package]] name = "glob" @@ -1149,19 +1606,31 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" +[[package]] +name = "gloo-timers" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c" +dependencies = [ + "futures-channel", + "futures-core", + "js-sys", + "wasm-bindgen", +] + [[package]] name = "h2" -version = "0.3.21" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91fc23aa11be92976ef4729127f1a74adf36d8436f7816b185d18df956790833" +checksum = "fa82e28a107a8cc405f0839610bdc9b15f1e25ec7d696aa5cf173edbcb1486ab" dependencies = [ + "atomic-waker", "bytes", "fnv", "futures-core", "futures-sink", - "futures-util", "http", - "indexmap 1.9.3", + "indexmap 2.3.0", "slab", "tokio", "tokio-util", @@ -1170,45 +1639,40 @@ dependencies = [ [[package]] name = "half" -version = "1.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" - -[[package]] -name = "hashbrown" -version = "0.12.3" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" dependencies = [ - "ahash 0.7.6", + "cfg-if", + "crunchy", ] [[package]] name = "hashbrown" -version = "0.13.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" 
dependencies = [ - "ahash 0.8.3", + "ahash 0.7.8", ] [[package]] name = "hashbrown" -version = "0.14.0" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" dependencies = [ - "ahash 0.8.3", + "ahash 0.8.11", "allocator-api2", ] [[package]] name = "hashlink" -version = "0.8.4" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8094feaf31ff591f651a2664fb9cfd92bba7a60ce3197265e9482ebe753c8f7" +checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" dependencies = [ - "hashbrown 0.14.0", + "hashbrown 0.14.5", ] [[package]] @@ -1217,11 +1681,23 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + [[package]] name = "hermit-abi" -version = "0.3.2" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" + +[[package]] +name = "hermit-abi" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "443144c8cdadd93ebf52ddb4056d257f5b52c04d3c804e657d19eb73fc33668b" +checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" [[package]] name = "hex" @@ -1229,11 +1705,26 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +[[package]] +name = "hex-literal" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"6fe2267d4ed49bc07b63801559be28c718ea06c4738b7a03c94df7386d2cde46" + +[[package]] +name = "home" +version = "0.5.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" +dependencies = [ + "windows-sys 0.52.0", +] + [[package]] name = "http" -version = "0.2.9" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" +checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" dependencies = [ "bytes", "fnv", @@ -1242,26 +1733,32 @@ dependencies = [ [[package]] name = "http-body" -version = "0.4.5" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", "http", - "pin-project-lite", ] [[package]] -name = "http-range-header" -version = "0.3.1" +name = "http-body-util" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "add0ab9360ddbd88cfeb3bd9574a1d85cfdfa14db10b3e21d3700dbc4328758f" +checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" +dependencies = [ + "bytes", + "futures-util", + "http", + "http-body", + "pin-project-lite", +] [[package]] name = "httparse" -version = "1.8.0" +version = "1.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" +checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9" [[package]] name = "httpdate" @@ -1271,13 +1768,12 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "hyper" -version = "0.14.27" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "ffb1cfd654a8219eaef89881fdb3bb3b1cdc5fa75ded05d6933b2b382e395468" +checksum = "50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05" dependencies = [ "bytes", "futures-channel", - "futures-core", "futures-util", "h2", "http", @@ -1286,38 +1782,76 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.9", + "smallvec", "tokio", - "tower-service", - "tracing", "want", ] +[[package]] +name = "hyper-rustls" +version = "0.27.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ee4be2c948921a1a5320b629c4193916ed787a7f7f293fd3f7f5a6c9de74155" +dependencies = [ + "futures-util", + "http", + "hyper", + "hyper-util", + "rustls", + "rustls-pki-types", + "tokio", + "tokio-rustls", + "tower-service", +] + [[package]] name = "hyper-tls" -version = "0.5.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" +checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" dependencies = [ "bytes", + "http-body-util", "hyper", + "hyper-util", "native-tls", "tokio", "tokio-native-tls", + "tower-service", +] + +[[package]] +name = "hyper-util" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cde7055719c54e36e95e8719f95883f22072a48ede39db7fc17a4e1d5281e9b9" +dependencies = [ + "bytes", + "futures-channel", + "futures-util", + "http", + "http-body", + "hyper", + "pin-project-lite", + "socket2 0.5.7", + "tokio", + "tower", + "tower-service", + "tracing", ] [[package]] name = "iana-time-zone" -version = "0.1.57" +version = "0.1.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fad5b825842d2b38bd206f3e81d6957625fd7f0a361e345c30e01a0ae2dd613" +checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", 
"js-sys", "wasm-bindgen", - "windows", + "windows-core", ] [[package]] @@ -1337,9 +1871,9 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" [[package]] name = "idna" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" dependencies = [ "unicode-bidi", "unicode-normalization", @@ -1358,192 +1892,163 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.0.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" +checksum = "de3fc2e30ba82dd1b3911c8de1ffc143c74a914a14e99514d7637e3099df5ea0" dependencies = [ "equivalent", - "hashbrown 0.14.0", + "hashbrown 0.14.5", "serde", ] [[package]] -name = "io-enum" -version = "1.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5305557fa27b460072ae15ce07617e999f5879f14d376c8449f0bfb9f9d8e91e" -dependencies = [ - "derive_utils", - "syn 2.0.35", -] - -[[package]] -name = "ipnet" -version = "2.8.0" +name = "inlinable_string" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28b29a3cd74f0f4598934efe3aeba42bae0eb4680554128851ebbecb02af14e6" +checksum = "c8fae54786f62fb2918dcfae3d568594e50eb9b5c25bf04371af6fe7516452fb" [[package]] -name = "is-terminal" -version = "0.4.9" +name = "instant" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" +checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" dependencies = [ - "hermit-abi", - "rustix", - "windows-sys", + "cfg-if", ] [[package]] -name = "itertools" -version = "0.10.5" +name = "io-enum" +version = "1.1.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +checksum = "53b53d712d99a73eec59ee5e4fe6057f8052142d38eeafbbffcb06b36d738a6e" dependencies = [ - "either", + "derive_utils", ] [[package]] -name = "itoa" -version = "1.0.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" - -[[package]] -name = "jobserver" -version = "0.1.26" +name = "io-lifetimes" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "936cfd212a0155903bcbc060e316fb6cc7cbf2e1907329391ebadc1fe0ce77c2" +checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ + "hermit-abi 0.3.9", "libc", + "windows-sys 0.48.0", ] [[package]] -name = "js-sys" -version = "0.3.64" +name = "ipnet" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" -dependencies = [ - "wasm-bindgen", -] +checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" [[package]] -name = "json5" -version = "0.4.1" +name = "is-terminal" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96b0db21af676c1ce64250b5f40f3ce2cf27e4e47cb91ed91eb6fe9350b430c1" +checksum = "f23ff5ef2b80d608d61efee834934d862cd92461afc0560dedf493e4c033738b" dependencies = [ - "pest", - "pest_derive", - "serde", + "hermit-abi 0.3.9", + "libc", + "windows-sys 0.52.0", ] [[package]] -name = "lazy_static" -version = "1.4.0" +name = "is_terminal_polyfill" +version = "1.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" [[package]] -name = "lazycell" -version = "1.3.0" 
+name = "itertools" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +dependencies = [ + "either", +] [[package]] -name = "lexical" -version = "6.1.1" +name = "itertools" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7aefb36fd43fef7003334742cbf77b243fcd36418a1d1bdd480d613a67968f6" +checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" dependencies = [ - "lexical-core", + "either", ] [[package]] -name = "lexical-core" -version = "0.8.5" +name = "itoa" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cde5de06e8d4c2faabc400238f9ae1c74d5412d03a7bd067645ccbc47070e46" -dependencies = [ - "lexical-parse-float", - "lexical-parse-integer", - "lexical-util", - "lexical-write-float", - "lexical-write-integer", -] +checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] -name = "lexical-parse-float" -version = "0.8.5" +name = "jobserver" +version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "683b3a5ebd0130b8fb52ba0bdc718cc56815b6a097e28ae5a6997d0ad17dc05f" +checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" dependencies = [ - "lexical-parse-integer", - "lexical-util", - "static_assertions", + "libc", ] [[package]] -name = "lexical-parse-integer" -version = "0.8.6" +name = "js-sys" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d0994485ed0c312f6d965766754ea177d07f9c00c9b82a5ee62ed5b47945ee9" +checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" dependencies = [ - "lexical-util", - "static_assertions", + "wasm-bindgen", ] [[package]] -name = "lexical-util" -version = "0.8.5" +name = 
"kv-log-macro" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5255b9ff16ff898710eb9eb63cb39248ea8a5bb036bea8085b1a767ff6c4e3fc" +checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" dependencies = [ - "static_assertions", + "log", ] [[package]] -name = "lexical-write-float" -version = "0.8.5" +name = "lazy_static" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "accabaa1c4581f05a3923d1b4cfd124c329352288b7b9da09e766b0668116862" -dependencies = [ - "lexical-util", - "lexical-write-integer", - "static_assertions", -] +checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] -name = "lexical-write-integer" -version = "0.8.5" +name = "lazycell" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1b6f3d1f4422866b68192d62f77bc5c700bee84f3069f2469d7bc8c77852446" -dependencies = [ - "lexical-util", - "static_assertions", -] +checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.148" +version = "0.2.155" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cdc71e17332e86d2e1d38c1f99edcb6288ee11b815fb1a4b049eaa2114d369b" +checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" [[package]] name = "libloading" -version = "0.7.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" +checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" dependencies = [ "cfg-if", - "winapi", + "windows-targets 0.52.6", ] +[[package]] +name = "libm" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" + [[package]] name = "libsqlite3-sys" -version 
= "0.26.0" +version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afc22eff61b133b115c6e8c74e818c628d6d5e7a502afea6f64dee076dd94326" +checksum = "2e99fb7a497b1e3339bc746195567ed8d3e24945ecd636e3619d20b9de9e9149" dependencies = [ "cc", "pkg-config", @@ -1552,9 +2057,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.12" +version = "1.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d97137b25e321a73eef1418d1d5d2eda4d77e12813f8e6dead84bc52c5870a7b" +checksum = "c15da26e5af7e25c90b37a2d75cdbf940cf4a55316de9d84c679c9b8bfabf82e" dependencies = [ "cc", "pkg-config", @@ -1562,34 +2067,34 @@ dependencies = [ ] [[package]] -name = "linked-hash-map" -version = "0.5.6" +name = "linux-raw-sys" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" +checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "linux-raw-sys" -version = "0.4.7" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a9bad9f94746442c783ca431b22403b519cd7fbeed0533fdd6328b2f2212128" +checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" [[package]] name = "local-ip-address" -version = "0.5.5" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fefe707432eb6bd4704b3dacfc87aab269d56667ad05dcd6869534e8890e767" +checksum = "136ef34e18462b17bf39a7826f8f3bbc223341f8e83822beb8b77db9a3d49696" dependencies = [ "libc", "neli", "thiserror", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] name = "lock_api" -version = "0.4.10" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1cc9717a20b1bb222f333e6a92fd32f7d8a18ddc5a3191a11af45dcbf4dcd16" +checksum = 
"07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" dependencies = [ "autocfg", "scopeguard", @@ -1597,39 +2102,33 @@ dependencies = [ [[package]] name = "log" -version = "0.4.20" +version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" +checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" +dependencies = [ + "value-bag", +] [[package]] name = "lru" -version = "0.10.1" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "718e8fae447df0c7e1ba7f5189829e63fd536945c8988d61444c19039f16b670" +checksum = "37ee39891760e7d94734f6f63fedc29a2e4a152f836120753a72503f09fcf904" dependencies = [ - "hashbrown 0.13.2", + "hashbrown 0.14.5", ] [[package]] name = "matchit" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed1202b2a6f884ae56f04cff409ab315c5ce26b5e58d7412e484f01fd52f52ef" +checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" [[package]] name = "memchr" -version = "2.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f232d6ef707e1956a43342693d2a31e72989554d58299d7a88738cc95b0d35c" - -[[package]] -name = "memoffset" -version = "0.9.0" +version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" -dependencies = [ - "autocfg", -] +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" [[package]] name = "mime" @@ -1645,34 +2144,40 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.7.1" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" +checksum = 
"b8a240ddb74feaf34a79a7add65a741f3167852fba007066dcac1ca548d89c08" dependencies = [ "adler", ] [[package]] name = "mio" -version = "0.8.8" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "927a765cd3fc26206e66b296465fa9d3e5ab003e651c1b3c060e7956d96b19d2" +checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" dependencies = [ + "hermit-abi 0.3.9", "libc", "wasi", - "windows-sys", + "windows-sys 0.52.0", ] +[[package]] +name = "mirai-annotations" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9be0862c1b3f26a88803c4a49de6889c10e608b3ee9344e6ef5b45fb37ad3d1" + [[package]] name = "mockall" -version = "0.11.4" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c84490118f2ee2d74570d114f3d0493cbf02790df303d2707606c3e14e07c96" +checksum = "d4c28b3fb6d753d28c20e826cd46ee611fda1cf3cde03a443a974043247c065a" dependencies = [ "cfg-if", "downcast", "fragile", - "lazy_static", "mockall_derive", "predicates", "predicates-tree", @@ -1680,30 +2185,30 @@ dependencies = [ [[package]] name = "mockall_derive" -version = "0.11.4" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22ce75669015c4f47b289fd4d4f56e894e4c96003ffdf3ac51313126f94c6cbb" +checksum = "341014e7f530314e9a1fdbc7400b244efea7122662c96bfa248c31da5bfb2020" dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 1.0.109", + "syn 2.0.74", ] [[package]] name = "multimap" -version = "0.9.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70db9248a93dc36a36d9a47898caa007a32755c7ad140ec64eeeb50d5a730631" +checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03" dependencies = [ "serde", ] [[package]] name = "mysql" -version = "24.0.0" +version = "25.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"cfe2babc5f5b354eab9c0a0e40da3e69c4d77421c8b9b6ee03f97acc75bd7955" +checksum = "c6ad644efb545e459029b1ffa7c969d830975bd76906820913247620df10050b" dependencies = [ "bufstream", "bytes", @@ -1715,45 +2220,45 @@ dependencies = [ "mysql_common", "named_pipe", "native-tls", - "once_cell", "pem", "percent-encoding", "serde", "serde_json", - "socket2 0.5.4", + "socket2 0.5.7", "twox-hash", "url", ] [[package]] name = "mysql-common-derive" -version = "0.30.2" +version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56b0d8a0db9bf6d2213e11f2c701cb91387b0614361625ab7b9743b41aa4938f" +checksum = "afe0450cc9344afff34915f8328600ab5ae19260802a334d0f72d2d5bdda3bfe" dependencies = [ "darling", - "heck", + "heck 0.4.1", "num-bigint", - "proc-macro-crate 1.3.1", + "proc-macro-crate", "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.35", + "syn 2.0.74", "termcolor", "thiserror", ] [[package]] name = "mysql_common" -version = "0.30.6" +version = "0.32.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57349d5a326b437989b6ee4dc8f2f34b0cc131202748414712a8e7d98952fc8c" +checksum = "478b0ff3f7d67b79da2b96f56f334431aef65e15ba4b29dd74a4236e29582bdc" dependencies = [ - "base64 0.21.4", + "base64 0.21.7", "bigdecimal", "bindgen", - "bitflags 2.4.0", + "bitflags 2.6.0", "bitvec", + "btoi", "byteorder", "bytes", "cc", @@ -1762,7 +2267,6 @@ dependencies = [ "flate2", "frunk", "lazy_static", - "lexical", "mysql-common-derive", "num-bigint", "num-traits", @@ -1779,6 +2283,7 @@ dependencies = [ "thiserror", "time", "uuid", + "zstd", ] [[package]] @@ -1792,11 +2297,10 @@ dependencies = [ [[package]] name = "native-tls" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" +checksum = "a8614eb2c83d59d1c8cc974dd3f920198647674a0a035e1af1fa58707e317466" dependencies = [ - "lazy_static", "libc", "log", 
"openssl", @@ -1850,79 +2354,77 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e9e591e719385e6ebaeb5ce5d3887f7d5676fceca6411d1925ccc95745f3d6f7" [[package]] -name = "normalize-line-endings" -version = "0.3.0" +name = "nu-ansi-term" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be" +checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +dependencies = [ + "overload", + "winapi", +] [[package]] name = "num-bigint" -version = "0.4.4" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "608e7659b5c3d7cba262d894801b9ec9d00de989e8a82bd4bef91d08da45cdc0" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" dependencies = [ - "autocfg", "num-integer", "num-traits", ] +[[package]] +name = "num-conv" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + [[package]] name = "num-integer" -version = "0.1.45" +version = "0.1.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" dependencies = [ - "autocfg", "num-traits", ] [[package]] name = "num-traits" -version = "0.2.16" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f30b0abd723be7e2ffca1272140fac1a2f084c77ec3e123c192b66af1ee9e6c2" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", ] -[[package]] -name = "num_cpus" -version = "1.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" -dependencies = [ - 
"hermit-abi", - "libc", -] - [[package]] name = "object" -version = "0.32.1" +version = "0.36.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0" +checksum = "27b64972346851a39438c60b341ebc01bba47464ae329e55cf343eb93964efd9" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.18.0" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" [[package]] name = "oorandom" -version = "11.1.3" +version = "11.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" +checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] name = "openssl" -version = "0.10.57" +version = "0.10.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bac25ee399abb46215765b1cb35bc0212377e58a061560d8b29b024fd0430e7c" +checksum = "9529f4786b70a3e8c61e11179af17ab6188ad8d0ded78c5529441ed39d4bd9c1" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.6.0", "cfg-if", "foreign-types", "libc", @@ -1939,7 +2441,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.35", + "syn 2.0.74", ] [[package]] @@ -1948,43 +2450,35 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" -[[package]] -name = "openssl-src" -version = "300.1.3+3.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd2c101a165fff9935e34def4669595ab1c7847943c42be86e21503e482be107" -dependencies = [ - "cc", -] - [[package]] name = "openssl-sys" -version = "0.9.93" +version = 
"0.9.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db4d56a4c0478783083cfafcc42493dd4a981d41669da64b4572a2a089b51b1d" +checksum = "7f9e8deee91df40a943c71b917e5874b951d32a802526c85721ce3b776c929d6" dependencies = [ "cc", "libc", - "openssl-src", "pkg-config", "vcpkg", ] [[package]] -name = "ordered-multimap" -version = "0.4.3" +name = "overload" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccd746e37177e1711c20dd619a1620f34f5c8b569c53590a72dedd5344d8924a" -dependencies = [ - "dlv-list", - "hashbrown 0.12.3", -] +checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" + +[[package]] +name = "parking" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" [[package]] name = "parking_lot" -version = "0.12.1" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" +checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" dependencies = [ "lock_api", "parking_lot_core", @@ -1992,115 +2486,125 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.8" +version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93f00c865fe7cabf650081affecd3871070f26767e7b2070a3ffae14c654b447" +checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", "redox_syscall", "smallvec", - "windows-targets", + "windows-targets 0.52.6", ] [[package]] -name = "pathdiff" -version = "0.2.1" +name = "paste" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8835116a5c179084a830efb3adc117ab007512b535bc1a21c991d3b32a6b44dd" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" 
[[package]] -name = "peeking_take_while" -version = "0.1.2" +name = "pear" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bdeeaa00ce488657faba8ebf44ab9361f9365a97bd39ffb8a60663f57ff4b467" +dependencies = [ + "inlinable_string", + "pear_codegen", + "yansi", +] + +[[package]] +name = "pear_codegen" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" +checksum = "4bab5b985dc082b345f812b7df84e1bef27e7207b39e448439ba8bd69c93f147" +dependencies = [ + "proc-macro2", + "proc-macro2-diagnostics", + "quote", + "syn 2.0.74", +] [[package]] name = "pem" -version = "2.0.1" +version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b13fe415cdf3c8e44518e18a7c95a13431d9bdf6d15367d82b23c377fdd441a" +checksum = "8e459365e590736a54c3fa561947c84837534b8e9af6fc5bf781307e82658fae" dependencies = [ - "base64 0.21.4", + "base64 0.22.1", "serde", ] [[package]] name = "percent-encoding" -version = "2.3.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] -name = "pest" -version = "2.7.3" +name = "phf" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7a4d085fd991ac8d5b05a147b437791b4260b76326baf0fc60cf7c9c27ecd33" +checksum = "ade2d8b8f33c7333b51bcf0428d37e217e9f32192ae4772156f65063b8ce03dc" dependencies = [ - "memchr", - "thiserror", - "ucd-trie", + "phf_shared", ] [[package]] -name = "pest_derive" -version = "2.7.3" +name = "phf_codegen" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2bee7be22ce7918f641a33f08e3f43388c7656772244e2bbb2477f44cc9021a" +checksum = 
"e8d39688d359e6b34654d328e262234662d16cc0f60ec8dcbe5e718709342a5a" dependencies = [ - "pest", - "pest_generator", + "phf_generator", + "phf_shared", ] [[package]] -name = "pest_generator" -version = "2.7.3" +name = "phf_generator" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1511785c5e98d79a05e8a6bc34b4ac2168a0e3e92161862030ad84daa223141" +checksum = "48e4cc64c2ad9ebe670cb8fd69dd50ae301650392e81c05f9bfcb2d5bdbc24b0" dependencies = [ - "pest", - "pest_meta", - "proc-macro2", - "quote", - "syn 2.0.35", + "phf_shared", + "rand", ] [[package]] -name = "pest_meta" -version = "2.7.3" +name = "phf_shared" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b42f0394d3123e33353ca5e1e89092e533d2cc490389f2bd6131c43c634ebc5f" +checksum = "90fcb95eef784c2ac79119d1dd819e162b5da872ce6f3c3abe1e8ca1c082f72b" dependencies = [ - "once_cell", - "pest", - "sha2", + "siphasher", ] [[package]] name = "pin-project" -version = "1.1.3" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422" +checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.3" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" +checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.35", + "syn 2.0.74", ] [[package]] name = "pin-project-lite" -version = "0.2.13" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" +checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" [[package]] 
name = "pin-utils" @@ -2108,17 +2612,28 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "piper" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96c8c490f422ef9a4efd2cb5b42b76c8613d7e7dfc1caf667b8a3350a5acc066" +dependencies = [ + "atomic-waker", + "fastrand 2.1.0", + "futures-io", +] + [[package]] name = "pkg-config" -version = "0.3.27" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" +checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" [[package]] name = "plotters" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2c224ba00d7cadd4d5c660deaf2098e5e80e07846537c51f9cfa4be50c1fd45" +checksum = "a15b6eccb8484002195a3e44fe65a4ce8e93a625797a063735536fd59cb01cf3" dependencies = [ "num-traits", "plotters-backend", @@ -2129,72 +2644,108 @@ dependencies = [ [[package]] name = "plotters-backend" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e76628b4d3a7581389a35d5b6e2139607ad7c75b17aed325f210aa91f4a9609" +checksum = "414cec62c6634ae900ea1c56128dfe87cf63e7caece0852ec76aba307cebadb7" [[package]] name = "plotters-svg" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38f6d39893cca0701371e3c27294f09797214b86f1fb951b89ade8ec04e2abab" +checksum = "81b30686a7d9c3e010b84284bdd26a29f2138574f52f5eb6f794fc0ad924e705" dependencies = [ "plotters-backend", ] +[[package]] +name = "polling" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b2d323e8ca7996b3e23126511a523f7e62924d93ecd5ae73b333815b0eb3dce" +dependencies = [ 
+ "autocfg", + "bitflags 1.3.2", + "cfg-if", + "concurrent-queue", + "libc", + "log", + "pin-project-lite", + "windows-sys 0.48.0", +] + +[[package]] +name = "polling" +version = "3.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc2790cd301dec6cd3b7a025e4815cf825724a51c98dccfe6a3e55f05ffb6511" +dependencies = [ + "cfg-if", + "concurrent-queue", + "hermit-abi 0.4.0", + "pin-project-lite", + "rustix 0.38.34", + "tracing", + "windows-sys 0.59.0", +] + +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + [[package]] name = "ppv-lite86" -version = "0.2.17" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" +dependencies = [ + "zerocopy", +] [[package]] name = "predicates" -version = "2.1.5" +version = "3.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59230a63c37f3e18569bdb90e4a89cbf5bf8b06fea0b84e65ea10cc4df47addd" +checksum = "7e9086cc7640c29a356d1a29fd134380bee9d8f79a17410aa76e7ad295f42c97" dependencies = [ - "difflib", - "float-cmp", - "itertools", - "normalize-line-endings", + "anstyle", "predicates-core", - "regex", ] [[package]] name = "predicates-core" -version = "1.0.6" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b794032607612e7abeb4db69adb4e33590fa6cf1149e95fd7cb00e634b92f174" +checksum = "ae8177bee8e75d6846599c6b9ff679ed51e882816914eec639944d7c9aa11931" [[package]] name = "predicates-tree" -version = "1.0.9" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "368ba315fb8c5052ab692e68a0eefec6ec57b23a36959c14496f0b0df2c0cecf" +checksum = 
"41b740d195ed3166cd147c8047ec98db0e22ec019eb8eeb76d343b795304fb13" dependencies = [ "predicates-core", "termtree", ] [[package]] -name = "proc-macro-crate" -version = "0.1.5" +name = "prettyplease" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d6ea3c4595b96363c13943497db34af4460fb474a95c43f4446ad341b8c9785" +checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e" dependencies = [ - "toml 0.5.11", + "proc-macro2", + "syn 2.0.74", ] [[package]] name = "proc-macro-crate" -version = "1.3.1" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f4c021e1093a56626774e81216a4ce732a735e5bad4868a03f3ed65ca0c3919" +checksum = "6d37c51ca738a55da99dc0c4a34860fd675453b8b36209178c2249bb13651284" dependencies = [ - "once_cell", - "toml_edit 0.19.15", + "toml_edit 0.21.1", ] [[package]] @@ -2223,13 +2774,26 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.67" +version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d433d9f1a3e8c1263d9456598b16fec66f4acc9a74dacffd35c7bb09b3a1328" +checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" dependencies = [ "unicode-ident", ] +[[package]] +name = "proc-macro2-diagnostics" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.74", + "version_check", + "yansi", +] + [[package]] name = "ptr_meta" version = "0.1.4" @@ -2250,11 +2814,22 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "quickcheck" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6" +dependencies = [ + "env_logger", + "log", + "rand", +] + [[package]] name = "quote" -version = "1.0.33" +version 
= "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" +checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" dependencies = [ "proc-macro2", ] @@ -2272,9 +2847,9 @@ dependencies = [ [[package]] name = "r2d2_mysql" -version = "24.0.0" +version = "25.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fe5127e6c21971cdb9580f2f54cbe6d9c2226eb861036c3ca6d390c25f52574" +checksum = "93963fe09ca35b0311d089439e944e42a6cb39bf8ea323782ddb31240ba2ae87" dependencies = [ "mysql", "r2d2", @@ -2282,9 +2857,9 @@ dependencies = [ [[package]] name = "r2d2_sqlite" -version = "0.22.0" +version = "0.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99f31323d6161385f385046738df520e0e8694fa74852d35891fc0be08348ddc" +checksum = "eb14dba8247a6a15b7fdbc7d389e2e6f03ee9f184f87117706d509c092dfe846" dependencies = [ "r2d2", "rusqlite", @@ -2329,9 +2904,9 @@ dependencies = [ [[package]] name = "rayon" -version = "1.7.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d2df5196e37bcc87abebc0053e20787d73847bb33134a69841207dd0a47f03b" +checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" dependencies = [ "either", "rayon-core", @@ -2339,30 +2914,28 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.11.0" +version = "1.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b8f95bd6966f5c87776639160a66bd8ab9895d9d4ab01ddba9fc60661aebe8d" +checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" dependencies = [ - "crossbeam-channel", "crossbeam-deque", "crossbeam-utils", - "num_cpus", ] [[package]] name = "redox_syscall" -version = "0.3.5" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"567664f262709473930a4bf9e51bf2ebf3348f2e748ccc50dea20646858f8f29" +checksum = "2a908a6e00f1fdd0dfd9c0eb08ce85126f6d8bbda50017e74bc4a4b7d4a926a4" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.6.0", ] [[package]] name = "regex" -version = "1.9.5" +version = "1.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "697061221ea1b4a94a624f67d0ae2bfe4e22b8a17b6a192afb11046542cc8c47" +checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" dependencies = [ "aho-corasick", "memchr", @@ -2372,9 +2945,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.3.8" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2f401f4955220693b56f8ec66ee9c78abffd8d1c4f23dc41a23839eb88f0795" +checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" dependencies = [ "aho-corasick", "memchr", @@ -2383,26 +2956,32 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.7.5" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" + +[[package]] +name = "relative-path" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" +checksum = "ba39f3699c378cd8970968dcbff9c43159ea4cfbd88d43c00b22f2ef10a435d2" [[package]] name = "rend" -version = "0.4.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "581008d2099240d37fb08d77ad713bcaec2c4d89d50b5b21a8bb1996bbab68ab" +checksum = "71fe3824f5629716b1589be05dacd749f6aa084c87e00e016714a8cdfccc997c" dependencies = [ "bytecheck", ] [[package]] name = "reqwest" -version = "0.11.20" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e9ad3fe7488d7e34558a2033d45a0c90b72d97b4f80705666fea71472e2e6a1" +checksum = 
"c7d6d2a27d57148378eb5e111173f4276ad26340ecc5c49a4a2152167a2d6a37" dependencies = [ - "base64 0.21.4", + "base64 0.22.1", "bytes", "encoding_rs", "futures-core", @@ -2410,8 +2989,11 @@ dependencies = [ "h2", "http", "http-body", + "http-body-util", "hyper", + "hyper-rustls", "hyper-tls", + "hyper-util", "ipnet", "js-sys", "log", @@ -2420,9 +3002,12 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", + "rustls-pemfile", "serde", "serde_json", "serde_urlencoded", + "sync_wrapper 1.0.1", + "system-configuration", "tokio", "tokio-native-tls", "tower-service", @@ -2435,27 +3020,37 @@ dependencies = [ [[package]] name = "ring" -version = "0.16.20" +version = "0.17.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3053cf52e236a3ed746dfc745aa9cacf1b791d846bdaf412f60a8d7d6e17c8fc" +checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", + "cfg-if", + "getrandom", "libc", - "once_cell", "spin", "untrusted", - "web-sys", - "winapi", + "windows-sys 0.52.0", +] + +[[package]] +name = "ringbuf" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c65e4c865bc3d2e3294493dff0acf7e6c259d066e34e22059fa9c39645c3636" +dependencies = [ + "crossbeam-utils", ] [[package]] name = "rkyv" -version = "0.7.42" +version = "0.7.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0200c8230b013893c0b2d6213d6ec64ed2b9be2e0e016682b7224ff82cff5c58" +checksum = "5cba464629b3394fc4dbc6f940ff8f5b4ff5c7aef40f29166fd4ad12acbc99c0" dependencies = [ "bitvec", "bytecheck", + "bytes", "hashbrown 0.12.3", "ptr_meta", "rend", @@ -2467,9 +3062,9 @@ dependencies = [ [[package]] name = "rkyv_derive" -version = "0.7.42" +version = "0.7.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2e06b915b5c230a17d7a736d1e2e63ee753c256a8614ef3f5147b13a4f5541d" +checksum = 
"a7dddfff8de25e6f62b9d64e6e432bf1c6736c57d20323e15ee10435fbda7c65" dependencies = [ "proc-macro2", "quote", @@ -2477,23 +3072,42 @@ dependencies = [ ] [[package]] -name = "ron" -version = "0.7.1" +name = "rstest" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88073939a61e5b7680558e6be56b419e208420c2adb92be54921fa6b72283f1a" +checksum = "7b423f0e62bdd61734b67cd21ff50871dfaeb9cc74f869dcd6af974fbcb19936" dependencies = [ - "base64 0.13.1", - "bitflags 1.3.2", - "serde", + "futures", + "futures-timer", + "rstest_macros", + "rustc_version", +] + +[[package]] +name = "rstest_macros" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c5e1711e7d14f74b12a58411c542185ef7fb7f2e7f8ee6e2940a883628522b42" +dependencies = [ + "cfg-if", + "glob", + "proc-macro-crate", + "proc-macro2", + "quote", + "regex", + "relative-path", + "rustc_version", + "syn 2.0.74", + "unicode-ident", ] [[package]] name = "rusqlite" -version = "0.29.0" +version = "0.32.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "549b9d036d571d42e6e85d1c1425e2ac83491075078ca9a15be021c56b1641f2" +checksum = "7753b721174eb8ff87a9a0e799e2d7bc3749323e773db92e0984debb00019d6e" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.6.0", "fallible-iterator", "fallible-streaming-iterator", "hashlink", @@ -2501,21 +3115,11 @@ dependencies = [ "smallvec", ] -[[package]] -name = "rust-ini" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6d5f2436026b4f6e79dc829837d467cc7e9a55ee40e750d716713540715a2df" -dependencies = [ - "cfg-if", - "ordered-multimap", -] - [[package]] name = "rust_decimal" -version = "1.32.0" +version = "1.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c4216490d5a413bc6d10fa4742bd7d4955941d062c0ef873141d6b0e7b30fd" +checksum = "1790d1c4c0ca81211399e0e0af16333276f375209e71a37b67698a373db5b47a" 
dependencies = [ "arrayvec", "borsh", @@ -2529,9 +3133,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" +checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" [[package]] name = "rustc-hash" @@ -2550,59 +3154,84 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.13" +version = "0.37.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fea8ca367a3a01fe35e6943c400addf443c0f57670e6ec51196f71a4b8762dd2" +dependencies = [ + "bitflags 1.3.2", + "errno", + "io-lifetimes", + "libc", + "linux-raw-sys 0.3.8", + "windows-sys 0.48.0", +] + +[[package]] +name = "rustix" +version = "0.38.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7db8590df6dfcd144d22afd1b83b36c21a18d7cbc1dc4bb5295a8712e9eb662" +checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" dependencies = [ - "bitflags 2.4.0", + "bitflags 2.6.0", "errno", "libc", - "linux-raw-sys", - "windows-sys", + "linux-raw-sys 0.4.14", + "windows-sys 0.52.0", ] [[package]] name = "rustls" -version = "0.21.7" +version = "0.23.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd8d6c9f025a446bc4d18ad9632e69aec8f287aa84499ee335599fabd20c3fd8" +checksum = "c58f8c84392efc0a126acce10fa59ff7b3d2ac06ab451a33f2741989b806b044" dependencies = [ - "log", - "ring", + "aws-lc-rs", + "once_cell", + "rustls-pki-types", "rustls-webpki", - "sct", + "subtle", + "zeroize", ] [[package]] name = "rustls-pemfile" -version = "1.0.3" +version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d3987094b1d07b653b7dfdc3f70ce9a1da9c51ac18c1b06b662e4f9a0e9f4b2" +checksum = "196fe16b00e106300d3e45ecfcb764fa292a535d7326a29a5875c579c7417425" dependencies = [ - "base64 0.21.4", + 
"base64 0.22.1", + "rustls-pki-types", ] +[[package]] +name = "rustls-pki-types" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc0a2ce646f8655401bb81e7927b812614bd5d91dbc968696be50603510fcaf0" + [[package]] name = "rustls-webpki" -version = "0.101.5" +version = "0.102.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45a27e3b59326c16e23d30aeb7a36a24cc0d29e71d68ff611cdfb4a01d013bed" +checksum = "8e6b52d4fda176fd835fdc55a835d4a89b8499cad995885a21149d5ad62f852e" dependencies = [ + "aws-lc-rs", "ring", + "rustls-pki-types", "untrusted", ] [[package]] name = "rustversion" -version = "1.0.14" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" +checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" [[package]] name = "ryu" -version = "1.0.15" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" +checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" [[package]] name = "same-file" @@ -2621,11 +3250,11 @@ checksum = "ece8e78b2f38ec51c51f5d475df0a7187ba5111b2a28bdc761ee05b075d40a71" [[package]] name = "schannel" -version = "0.1.22" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" +checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" dependencies = [ - "windows-sys", + "windows-sys 0.52.0", ] [[package]] @@ -2643,16 +3272,6 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" -[[package]] -name = "sct" -version = "0.7.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "seahash" version = "4.1.0" @@ -2661,11 +3280,11 @@ checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b" [[package]] name = "security-framework" -version = "2.9.2" +version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" +checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.6.0", "core-foundation", "core-foundation-sys", "libc", @@ -2674,9 +3293,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.9.1" +version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" +checksum = "75da29fe9b9b08fe9d6b22b5b4bcbc75d8db3aa31e639aa56bb62e9d46bfceaf" dependencies = [ "core-foundation-sys", "libc", @@ -2684,24 +3303,24 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.18" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0293b4b29daaf487284529cc2f5675b8e57c61f70167ba415a463651fd6a918" +checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" [[package]] name = "serde" -version = "1.0.188" +version = "1.0.206" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf9e0fcba69a370eed61bcf2b728575f726b50b55cba78064753d708ddc7549e" +checksum = "5b3e4cd94123dd520a128bcd11e34d9e9e423e7e3e50425cb1b4b1e3549d0284" dependencies = [ "serde_derive", ] [[package]] name = "serde_bencode" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"934d8bdbaa0126dafaea9a8833424a211d9661897717846c6bb782349ca1c30d" +checksum = "a70dfc7b7438b99896e7f8992363ab8e2c4ba26aa5ec675d32d1c3c2c33d413e" dependencies = [ "serde", "serde_bytes", @@ -2709,40 +3328,55 @@ dependencies = [ [[package]] name = "serde_bytes" -version = "0.11.12" +version = "0.11.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab33ec92f677585af6d88c65593ae2375adde54efdbf16d597f2cbc7a6d368ff" +checksum = "387cc504cb06bb40a96c8e04e951fe01854cf6bc921053c954e4a606d9675c6a" dependencies = [ "serde", ] [[package]] name = "serde_derive" -version = "1.0.188" +version = "1.0.206" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4eca7ac642d82aa35b60049a6eccb4be6be75e599bd2e9adb5f875a737654af2" +checksum = "fabfb6138d2383ea8208cf98ccf69cdfb1aff4088460681d84189aa259762f97" dependencies = [ "proc-macro2", "quote", - "syn 2.0.35", + "syn 2.0.74", +] + +[[package]] +name = "serde_html_form" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8de514ef58196f1fc96dcaef80fe6170a1ce6215df9687a93fe8300e773fefc5" +dependencies = [ + "form_urlencoded", + "indexmap 2.3.0", + "itoa", + "ryu", + "serde", ] [[package]] name = "serde_json" -version = "1.0.107" +version = "1.0.124" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65" +checksum = "66ad62847a56b3dba58cc891acd13884b9c61138d330c0d7b6181713d4fce38d" dependencies = [ + "indexmap 2.3.0", "itoa", + "memchr", "ryu", "serde", ] [[package]] name = "serde_path_to_error" -version = "0.1.14" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4beec8bce849d58d06238cb50db2e1c417cfeafa4c63f692b15c82b7c80f8335" +checksum = "af99884400da37c88f5e9146b7f1fd0fbcae8f6eec4e9da38b67d05486f814a6" dependencies = [ "itoa", "serde", @@ -2750,20 +3384,20 @@ dependencies = [ [[package]] name = 
"serde_repr" -version = "0.1.16" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8725e1dfadb3a50f7e5ce0b1a540466f6ed3fe7a0fca2ac2b8b831d31316bd00" +checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.35", + "syn 2.0.74", ] [[package]] name = "serde_spanned" -version = "0.6.3" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96426c9936fd7a0124915f9185ea1d20aa9445cc9821142f0a73bc9207a2e186" +checksum = "eb5b1b31579f3811bf615c144393417496f152e12ac8b7663bf664f4a815306d" dependencies = [ "serde", ] @@ -2782,16 +3416,17 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.3.0" +version = "3.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ca3b16a3d82c4088f343b7480a93550b3eabe1a358569c2dfe38bbcead07237" +checksum = "69cecfa94848272156ea67b2b1a53f20fc7bc638c4a46d2f8abde08f05f4b857" dependencies = [ - "base64 0.21.4", + "base64 0.22.1", "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.0.0", + "indexmap 2.3.0", "serde", + "serde_derive", "serde_json", "serde_with_macros", "time", @@ -2799,21 +3434,21 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.3.0" +version = "3.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e6be15c453eb305019bfa438b1593c731f36a289a7853f7707ee29e870b3b3c" +checksum = "a8fee4991ef4f274617a51ad4af30519438dacb2f56ac773b08a1922ff743350" dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.35", + "syn 2.0.74", ] [[package]] name = "sha1" -version = "0.10.5" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" +checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" dependencies = [ "cfg-if", "cpufeatures", @@ -2822,26 +3457,35 @@ 
dependencies = [ [[package]] name = "sha2" -version = "0.10.7" +version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "479fb9d862239e610720565ca91403019f2f00410f1864c5aa7479b950a76ed8" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ "cfg-if", "cpufeatures", "digest", ] +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + [[package]] name = "shlex" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7cee0529a6d40f580e7a5e6c495c8fbfe21b7b52795ed4bb5e62cdf92bc6380" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook-registry" -version = "1.4.1" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" +checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" dependencies = [ "libc", ] @@ -2852,6 +3496,12 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f27f6278552951f1f2b8cf9da965d10969b2efdea95a6ec47987ab46edfe263a" +[[package]] +name = "siphasher" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" + [[package]] name = "slab" version = "0.4.9" @@ -2863,15 +3513,15 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.11.0" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9" +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = 
"socket2" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64a4a911eed85daf18834cfaa86a79b7d266ff93ff5ba14005426219480ed662" +checksum = "9f7916fc008ca5542385b89a3d3ce689953c143e9304a9bf8beec1de48994c0d" dependencies = [ "libc", "winapi", @@ -2879,19 +3529,19 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.4" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4031e820eb552adee9295814c0ced9e5cf38ddf1e8b7d566d6de8e2538ea989e" +checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" dependencies = [ "libc", - "windows-sys", + "windows-sys 0.52.0", ] [[package]] name = "spin" -version = "0.5.2" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" +checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" [[package]] name = "static_assertions" @@ -2901,9 +3551,9 @@ checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" [[package]] name = "strsim" -version = "0.10.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" [[package]] name = "subprocess" @@ -2915,6 +3565,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "subtle" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + [[package]] name = "syn" version = "1.0.109" @@ -2928,20 +3584,59 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.35" +version = "2.0.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59bf04c28bee9043ed9ea1e41afc0552288d3aba9c6efdd78903b802926f4879" 
+checksum = "1fceb41e3d546d0bd83421d3409b1460cc7444cd389341a4c880fe7a042cb3d7" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] +[[package]] +name = "syn_derive" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1329189c02ff984e9736652b1631330da25eaa6bc639089ed4915d25446cbe7b" +dependencies = [ + "proc-macro-error", + "proc-macro2", + "quote", + "syn 2.0.74", +] + [[package]] name = "sync_wrapper" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" + +[[package]] +name = "sync_wrapper" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" + +[[package]] +name = "system-configuration" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" +dependencies = [ + "bitflags 1.3.2", + "core-foundation", + "system-configuration-sys", +] + +[[package]] +name = "system-configuration-sys" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" +dependencies = [ + "core-foundation-sys", + "libc", +] [[package]] name = "tap" @@ -2949,24 +3644,41 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" +[[package]] +name = "tdyne-peer-id" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dde285ba6f835045648f9d4f4703f778aaafb47421d9c5dff47be1534370c3e" + +[[package]] +name = "tdyne-peer-id-registry" +version = "0.1.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "1923b2d356e080e8bee847c39b58de293309df2fe0bc9ecd859ae3210e868c25" +dependencies = [ + "phf", + "phf_codegen", + "tdyne-peer-id", +] + [[package]] name = "tempfile" -version = "3.8.0" +version = "3.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb94d2f3cc536af71caac6b6fcebf65860b347e7ce0cc9ebe8f70d3e521054ef" +checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64" dependencies = [ "cfg-if", - "fastrand", - "redox_syscall", - "rustix", - "windows-sys", + "fastrand 2.1.0", + "once_cell", + "rustix 0.38.34", + "windows-sys 0.59.0", ] [[package]] name = "termcolor" -version = "1.2.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be55cf8942feac5c765c2c993422806843c9a9a45d4d5c407ad6dd2ea95eb9b6" +checksum = "06794f8f6c5c898b3275aebefa6b8a1cb24cd2c6c79397ab15774837a0bc5755" dependencies = [ "winapi-util", ] @@ -2979,32 +3691,44 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "thiserror" -version = "1.0.48" +version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d6d7a740b8a666a7e828dd00da9c0dc290dff53154ea77ac109281de90589b7" +checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.48" +version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49922ecae66cc8a249b77e68d1d0623c1b2c514f0060c27cdc68bd62a1219d35" +checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" dependencies = [ "proc-macro2", "quote", - "syn 2.0.35", + "syn 2.0.74", +] + +[[package]] +name = "thread_local" +version = "1.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" +dependencies = [ + "cfg-if", + "once_cell", ] [[package]] name = "time" -version = "0.3.28" +version = "0.3.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17f6bb557fd245c28e6411aa56b6403c689ad95061f50e4be16c274e70a17e48" +checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" dependencies = [ "deranged", "itoa", + "num-conv", + "powerfmt", "serde", "time-core", "time-macros", @@ -3012,16 +3736,17 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7300fbefb4dadc1af235a9cef3737cea692a9d97e1b9cbcd4ebdae6f8868e6fb" +checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.14" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a942f44339478ef67935ab2bbaec2fb0322496cf3cbe84b261e06ac3814c572" +checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" dependencies = [ + "num-conv", "time-core", ] @@ -3037,9 +3762,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.6.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +checksum = "445e881f4f6d382d5f27c034e25eb92edd7c784ceab92a0937db7f2e9471b938" dependencies = [ "tinyvec_macros", ] @@ -3052,31 +3777,30 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.32.0" +version = "1.39.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17ed6077ed6cd6c74735e21f37eb16dc3935f96878b1fe961074089cc80893f9" +checksum = "daa4fb1bc778bd6f04cbfc4bb2d06a7396a8f299dc33ea1900cedaa316f467b1" dependencies = [ "backtrace", "bytes", "libc", "mio", - 
"num_cpus", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.4", + "socket2 0.5.7", "tokio-macros", - "windows-sys", + "windows-sys 0.52.0", ] [[package]] name = "tokio-macros" -version = "2.1.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" +checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.35", + "syn 2.0.74", ] [[package]] @@ -3091,146 +3815,164 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.24.1" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" +checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ "rustls", + "rustls-pki-types", "tokio", ] [[package]] name = "tokio-util" -version = "0.7.8" +version = "0.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "806fe8c2c87eccc8b3267cbae29ed3ab2d0bd37fca70ab622e46aaa9375ddb7d" +checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" dependencies = [ "bytes", "futures-core", "futures-sink", "pin-project-lite", "tokio", - "tracing", -] - -[[package]] -name = "toml" -version = "0.5.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" -dependencies = [ - "serde", ] [[package]] name = "toml" -version = "0.8.0" +version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c226a7bba6d859b63c92c4b4fe69c5b6b72d0cb897dbc8e6012298e6154cb56e" +checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.20.0", + "toml_edit 0.22.20", ] [[package]] name = "toml_datetime" -version = 
"0.6.3" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" +checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" dependencies = [ "serde", ] [[package]] name = "toml_edit" -version = "0.19.15" +version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" +checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" dependencies = [ - "indexmap 2.0.0", + "indexmap 2.3.0", "toml_datetime", - "winnow", + "winnow 0.5.40", ] [[package]] name = "toml_edit" -version = "0.20.0" +version = "0.22.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ff63e60a958cefbb518ae1fd6566af80d9d4be430a33f3723dfc47d1d411d95" +checksum = "583c44c02ad26b0c3f3066fe629275e50627026c51ac2e595cca4c230ce1ce1d" dependencies = [ - "indexmap 2.0.0", + "indexmap 2.3.0", "serde", "serde_spanned", "toml_datetime", - "winnow", + "winnow 0.6.18", ] [[package]] name = "torrust-tracker" -version = "3.0.0-alpha.11" +version = "3.0.0-alpha.12" dependencies = [ + "anyhow", "aquatic_udp_protocol", - "async-trait", "axum", "axum-client-ip", + "axum-extra", "axum-server", - "binascii", + "camino", "chrono", - "config", + "clap", + "crossbeam-skiplist", + "dashmap", "derive_more", - "fern", + "figment", "futures", + "futures-util", + "hex-literal", + "http-body", "hyper", + "hyper-util", "lazy_static", "local-ip-address", - "log", "mockall", "multimap", - "openssl", + "parking_lot", "percent-encoding", + "pin-project-lite", "r2d2", "r2d2_mysql", "r2d2_sqlite", "rand", + "regex", "reqwest", + "ringbuf", "serde", "serde_bencode", "serde_bytes", "serde_json", "serde_repr", - "serde_urlencoded", "serde_with", "thiserror", "tokio", + "torrust-tracker-clock", "torrust-tracker-configuration", "torrust-tracker-contrib-bencode", 
"torrust-tracker-located-error", "torrust-tracker-primitives", "torrust-tracker-test-helpers", + "torrust-tracker-torrent-repository", + "tower", "tower-http", + "trace", + "tracing", + "tracing-subscriber", + "url", "uuid", + "zerocopy", +] + +[[package]] +name = "torrust-tracker-clock" +version = "3.0.0-alpha.12" +dependencies = [ + "chrono", + "lazy_static", + "torrust-tracker-primitives", ] [[package]] name = "torrust-tracker-configuration" -version = "3.0.0-alpha.11" +version = "3.0.0-alpha.12" dependencies = [ - "config", - "log", + "camino", + "derive_more", + "figment", "serde", + "serde_json", "serde_with", "thiserror", - "toml 0.8.0", + "toml", "torrust-tracker-located-error", - "torrust-tracker-primitives", + "url", "uuid", ] [[package]] name = "torrust-tracker-contrib-bencode" -version = "3.0.0-alpha.11" +version = "3.0.0-alpha.12" dependencies = [ "criterion", "error-chain", @@ -3238,27 +3980,46 @@ dependencies = [ [[package]] name = "torrust-tracker-located-error" -version = "3.0.0-alpha.11" +version = "3.0.0-alpha.12" dependencies = [ - "log", "thiserror", + "tracing", ] [[package]] name = "torrust-tracker-primitives" -version = "3.0.0-alpha.11" +version = "3.0.0-alpha.12" dependencies = [ + "binascii", "derive_more", "serde", + "tdyne-peer-id", + "tdyne-peer-id-registry", + "thiserror", ] [[package]] name = "torrust-tracker-test-helpers" -version = "3.0.0-alpha.11" +version = "3.0.0-alpha.12" dependencies = [ - "lazy_static", "rand", "torrust-tracker-configuration", +] + +[[package]] +name = "torrust-tracker-torrent-repository" +version = "3.0.0-alpha.12" +dependencies = [ + "async-std", + "criterion", + "crossbeam-skiplist", + "dashmap", + "futures", + "parking_lot", + "rstest", + "tokio", + "torrust-tracker-clock", + "torrust-tracker-configuration", "torrust-tracker-primitives", ] @@ -3280,23 +4041,24 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.4.4" +version = "0.5.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "61c5bb1d698276a2443e5ecfabc1008bf15a36c12e6a7176e7bf089ea9131140" +checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" dependencies = [ "async-compression", - "bitflags 2.4.0", + "bitflags 2.6.0", "bytes", "futures-core", - "futures-util", "http", "http-body", - "http-range-header", + "http-body-util", "pin-project-lite", "tokio", "tokio-util", "tower-layer", "tower-service", + "tracing", + "uuid", ] [[package]] @@ -3311,32 +4073,93 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" +[[package]] +name = "trace" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ad0c048e114d19d1140662762bfdb10682f3bc806d8be18af846600214dd9af" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "tracing" -version = "0.1.37" +version = "0.1.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" +checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ - "cfg-if", "log", "pin-project-lite", + "tracing-attributes", "tracing-core", ] +[[package]] +name = "tracing-attributes" +version = "0.1.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.74", +] + [[package]] name = "tracing-core" -version = "0.1.31" +version = "0.1.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" +dependencies = [ + "once_cell", + "valuable", +] + +[[package]] +name = "tracing-log" +version = "0.2.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "0955b8137a1df6f1a2e9a37d8a6656291ff0297c1a97c24e0d8425fe2312f79a" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" dependencies = [ + "log", "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-serde" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc6b213177105856957181934e4920de57730fc69bf42c37ee5bb664d406d9e1" +dependencies = [ + "serde", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" +dependencies = [ + "nu-ansi-term", + "serde", + "serde_json", + "sharded-slab", + "smallvec", + "thread_local", + "tracing-core", + "tracing-log", + "tracing-serde", ] [[package]] name = "try-lock" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "twox-hash" @@ -3356,16 +4179,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" [[package]] -name = "ucd-trie" -version = "0.1.6" +name = "uncased" +version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" +checksum = "e1b88fcfe09e89d3866a5c11019378088af2d24c3fbd4f0543f96b479ec90697" +dependencies = [ + "version_check", +] [[package]] name = "unicode-bidi" -version = "0.3.13" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" +checksum = 
"08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" [[package]] name = "unicode-ident" @@ -3375,40 +4201,59 @@ checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] name = "unicode-normalization" -version = "0.1.22" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" +checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" dependencies = [ "tinyvec", ] [[package]] name = "untrusted" -version = "0.7.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" +checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.4.1" +version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "143b538f18257fac9cad154828a57c6bf5157e1aa604d4816b5995bf6de87ae5" +checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" dependencies = [ "form_urlencoded", "idna", "percent-encoding", + "serde", ] +[[package]] +name = "utf8parse" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" + [[package]] name = "uuid" -version = "1.4.1" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79daa5ed5740825c40b389c5e50312b9c86df53fccd33f281df655642b43869d" +checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" dependencies = [ "getrandom", "rand", ] +[[package]] +name = "valuable" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" + +[[package]] +name = "value-bag" +version = "1.9.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a84c137d37ab0142f0f2ddfe332651fdbf252e7b7dbb4e67b6c1f1b2e925101" + [[package]] name = "vcpkg" version = "0.2.15" @@ -3417,15 +4262,21 @@ checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" [[package]] name = "version_check" -version = "0.9.4" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "waker-fn" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +checksum = "317211a0dc0ceedd78fb2ca9a44aed3d7b9b26f81870d485c07122b4350673b7" [[package]] name = "walkdir" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71d857dc86794ca4c280d616f7da00d2dbfd8cd788846559a6813e6aa4b54ee" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" dependencies = [ "same-file", "winapi-util", @@ -3448,9 +4299,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.87" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" +checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -3458,24 +4309,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.87" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" +checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.35", + 
"syn 2.0.74", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.37" +version = "0.4.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" +checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" dependencies = [ "cfg-if", "js-sys", @@ -3485,9 +4336,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.87" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" +checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3495,33 +4346,45 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.87" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" +checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.35", + "syn 2.0.74", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.87" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" +checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" [[package]] name = "web-sys" -version = "0.3.64" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" +checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" dependencies = [ "js-sys", "wasm-bindgen", ] +[[package]] +name = "which" +version = "4.4.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" +dependencies = [ + "either", + "home", + "once_cell", + "rustix 0.38.34", +] + [[package]] name = "winapi" version = "0.3.9" @@ -3540,11 +4403,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.5" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ - "winapi", + "windows-sys 0.59.0", ] [[package]] @@ -3554,12 +4417,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] -name = "windows" -version = "0.48.0" +name = "windows-core" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" +checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets", + "windows-targets 0.52.6", ] [[package]] @@ -3568,7 +4431,25 @@ version = "0.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" dependencies = [ - "windows-targets", + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" 
+dependencies = [ + "windows-targets 0.52.6", ] [[package]] @@ -3577,13 +4458,29 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", + "windows_i686_gnullvm", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", ] [[package]] @@ -3592,59 +4489,116 @@ version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + [[package]] name = "windows_aarch64_msvc" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + [[package]] name = 
"windows_i686_gnu" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + [[package]] name = "windows_i686_msvc" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + [[package]] name = "windows_x86_64_gnu" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + [[package]] name = "windows_x86_64_gnullvm" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + [[package]] name = "windows_x86_64_msvc" version = "0.48.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" + +[[package]] +name = "winnow" +version = "0.5.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876" +dependencies = [ + "memchr", +] + [[package]] name = "winnow" -version = "0.5.15" +version = "0.6.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c2e3184b9c4e92ad5167ca73039d0c42476302ab603e2fec4487511f38ccefc" +checksum = "68a9bda4691f099d435ad181000724da8e5899daa10713c2d432552b9ccd3a6f" dependencies = [ "memchr", ] [[package]] name = "winreg" -version = "0.50.0" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" +checksum = "a277a57398d4bfa075df44f501a17cfdf8542d224f0d36095a2adc7aee4ef0a5" dependencies = [ "cfg-if", - "windows-sys", + "windows-sys 0.48.0", ] [[package]] @@ -3657,40 +4611,76 @@ dependencies = [ ] [[package]] -name = "yaml-rust" -version = "0.4.5" +name = "yansi" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" + +[[package]] +name = "zerocopy" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" +dependencies = [ + "byteorder", + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.7.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.74", 
+] + +[[package]] +name = "zeroize" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" +checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ - "linked-hash-map", + "proc-macro2", + "quote", + "syn 2.0.74", ] [[package]] name = "zstd" -version = "0.12.4" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a27595e173641171fc74a1232b7b1c7a7cb6e18222c11e9dfb9888fa424c53c" +checksum = "fcf2b778a664581e31e389454a7072dab1647606d44f7feea22cd5abb9c9f3f9" dependencies = [ "zstd-safe", ] [[package]] name = "zstd-safe" -version = "6.0.6" +version = "7.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee98ffd0b48ee95e6c5168188e44a54550b1564d9d530ee21d5f0eaed1069581" +checksum = "54a3ab4db68cea366acc5c897c7b4d4d1b8994a9cd6e6f841f8964566a419059" dependencies = [ - "libc", "zstd-sys", ] [[package]] name = "zstd-sys" -version = "2.0.8+zstd.1.5.5" +version = "2.0.13+zstd.1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5556e6ee25d32df2586c098bbfa278803692a20d0ab9565e049480d52707ec8c" +checksum = "38ff0f21cfee8f97d94cef41359e0c89aa6113028ab0291aa8ca0038995a95aa" dependencies = [ "cc", - "libc", "pkg-config", ] diff --git a/Cargo.toml b/Cargo.toml index 8c83d5834..7f9d211c3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,4 +1,5 @@ [package] +default-run = "torrust-tracker" name = "torrust-tracker" readme = "README.md" @@ -8,7 +9,7 @@ documentation.workspace = true edition.workspace = true homepage.workspace = true keywords.workspace = true -license-file.workspace = true 
+license.workspace = true publish.workspace = true repository.workspace = true rust-version.workspace = true @@ -22,65 +23,95 @@ documentation = "https://docs.rs/crate/torrust-tracker/" edition = "2021" homepage = "https://torrust.com/" keywords = ["bittorrent", "file-sharing", "peer-to-peer", "torrent", "tracker"] -license-file = "COPYRIGHT" +license = "AGPL-3.0-only" publish = true repository = "https://github.com/torrust/torrust-tracker" rust-version = "1.72" -version = "3.0.0-alpha.11" +version = "3.0.0-alpha.12" [dependencies] -aquatic_udp_protocol = "0.8" -async-trait = "0.1" -axum = "0.6" -axum-client-ip = "0.4" -axum-server = { version = "0.5", features = ["tls-rustls"] } -binascii = "0.1" -chrono = { version = "0.4", default-features = false, features = ["clock"] } -config = "0.13" -derive_more = "0.99" -fern = "0.6" -futures = "0.3" -hyper = "0.14" -lazy_static = "1.4" -log = { version = "0.4", features = ["release_max_level_info"] } -multimap = "0.9" -openssl = { version = "0.10", features = ["vendored"] } -percent-encoding = "2.2" -r2d2 = "0.8" -r2d2_mysql = "24.0" -r2d2_sqlite = { version = "0.22", features = ["bundled"] } -rand = "0.8" -serde = { version = "1.0", features = ["derive"] } -serde_bencode = "^0.2" -serde_json = "1.0" -serde_with = "3.2" -thiserror = "1.0" -tokio = { version = "1.29", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } -torrust-tracker-configuration = { version = "3.0.0-alpha.11", path = "packages/configuration" } -torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.11", path = "contrib/bencode" } -torrust-tracker-located-error = { version = "3.0.0-alpha.11", path = "packages/located-error" } -torrust-tracker-primitives = { version = "3.0.0-alpha.11", path = "packages/primitives" } -tower-http = { version = "0.4", features = ["compression-full"] } +anyhow = "1" +aquatic_udp_protocol = "0" +axum = { version = "0", features = ["macros"] } +axum-client-ip = "0" +axum-extra = { version = "0", features = 
["query"] } +axum-server = { version = "0", features = ["tls-rustls"] } +camino = { version = "1", features = ["serde", "serde1"] } +chrono = { version = "0", default-features = false, features = ["clock"] } +clap = { version = "4", features = ["derive", "env"] } +crossbeam-skiplist = "0" +dashmap = "6" +derive_more = "0" +figment = "0" +futures = "0" +futures-util = "0" +hex-literal = "0" +http-body = "1" +hyper = "1" +hyper-util = { version = "0", features = ["http1", "http2", "tokio"] } +lazy_static = "1" +multimap = "0" +parking_lot = "0" +percent-encoding = "2" +pin-project-lite = "0" +r2d2 = "0" +r2d2_mysql = "25" +r2d2_sqlite = { version = "0", features = ["bundled"] } +rand = "0" +regex = "1" +reqwest = { version = "0", features = ["json"] } +ringbuf = "0" +serde = { version = "1", features = ["derive"] } +serde_bencode = "0" +serde_bytes = "0" +serde_json = { version = "1", features = ["preserve_order"] } +serde_repr = "0" +serde_with = { version = "3.9.0", features = ["json"] } +thiserror = "1" +tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +torrust-tracker-clock = { version = "3.0.0-alpha.12", path = "packages/clock" } +torrust-tracker-configuration = { version = "3.0.0-alpha.12", path = "packages/configuration" } +torrust-tracker-contrib-bencode = { version = "3.0.0-alpha.12", path = "contrib/bencode" } +torrust-tracker-located-error = { version = "3.0.0-alpha.12", path = "packages/located-error" } +torrust-tracker-primitives = { version = "3.0.0-alpha.12", path = "packages/primitives" } +torrust-tracker-torrent-repository = { version = "3.0.0-alpha.12", path = "packages/torrent-repository" } +tower = { version = "0", features = ["timeout"] } +tower-http = { version = "0", features = ["compression-full", "cors", "propagate-header", "request-id", "trace"] } +trace = "0" +tracing = "0" +tracing-subscriber = { version = "0", features = ["json"] } +url = { version = "2", features = ["serde"] } uuid = { version 
= "1", features = ["v4"] } +zerocopy = "0" + +[package.metadata.cargo-machete] +ignored = ["crossbeam-skiplist", "dashmap", "figment", "parking_lot", "serde_bytes"] [dev-dependencies] -local-ip-address = "0.5" -mockall = "0.11" -reqwest = { version = "0.11.18", features = ["json"] } -serde_bytes = "0.11" -serde_repr = "0.1" -serde_urlencoded = "0.7" -torrust-tracker-test-helpers = { version = "3.0.0-alpha.11", path = "packages/test-helpers" } +local-ip-address = "0" +mockall = "0" +torrust-tracker-test-helpers = { version = "3.0.0-alpha.12", path = "packages/test-helpers" } [workspace] -members = ["contrib/bencode", "packages/configuration", "packages/located-error", "packages/primitives", "packages/test-helpers"] +members = [ + "contrib/bencode", + "packages/configuration", + "packages/located-error", + "packages/primitives", + "packages/test-helpers", + "packages/torrent-repository", +] [profile.dev] debug = 1 -lto = "thin" +lto = "fat" opt-level = 1 [profile.release] debug = 1 lto = "fat" opt-level = 3 + +[profile.release-debug] +debug = true +inherits = "release" diff --git a/Containerfile b/Containerfile index be71017db..263053390 100644 --- a/Containerfile +++ b/Containerfile @@ -3,13 +3,13 @@ # Torrust Tracker ## Builder Image -FROM rust:bookworm as chef +FROM docker.io/library/rust:bookworm AS chef WORKDIR /tmp RUN curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash RUN cargo binstall --no-confirm cargo-chef cargo-nextest ## Tester Image -FROM rust:slim-bookworm as tester +FROM docker.io/library/rust:slim-bookworm AS tester WORKDIR /tmp RUN apt-get update; apt-get install -y curl sqlite3; apt-get autoclean @@ -21,7 +21,7 @@ RUN mkdir -p /app/share/torrust/default/database/; \ sqlite3 /app/share/torrust/default/database/tracker.sqlite3.db "VACUUM;" ## Su Exe Compile -FROM docker.io/library/gcc:bookworm as gcc +FROM docker.io/library/gcc:bookworm AS gcc COPY 
./contrib/dev-tools/su-exec/ /usr/local/src/su-exec/ RUN cc -Wall -Werror -g /usr/local/src/su-exec/su-exec.c -o /usr/local/bin/su-exec; chmod +x /usr/local/bin/su-exec @@ -62,7 +62,7 @@ RUN cargo nextest archive --tests --benches --examples --workspace --all-targets # Extract and Test (debug) -FROM tester as test_debug +FROM tester AS test_debug WORKDIR /test COPY . /test/src/ COPY --from=build_debug \ @@ -76,7 +76,7 @@ RUN mkdir /app/lib/; cp -l $(realpath $(ldd /app/bin/torrust-tracker | grep "lib RUN chown -R root:root /app; chmod -R u=rw,go=r,a+X /app; chmod -R a+x /app/bin # Extract and Test (release) -FROM tester as test +FROM tester AS test WORKDIR /test COPY . /test/src COPY --from=build \ @@ -85,34 +85,37 @@ COPY --from=build \ RUN cargo nextest run --workspace-remap /test/src/ --extract-to /test/src/ --no-run --archive-file /test/torrust-tracker.tar.zst RUN cargo nextest run --workspace-remap /test/src/ --target-dir-remap /test/src/target/ --cargo-metadata /test/src/target/nextest/cargo-metadata.json --binaries-metadata /test/src/target/nextest/binaries-metadata.json -RUN mkdir -p /app/bin/; cp -l /test/src/target/release/torrust-tracker /app/bin/torrust-tracker +RUN mkdir -p /app/bin/; cp -l /test/src/target/release/torrust-tracker /app/bin/torrust-tracker; cp -l /test/src/target/release/http_health_check /app/bin/http_health_check RUN mkdir -p /app/lib/; cp -l $(realpath $(ldd /app/bin/torrust-tracker | grep "libz\.so\.1" | awk '{print $3}')) /app/lib/libz.so.1 RUN chown -R root:root /app; chmod -R u=rw,go=r,a+X /app; chmod -R a+x /app/bin ## Runtime -FROM gcr.io/distroless/cc-debian12:debug as runtime +FROM gcr.io/distroless/cc-debian12:debug AS runtime RUN ["/busybox/cp", "-sp", "/busybox/sh","/busybox/cat","/busybox/ls","/busybox/env", "/bin/"] COPY --from=gcc --chmod=0555 /usr/local/bin/su-exec /bin/su-exec -ARG TORRUST_TRACKER_PATH_CONFIG="/etc/torrust/tracker/tracker.toml" -ARG TORRUST_TRACKER_DATABASE_DRIVER="sqlite3" +ARG 
TORRUST_TRACKER_CONFIG_TOML_PATH="/etc/torrust/tracker/tracker.toml" +ARG TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER="sqlite3" ARG USER_ID=1000 ARG UDP_PORT=6969 ARG HTTP_PORT=7070 ARG API_PORT=1212 +ARG HEALTH_CHECK_API_PORT=1313 -ENV TORRUST_TRACKER_PATH_CONFIG=${TORRUST_TRACKER_PATH_CONFIG} -ENV TORRUST_TRACKER_DATABASE_DRIVER=${TORRUST_TRACKER_DATABASE_DRIVER} +ENV TORRUST_TRACKER_CONFIG_TOML_PATH=${TORRUST_TRACKER_CONFIG_TOML_PATH} +ENV TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER=${TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER} ENV USER_ID=${USER_ID} ENV UDP_PORT=${UDP_PORT} ENV HTTP_PORT=${HTTP_PORT} ENV API_PORT=${API_PORT} +ENV HEALTH_CHECK_API_PORT=${HEALTH_CHECK_API_PORT} ENV TZ=Etc/UTC EXPOSE ${UDP_PORT}/udp EXPOSE ${HTTP_PORT}/tcp EXPOSE ${API_PORT}/tcp +EXPOSE ${HEALTH_CHECK_API_PORT}/tcp RUN mkdir -p /var/lib/torrust/tracker /var/log/torrust/tracker /etc/torrust/tracker @@ -126,15 +129,17 @@ ENTRYPOINT ["/usr/local/bin/entry.sh"] ## Torrust-Tracker (debug) -FROM runtime as debug +FROM runtime AS debug ENV RUNTIME="debug" COPY --from=test_debug /app/ /usr/ RUN env CMD ["sh"] ## Torrust-Tracker (release) (default) -FROM runtime as release +FROM runtime AS release ENV RUNTIME="release" COPY --from=test /app/ /usr/ -# HEALTHCHECK CMD ["/usr/bin/wget", "--no-verbose", "--tries=1", "--spider", "localhost:${API_PORT}/version"] +HEALTHCHECK --interval=5s --timeout=5s --start-period=3s --retries=3 \ + CMD /usr/bin/http_health_check http://localhost:${HEALTH_CHECK_API_PORT}/health_check \ + || exit 1 CMD ["/usr/bin/torrust-tracker"] diff --git a/README.md b/README.md index e76659d5e..6d611d9a5 100644 --- a/README.md +++ b/README.md @@ -2,16 +2,14 @@ [![container_wf_b]][container_wf] [![coverage_wf_b]][coverage_wf] [![deployment_wf_b]][deployment_wf] [![testing_wf_b]][testing_wf] -__Torrust Tracker__, is a [BitTorrent][bittorrent] Tracker (a service that matchmakes peers and collects statistics) written in [Rust Language][rust] 
and [axum] (a modern web application framework). ___This tracker aims to be respectful to established standards, (both [formal][BEP 00] and [otherwise][torrent_source_felid]).___ +**Torrust Tracker** is a [BitTorrent][bittorrent] Tracker that matchmakes peers and collects statistics. Written in [Rust Language][rust] with the [Axum] web framework. **This tracker aims to be respectful to established standards, (both [formal][BEP 00] and [otherwise][torrent_source_felid]).** > This is a [Torrust][torrust] project and is in active development. It is community supported as well as sponsored by [Nautilus Cyberneering][nautilus]. -- _We have a [container guide][containers.md] for those who wish to get started with __Docker__ or __Podman___ - ## Key Features - [x] High Quality and Modern Rust Codebase. -- [x] [Documentation] Generated from Code Comments. +- [x] [Documentation][docs] Generated from Code Comments. - [x] [Comprehensive Suit][coverage] of Unit and Functional Tests. - [x] Good Performance in Busy Conditions. - [x] Support for `UDP`, `HTTP`, and `TLS` Sockets. @@ -21,43 +19,80 @@ __Torrust Tracker__, is a [BitTorrent][bittorrent] Tracker (a service that match - [x] Support [newTrackon][newtrackon] checks. - [x] Persistent `SQLite3` or `MySQL` Databases. +## Roadmap + +Core: + +- [ ] New option `want_ip_from_query_string`. See . +- [ ] Peer and torrents specific statistics. See . + +Persistence: + +- [ ] Support other databases like PostgreSQL. + +Performance: + +- [ ] More optimizations. See . + +Protocols: + +- [ ] WebTorrent. + +Integrations: + +- [ ] Monitoring (Prometheus). + +Utils: + +- [ ] Tracker client. +- [ ] Tracker checker. + +Others: + +- [ ] Support for Windows. +- [ ] Docker images for other architectures. + + + ## Implemented BitTorrent Enhancement Proposals (BEPs) +> > _[Learn more about BitTorrent Enhancement Proposals][BEP 00]_ -- [BEP 03] : The BitTorrent Protocol. -- [BEP 07] : IPv6 Support. -- [BEP 15] : UDP Tracker Protocol for BitTorrent. 
-- [BEP 23] : Tracker Returns Compact Peer Lists. -- [BEP 27] : Private Torrents. -- [BEP 48] : Tracker Protocol Extension: Scrape. - +- [BEP 03]: The BitTorrent Protocol. +- [BEP 07]: IPv6 Support. +- [BEP 15]: UDP Tracker Protocol for BitTorrent. +- [BEP 23]: Tracker Returns Compact Peer Lists. +- [BEP 27]: Private Torrents. +- [BEP 48]: Tracker Protocol Extension: Scrape. ## Getting Started ### Container Version -The Torrust Tracker is [deployed to DockerHub][dockerhub_torrust_tracker], you can run a demo immediately with the following commands: +The Torrust Tracker is [deployed to DockerHub][dockerhub], you can run a demo immediately with the following commands: -#### Docker: +#### Docker ```sh docker run -it torrust/tracker:develop ``` + > Please read our [container guide][containers.md] for more information. -#### Podman: +#### Podman ```sh -podman run -it torrust/tracker:develop +podman run -it docker.io/torrust/tracker:develop ``` + > Please read our [container guide][containers.md] for more information. ### Development Version -- Please assure you have the ___[latest stable (or nightly) version of rust][rust]___. -- Please assure that you computer has enough ram. ___Recommended 16GB.___ +- Please ensure you have the _**[latest stable (or nightly) version of rust][rust]___. +- Please ensure that your computer has enough RAM. 
_**Recommended 16GB.___ -#### Checkout, Test and Run: +#### Checkout, Test and Run ```sh # Checkout repository into a new folder: @@ -74,7 +109,8 @@ cargo test --tests --benches --examples --workspace --all-targets --all-features # Run the tracker: cargo run ``` -#### Customization: + +#### Customization ```sh # Copy the default configuration into the standard location: @@ -85,17 +121,17 @@ cp ./share/default/config/tracker.development.sqlite3.toml ./storage/tracker/etc vim ./storage/tracker/etc/tracker.toml # Run the tracker with the updated configuration: -TORRUST_TRACKER_PATH_CONFIG="./storage/tracker/etc/tracker.toml" cargo run +TORRUST_TRACKER_CONFIG_TOML_PATH="./storage/tracker/etc/tracker.toml" cargo run ``` _Optionally, you may choose to supply the entire configuration as an environmental variable:_ ```sh # Use a configuration supplied on an environmental variable: -TORRUST_TRACKER_CONFIG=$(cat "./storage/tracker/etc/tracker.toml") cargo run +TORRUST_TRACKER_CONFIG_TOML=$(cat "./storage/tracker/etc/tracker.toml") cargo run ``` -_For deployment you __should__ override the `api_admin_token` by using an environmental variable:_ +_For deployment, you **should** override the `api_admin_token` by using an environmental variable:_ ```sh # Generate a Secret Token: @@ -103,51 +139,82 @@ gpg --armor --gen-random 1 10 | tee ./storage/tracker/lib/tracker_api_admin_toke chmod go-rwx ./storage/tracker/lib/tracker_api_admin_token.secret # Override secret in configuration using an environmental variable: -TORRUST_TRACKER_CONFIG=$(cat "./storage/tracker/etc/tracker.toml") \ - TORRUST_TRACKER_API_ADMIN_TOKEN=$(cat "./storage/tracker/lib/tracker_api_admin_token.secret") \ +TORRUST_TRACKER_CONFIG_TOML=$(cat "./storage/tracker/etc/tracker.toml") \ + TORRUST_TRACKER_CONFIG_OVERRIDE_HTTP_API__ACCESS_TOKENS__ADMIN=$(cat "./storage/tracker/lib/tracker_api_admin_token.secret") \ cargo run ``` -> Please view our [crate documentation][documentation] for more detailed instructions. 
+> Please view our [crate documentation][docs] for more detailed instructions. ### Services + The following services are provided by the default configuration: - UDP _(tracker)_ - `udp://127.0.0.1:6969/announce`. - HTTP _(tracker)_ - - `http://127.0.0.1:6969/announce`. + - `http://127.0.0.1:7070/announce`. - API _(management)_ - `http://127.0.0.1:1212/api/v1/stats?token=MyAccessToken`. - ## Documentation -- [Management API (Version 1)][api] -- [Tracker (HTTP/TLS)][http] -- [Tracker (UDP)][udp] +You can read the [latest documentation][docs] from . + +Some specific sections: + +- [Management API (Version 1)][API] +- [Tracker (HTTP/TLS)][HTTP] +- [Tracker (UDP)][UDP] + +## Benchmarking + +- [Benchmarking](./docs/benchmarking.md) ## Contributing -This is an open-source community supported project.
-We welcome contributions from the community! +We are happy to support and welcome new people to our project. Please consider our [contributor guide][guide.md].
+This is an open-source community-supported project. We welcome contributions from the community! -__How can you contribute?__ +**How can you contribute?** - Bug reports and feature requests. - Code contributions. You can start by looking at the issues labeled "[good first issues]". -- Documentation improvements. Check the [documentation] and [API documentation] for typos, errors, or missing information. +- Documentation improvements. Check the [documentation][docs] and [API documentation][API] for typos, errors, or missing information. - Participation in the community. You can help by answering questions in the [discussions]. ## License -The project is licensed under a dual license. See [COPYRIGHT]. +**Copyright (c) 2023 The Torrust Developers.** -## Acknowledgments +This program is free software: you can redistribute it and/or modify it under the terms of the [GNU Affero General Public License][AGPL_3_0] as published by the [Free Software Foundation][FSF], version 3. -This project was a joint effort by [Nautilus Cyberneering GmbH][nautilus] and [Dutch Bits]. Also thanks to [Naim A.] and [greatest-ape] for some parts of the code. Further added features and functions thanks to [Power2All]. +This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the [GNU Affero General Public License][AGPL_3_0] for more details. + +You should have received a copy of the *GNU Affero General Public License* along with this program. If not, see . + +Some files include explicit copyright notices and/or license notices. + +### Legacy Exception +For prosperity, versions of Torrust Tracker that are older than five years are automatically granted the [MIT-0][MIT_0] license in addition to the existing [AGPL-3.0-only][AGPL_3_0] license. +## Contributor Agreement + +The copyright of the Torrust Tracker is retained by the respective authors. 
+ +**Contributors agree:** + +- That all their contributions be granted a license(s) **compatible** with the [Torrust Trackers License](#license). +- That all contributors signal **clearly** and **explicitly** any other compilable licenses if they are not: _[AGPL-3.0-only with the legacy MIT-0 exception](#license)_. + +**The Torrust-Tracker project has no copyright assignment agreement.** + +_We kindly ask you to take time and consider The Torrust Project [Contributor Agreement][agreement.md] in full._ + +## Acknowledgments + +This project was a joint effort by [Nautilus Cyberneering GmbH][nautilus] and [Dutch Bits]. Also thanks to [Naim A.] and [greatest-ape] for some parts of the code. Further added features and functions thanks to [Power2All]. [container_wf]: ../../actions/workflows/container.yaml [container_wf_b]: ../../actions/workflows/container.yaml/badge.svg @@ -165,7 +232,7 @@ This project was a joint effort by [Nautilus Cyberneering GmbH][nautilus] and [D [coverage]: https://app.codecov.io/gh/torrust/torrust-tracker [torrust]: https://torrust.com/ -[dockerhub_torrust_tracker]: https://hub.docker.com/r/torrust/tracker/tags +[dockerhub]: https://hub.docker.com/r/torrust/tracker/tags [torrent_source_felid]: https://github.com/qbittorrent/qBittorrent/discussions/19406 @@ -179,16 +246,20 @@ This project was a joint effort by [Nautilus Cyberneering GmbH][nautilus] and [D [containers.md]: ./docs/containers.md -[api]: https://docs.rs/torrust-tracker/3.0.0-alpha.11/torrust_tracker/servers/apis/v1 -[http]: https://docs.rs/torrust-tracker/3.0.0-alpha.11/torrust_tracker/servers/http -[udp]: https://docs.rs/torrust-tracker/3.0.0-alpha.11/torrust_tracker/servers/udp +[docs]: https://docs.rs/torrust-tracker/latest/ +[api]: https://docs.rs/torrust-tracker/latest/torrust_tracker/servers/apis/v1 +[http]: https://docs.rs/torrust-tracker/latest/torrust_tracker/servers/http +[udp]: https://docs.rs/torrust-tracker/latest/torrust_tracker/servers/udp [good first issues]: 
https://github.com/torrust/torrust-tracker/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22 -[documentation]: https://docs.rs/torrust-tracker/ -[API documentation]: https://docs.rs/torrust-tracker/3.0.0-alpha.11/torrust_tracker/servers/apis/v1 [discussions]: https://github.com/torrust/torrust-tracker/discussions -[COPYRIGHT]: ./COPYRIGHT +[guide.md]: https://github.com/torrust/.github/blob/main/info/contributing.md +[agreement.md]: https://github.com/torrust/.github/blob/main/info/licensing/contributor_agreement_v01.md + +[AGPL_3_0]: ./docs/licenses/LICENSE-AGPL_3_0 +[MIT_0]: ./docs/licenses/LICENSE-MIT_0 +[FSF]: https://www.fsf.org/ [nautilus]: https://github.com/orgs/Nautilus-Cyberneering/ [Dutch Bits]: https://dutchbits.nl diff --git a/cSpell.json b/cSpell.json index c9b547c90..6a9da0324 100644 --- a/cSpell.json +++ b/cSpell.json @@ -1,9 +1,11 @@ { "words": [ + "Addrs", "adduser", "alekitto", "appuser", "Arvid", + "asyn", "autoclean", "AUTOINCREMENT", "automock", @@ -17,9 +19,13 @@ "binstall", "Bitflu", "bools", + "Bragilevsky", "bufs", + "buildid", "Buildx", "byteorder", + "callgrind", + "camino", "canonicalize", "canonicalized", "certbot", @@ -28,34 +34,54 @@ "codecov", "codegen", "completei", + "Condvar", "connectionless", "Containerfile", + "conv", "curr", + "cvar", "Cyberneering", + "dashmap", + "datagram", "datetime", + "debuginfo", + "Deque", "Dijke", "distroless", "dockerhub", "downloadedi", "dtolnay", "elif", + "Eray", "filesd", + "flamegraph", "Freebox", + "FrostegÃ¥rd", "gecos", "Grcov", "hasher", + "heaptrack", "hexlify", "hlocalhost", "Hydranode", + "hyperthread", "Icelake", + "iiiiiiiiiiiiiiiiiiiid", "imdl", + "impls", "incompletei", "infohash", "infohashes", "infoschema", "Intermodal", "intervali", + "Joakim", + "kallsyms", + "Karatay", + "kcachegrind", + "kexec", "keyout", + "kptr", "lcov", "leecher", "leechers", @@ -67,6 +93,7 @@ "matchmakes", "metainfo", "middlewares", + "misresolved", "mockall", "multimap", "myacicontext", @@ 
-79,36 +106,52 @@ "nonroot", "Norberg", "numwant", + "nvCFlJCq7fz7Qx6KoKTDiMZvns8l5Kw7", "oneshot", "ostr", "Pando", + "peekable", + "peerlist", + "programatik", "proot", "proto", "Quickstart", + "Radeon", "Rasterbar", "realpath", "reannounce", + "Registar", "repr", + "reqs", "reqwest", "rerequests", + "ringbuf", + "ringsize", "rngs", + "rosegment", "routable", + "rstest", "rusqlite", "RUSTDOCFLAGS", "RUSTFLAGS", "rustfmt", "Rustls", + "Ryzen", "Seedable", "serde", "Shareaza", "sharktorrent", "SHLVL", + "skiplist", + "slowloris", "socketaddr", "sqllite", "subsec", "Swatinem", "Swiftbit", "taiki", + "tdyne", + "tempfile", "thiserror", "tlsv", "Torrentstorm", @@ -121,15 +164,22 @@ "untuple", "uroot", "Vagaa", + "valgrind", + "Vitaly", + "vmlinux", "Vuze", + "Weidendorfer", "Werror", "whitespaces", + "Xacrimon", "XBTT", + "Xdebug", "Xeon", "Xtorrent", "Xunlei", "xxxxxxxxxxxxxxxxxxxxd", - "yyyyyyyyyyyyyyyyyyyyd" + "yyyyyyyyyyyyyyyyyyyyd", + "zerocopy" ], "enableFiletypes": [ "dockerfile", diff --git a/codecov.yml b/codecov.yaml similarity index 56% rename from codecov.yml rename to codecov.yaml index f0878195b..aaa25bf74 100644 --- a/codecov.yml +++ b/codecov.yaml @@ -4,3 +4,7 @@ coverage: default: target: auto threshold: 0.5% + patch: + default: + target: auto + threshold: 0.5% diff --git a/compose.yaml b/compose.yaml index 672ca6d0f..c2e7c63bd 100644 --- a/compose.yaml +++ b/compose.yaml @@ -4,8 +4,8 @@ services: image: torrust-tracker:release tty: true environment: - - TORRUST_TRACKER_DATABASE_DRIVER=${TORRUST_TRACKER_DATABASE_DRIVER:-mysql} - - TORRUST_TRACKER_API_ADMIN_TOKEN=${TORRUST_TRACKER_API_ADMIN_TOKEN:-MyAccessToken} + - TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER=${TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER:-mysql} + - TORRUST_TRACKER_CONFIG_OVERRIDE_HTTP_API__ACCESS_TOKENS__ADMIN=${TORRUST_TRACKER_CONFIG_OVERRIDE_HTTP_API__ACCESS_TOKENS__ADMIN:-MyAccessToken} networks: - server_side ports: diff --git a/contrib/bencode/Cargo.toml 
b/contrib/bencode/Cargo.toml index 3918aa6ba..f7bab0585 100644 --- a/contrib/bencode/Cargo.toml +++ b/contrib/bencode/Cargo.toml @@ -16,10 +16,10 @@ rust-version.workspace = true version.workspace = true [dependencies] -error-chain = "0.12" +error-chain = "0" [dev-dependencies] -criterion = "0.5" +criterion = "0" [[test]] name = "test" diff --git a/contrib/bencode/src/access/dict.rs b/contrib/bencode/src/access/dict.rs index 596d9535e..7efe93fc3 100644 --- a/contrib/bencode/src/access/dict.rs +++ b/contrib/bencode/src/access/dict.rs @@ -21,6 +21,7 @@ pub trait BDictAccess { impl<'a, V> BDictAccess<&'a [u8], V> for BTreeMap<&'a [u8], V> { fn to_list(&self) -> Vec<(&&'a [u8], &V)> { + #[allow(clippy::map_identity)] self.iter().map(|(k, v)| (k, v)).collect() } @@ -43,6 +44,7 @@ impl<'a, V> BDictAccess<&'a [u8], V> for BTreeMap<&'a [u8], V> { impl<'a, V> BDictAccess, V> for BTreeMap, V> { fn to_list(&self) -> Vec<(&Cow<'a, [u8]>, &V)> { + #[allow(clippy::map_identity)] self.iter().map(|(k, v)| (k, v)).collect() } diff --git a/contrib/bencode/src/access/list.rs b/contrib/bencode/src/access/list.rs index 840bffa1e..c6d1fc407 100644 --- a/contrib/bencode/src/access/list.rs +++ b/contrib/bencode/src/access/list.rs @@ -45,6 +45,14 @@ impl<'a, V: 'a> IndexMut for &'a mut dyn BListAccess { } } +impl<'a, V: 'a> dyn BListAccess { + pub fn iter(&'a self) -> impl Iterator { + self.into_iter() + } +} + +#[allow(unknown_lints)] +#[allow(clippy::into_iter_without_iter)] impl<'a, V: 'a> IntoIterator for &'a dyn BListAccess { type Item = &'a V; type IntoIter = BListIter<'a, V>; diff --git a/contrib/bencode/src/error.rs b/contrib/bencode/src/error.rs index 18ebe9605..54c589e3e 100644 --- a/contrib/bencode/src/error.rs +++ b/contrib/bencode/src/error.rs @@ -1,3 +1,5 @@ +#![allow(unknown_lints)] +#![allow(clippy::iter_without_into_iter)] use error_chain::error_chain; error_chain! 
{ diff --git a/contrib/bencode/src/mutable/encode.rs b/contrib/bencode/src/mutable/encode.rs index 811c35816..25c91b41d 100644 --- a/contrib/bencode/src/mutable/encode.rs +++ b/contrib/bencode/src/mutable/encode.rs @@ -1,5 +1,3 @@ -use std::iter::Extend; - use crate::access::bencode::{BRefAccess, RefKind}; use crate::access::dict::BDictAccess; use crate::access::list::BListAccess; diff --git a/contrib/bencode/src/reference/bencode_ref.rs b/contrib/bencode/src/reference/bencode_ref.rs index 760dd3016..a6f2c15bc 100644 --- a/contrib/bencode/src/reference/bencode_ref.rs +++ b/contrib/bencode/src/reference/bencode_ref.rs @@ -125,8 +125,6 @@ impl<'a> BRefAccessExt<'a> for BencodeRef<'a> { #[cfg(test)] mod tests { - use std::default::Default; - use crate::access::bencode::BRefAccess; use crate::reference::bencode_ref::BencodeRef; use crate::reference::decode_opt::BDecodeOpt; diff --git a/contrib/bencode/src/reference/decode.rs b/contrib/bencode/src/reference/decode.rs index d2aa180f8..d35d1b597 100644 --- a/contrib/bencode/src/reference/decode.rs +++ b/contrib/bencode/src/reference/decode.rs @@ -177,8 +177,6 @@ fn peek_byte(bytes: &[u8], pos: usize) -> BencodeParseResult { #[cfg(test)] mod tests { - use std::default::Default; - use crate::access::bencode::BRefAccess; use crate::reference::bencode_ref::BencodeRef; use crate::reference::decode_opt::BDecodeOpt; diff --git a/contrib/bencode/src/reference/decode_opt.rs b/contrib/bencode/src/reference/decode_opt.rs index ac94d0311..8409cc72c 100644 --- a/contrib/bencode/src/reference/decode_opt.rs +++ b/contrib/bencode/src/reference/decode_opt.rs @@ -1,5 +1,3 @@ -use std::default::Default; - const DEFAULT_MAX_RECURSION: usize = 50; const DEFAULT_CHECK_KEY_SORT: bool = false; const DEFAULT_ENFORCE_FULL_DECODE: bool = true; @@ -41,7 +39,7 @@ impl BDecodeOpt { /// /// It may be useful to disable this if for example, the input bencode is prepended to /// some payload and you would like to disassociate it. 
In this case, to find where the - /// rest of the payload starts that wasn't decoded, get the bencode buffer, and call len(). + /// rest of the payload starts that wasn't decoded, get the bencode buffer, and call `len()`. #[must_use] pub fn enforce_full_decode(&self) -> bool { self.enforce_full_decode diff --git a/contrib/dev-tools/init/install-local.sh b/contrib/dev-tools/init/install-local.sh index f9806a0b8..747c357bc 100755 --- a/contrib/dev-tools/init/install-local.sh +++ b/contrib/dev-tools/init/install-local.sh @@ -7,6 +7,5 @@ mkdir -p ./storage/tracker/lib/database # Generate the sqlite database if it does not exist if ! [ -f "./storage/tracker/lib/database/sqlite3.db" ]; then - # todo: it should get the path from tracker.toml and only do it when we use sqlite - sqlite3 ./storage/tracker/lib/database/sqlite3.db "VACUUM;" + sqlite3 ./storage/tracker/lib/database/sqlite3.db "VACUUM;" fi diff --git a/docs/adrs/20240227164834_use_plural_for_modules_containing_collections.md b/docs/adrs/20240227164834_use_plural_for_modules_containing_collections.md new file mode 100644 index 000000000..beb3cee00 --- /dev/null +++ b/docs/adrs/20240227164834_use_plural_for_modules_containing_collections.md @@ -0,0 +1,35 @@ +# Use plural for modules containing collections of types + +## Description + +In Rust, the naming conventions for module names (mod names) generally lean +towards using the singular form, rather than plurals. This practice aligns with +Rust's emphasis on clarity and precision in code organization. The idea is that +a module name should represent a single concept or functionality, which often +means using a singular noun to describe what the module contains or does. + +However, it's important to note that conventions can vary depending on the +context or the specific project. Some projects may choose to use plural forms +for module names if they feel it more accurately represents the contents of the +module. 
For example, a module that contains multiple implementations of a +similar concept or utility functions related to a specific theme might be named +in the plural to reflect the diversity of its contents. + +This could have some pros and cons. For example, for a module containing types of +requests you could refer to a concrete request with `request::Announce` or +`requests::Announce`. If you read a code line `request::Announce` is probably +better. However, if you read the file or folder name, `requests` gives you a +better idea of what the module contains. + +## Agreement + +We agree on using the plural in cases where the modules contain some types with the +same type of responsibility. For example: + +- `src/servers`. +- `src/servers/http/v1/requests`. +- `src/servers/http/v1/responses`. +- `src/servers/http/v1/services`. +- Etcetera. + +We will change them progressively. diff --git a/docs/adrs/README.md b/docs/adrs/README.md new file mode 100644 index 000000000..85986fc36 --- /dev/null +++ b/docs/adrs/README.md @@ -0,0 +1,23 @@ +# Architectural Decision Records (ADRs) + +This directory contains the architectural decision records (ADRs) for the +project. ADRs are a way to document the architectural decisions made in the +project. + +More info: . + +## How to add a new record + +For the prefix: + +```s +date -u +"%Y%m%d%H%M%S" +``` + +Then you can create a new markdown file with the following format: + +```s +20230510152112_title.md +``` + +For the time being, we are not following any specific template. diff --git a/docs/benchmarking.md b/docs/benchmarking.md new file mode 100644 index 000000000..7d0228737 --- /dev/null +++ b/docs/benchmarking.md @@ -0,0 +1,303 @@ +# Benchmarking + +We have two types of benchmarking: + +- E2E benchmarking running the UDP tracker. +- Internal torrents repository benchmarking. + +## E2E benchmarking + +We are using the scripts provided by [aquatic](https://github.com/greatest-ape/aquatic). 
+ +How to install both commands: + +```console +cargo install aquatic_udp_load_test && cargo install aquatic_http_load_test +``` + +You can also clone and build the repos. It's the way used for the results shown +in this documentation. + +```console +git clone git@github.com:greatest-ape/aquatic.git +cd aquatic +cargo build --release -p aquatic_udp_load_test +``` + +### Run UDP load test + +Run the tracker with UDP service enabled and other services disabled and set log threshold to `error`. + +```toml +[logging] +threshold = "error" + +[[udp_trackers]] +bind_address = "0.0.0.0:6969" +``` + +Build and run the tracker: + +```console +cargo build --release +TORRUST_TRACKER_CONFIG_TOML_PATH="./share/default/config/tracker.udp.benchmarking.toml" ./target/release/torrust-tracker +``` + +Run the load test with: + +```console +./target/release/aquatic_udp_load_test +``` + +> NOTICE: You need to modify the port in the `udp_load_test` crate to use `6969` and rebuild. + +Output: + +```output +Starting client with config: Config { + server_address: 127.0.0.1:6969, + log_level: Error, + workers: 1, + duration: 0, + summarize_last: 0, + extra_statistics: true, + network: NetworkConfig { + multiple_client_ipv4s: true, + sockets_per_worker: 4, + recv_buffer: 8000000, + }, + requests: RequestConfig { + number_of_torrents: 1000000, + number_of_peers: 2000000, + scrape_max_torrents: 10, + announce_peers_wanted: 30, + weight_connect: 50, + weight_announce: 50, + weight_scrape: 1, + peer_seeder_probability: 0.75, + }, +} + +Requests out: 398367.11/second +Responses in: 358530.40/second + - Connect responses: 177567.60 + - Announce responses: 177508.08 + - Scrape responses: 3454.72 + - Error responses: 0.00 +Peers per announce response: 0.00 +Announce responses per info hash: + - p10: 1 + - p25: 1 + - p50: 1 + - p75: 1 + - p90: 2 + - p95: 3 + - p99: 105 + - p99.9: 289 + - p100: 361 +``` + +> IMPORTANT: The performance of the Torrust UDP Tracker is drastically decreased with these log 
threshold: `info`, `debug`, `trace`. + +```output +Requests out: 40719.21/second +Responses in: 33762.72/second + - Connect responses: 16732.76 + - Announce responses: 16692.98 + - Scrape responses: 336.98 + - Error responses: 0.00 +Peers per announce response: 0.00 +Announce responses per info hash: + - p10: 1 + - p25: 1 + - p50: 1 + - p75: 1 + - p90: 7 + - p95: 14 + - p99: 27 + - p99.9: 35 + - p100: 45 +``` + +### Comparing UDP tracker with other Rust implementations + +#### Aquatic UDP Tracker + +Running the tracker: + +```console +git clone git@github.com:greatest-ape/aquatic.git +cd aquatic +cargo build --release -p aquatic_udp +./target/release/aquatic_udp -p > "aquatic-udp-config.toml" +./target/release/aquatic_udp -c "aquatic-udp-config.toml" +``` + +Run the load test with: + +```console +./target/release/aquatic_udp_load_test +``` + +```output +Requests out: 432896.42/second +Responses in: 389577.70/second + - Connect responses: 192864.02 + - Announce responses: 192817.55 + - Scrape responses: 3896.13 + - Error responses: 0.00 +Peers per announce response: 21.55 +Announce responses per info hash: + - p10: 1 + - p25: 1 + - p50: 1 + - p75: 1 + - p90: 2 + - p95: 3 + - p99: 105 + - p99.9: 311 + - p100: 395 +``` + +#### Torrust-Actix UDP Tracker + +Run the tracker with UDP service enabled and other services disabled and set log threshold to `error`. + +```toml +[logging] +threshold = "error" + +[[udp_trackers]] +bind_address = "0.0.0.0:6969" +``` + +```console +git clone https://github.com/Power2All/torrust-actix.git +cd torrust-actix +cargo build --release +./target/release/torrust-actix --create-config +./target/release/torrust-actix +``` + +Run the load test with: + +```console +./target/release/aquatic_udp_load_test +``` + +> NOTICE: You need to modify the port in the `udp_load_test` crate to use `6969` and rebuild. 
+ +```output +Requests out: 200953.97/second +Responses in: 180858.14/second + - Connect responses: 89517.13 + - Announce responses: 89539.67 + - Scrape responses: 1801.34 + - Error responses: 0.00 +Peers per announce response: 1.00 +Announce responses per info hash: + - p10: 1 + - p25: 1 + - p50: 1 + - p75: 1 + - p90: 2 + - p95: 7 + - p99: 87 + - p99.9: 155 + - p100: 188 +``` + +### Results + +Announce request per second: + +| Tracker | Announce | +|---------------|-----------| +| Aquatic | 192,817 | +| Torrust | 177,508 | +| Torrust-Actix | 89,539 | + +Using a PC with: + +- RAM: 64GiB +- Processor: AMD Ryzen 9 7950X x 32 +- Graphics: AMD Radeon Graphics / Intel Arc A770 Graphics (DG2) +- OS: Ubuntu 23.04 +- OS Type: 64-bit +- Kernel Version: Linux 6.2.0-20-generic + +## Repository benchmarking + +### Requirements + +You need to install the `gnuplot` package. + +```console +sudo apt install gnuplot +``` + +### Run + +You can run it with: + +```console +cargo bench -p torrust-tracker-torrent-repository +``` + +It tests the different implementations for the internal torrent storage. 
The output should be something like this: + +```output + Running benches/repository_benchmark.rs (target/release/deps/repository_benchmark-2f7830898bbdfba4) +add_one_torrent/RwLockStd + time: [60.936 ns 61.383 ns 61.764 ns] +Found 24 outliers among 100 measurements (24.00%) + 15 (15.00%) high mild + 9 (9.00%) high severe +add_one_torrent/RwLockStdMutexStd + time: [60.829 ns 60.937 ns 61.053 ns] +Found 1 outliers among 100 measurements (1.00%) + 1 (1.00%) high severe +add_one_torrent/RwLockStdMutexTokio + time: [96.034 ns 96.243 ns 96.545 ns] +Found 6 outliers among 100 measurements (6.00%) + 4 (4.00%) high mild + 2 (2.00%) high severe +add_one_torrent/RwLockTokio + time: [108.25 ns 108.66 ns 109.06 ns] +Found 2 outliers among 100 measurements (2.00%) + 2 (2.00%) low mild +add_one_torrent/RwLockTokioMutexStd + time: [109.03 ns 109.11 ns 109.19 ns] +Found 4 outliers among 100 measurements (4.00%) + 1 (1.00%) low mild + 1 (1.00%) high mild + 2 (2.00%) high severe +Benchmarking add_one_torrent/RwLockTokioMutexTokio: Collecting 100 samples in estimated 1.0003 s (7.1M iterationsadd_one_torrent/RwLockTokioMutexTokio + time: [139.64 ns 140.11 ns 140.62 ns] +``` + +After running it you should have a new directory containing the criterion reports: + +```console +target/criterion/ +├── add_multiple_torrents_in_parallel +├── add_one_torrent +├── report +├── update_multiple_torrents_in_parallel +└── update_one_torrent_in_parallel +``` + +You can see one report for each of the operations we are considering for benchmarking: + +- Add multiple torrents in parallel. +- Add one torrent. +- Update multiple torrents in parallel. +- Update one torrent in parallel. 
+ +Each report looks like the following: + +![Torrent repository implementations benchmarking report](./media/torrent-repository-implementations-benchmarking-report.png) + +## Other considerations + +If you are interested in knowing more about the tracker performance or contributing to improving its performance you can join the [performance optimizations discussion](https://github.com/torrust/torrust-tracker/discussions/774). diff --git a/docs/containers.md b/docs/containers.md index 737ce40a0..cddd2ba98 100644 --- a/docs/containers.md +++ b/docs/containers.md @@ -1,10 +1,10 @@ # Containers (Docker or Podman) ## Demo environment + It is simple to setup the tracker with the default configuration and run it using the pre-built public docker image: - With Docker: ```sh @@ -14,14 +14,15 @@ docker run -it torrust/tracker:latest or with Podman: ```sh -podman run -it torrust/tracker:latest +podman run -it docker.io/torrust/tracker:latest ``` - ## Requirements + - Tested with recent versions of Docker or Podman. ## Volumes + The [Containerfile](../Containerfile) (i.e. the Dockerfile) Defines Three Volumes: ```Dockerfile @@ -38,7 +39,8 @@ When instancing the container image with the `docker run` or `podman run` comman > NOTE: You can adjust this mapping for your preference, however this mapping is the default in our guides and scripts. -### Pre-Create Host-Mapped Folders: + +### Pre-Create Host-Mapped Folders + Please run this command where you wish to run the container: ```sh @@ -46,11 +48,13 @@ mkdir -p ./storage/tracker/lib/ ./storage/tracker/log/ ./storage/tracker/etc/ ``` ### Matching Ownership ID's of Host Storage and Container Volumes + It is important that the `torrust` user has the same uid `$(id -u)` as the host mapped folders. In our [entry script](../share/container/entry_script_sh), installed to `/usr/local/bin/entry.sh` inside the container, switches to the `torrust` user created based upon the `USER_UID` environmental variable.
When running the container, you may use the `--env USER_ID="$(id -u)"` argument that gets the current user-id and passes to the container. ### Mapped Tree Structure + Using the standard mapping defined above produces this following mapped tree: ```s @@ -78,6 +82,7 @@ git clone https://github.com/torrust/torrust-tracker.git; cd torrust-tracker ``` ### (Docker) Setup Context + Before starting, if you are using docker, it is helpful to reset the context to the default: ```sh @@ -107,6 +112,7 @@ podman build --target debug --tag torrust-tracker:debug --file Containerfile . ## Running the Container ### Basic Run + No arguments are needed for simply checking the container image works: #### (Docker) Run Basic @@ -118,37 +124,41 @@ docker run -it torrust-tracker:release # Debug Mode docker run -it torrust-tracker:debug ``` + #### (Podman) Run Basic ```sh # Release Mode -podman run -it torrust-tracker:release +podman run -it docker.io/torrust-tracker:release # Debug Mode -podman run -it torrust-tracker:debug +podman run -it docker.io/torrust-tracker:debug ``` ### Arguments + The arguments need to be placed before the image tag. i.e. `run [arguments] torrust-tracker:release` -#### Environmental Variables: +#### Environmental Variables + Environmental variables are loaded through the `--env`, in the format `--env VAR="value"`. The following environmental variables can be set: -- `TORRUST_TRACKER_PATH_CONFIG` - The in-container path to the tracker configuration file, (default: `"/etc/torrust/tracker/tracker.toml"`). -- `TORRUST_TRACKER_API_ADMIN_TOKEN` - Override of the admin token. If set, this value overrides any value set in the config. -- `TORRUST_TRACKER_DATABASE_DRIVER` - The database type used for the container, (options: `sqlite3`, `mysql`, default `sqlite3`). Please Note: This dose not override the database configuration within the `.toml` config file. 
-- `TORRUST_TRACKER_CONFIG` - Load config from this environmental variable instead from a file, (i.e: `TORRUST_TRACKER_CONFIG=$(cat tracker-tracker.toml)`). +- `TORRUST_TRACKER_CONFIG_TOML_PATH` - The in-container path to the tracker configuration file, (default: `"/etc/torrust/tracker/tracker.toml"`). +- `TORRUST_TRACKER_CONFIG_OVERRIDE_HTTP_API__ACCESS_TOKENS__ADMIN` - Override of the admin token. If set, this value overrides any value set in the config. +- `TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER` - The database type used for the container, (options: `sqlite3`, `mysql`, default `sqlite3`). Please Note: This does not override the database configuration within the `.toml` config file. +- `TORRUST_TRACKER_CONFIG_TOML` - Load config from this environmental variable instead from a file, (i.e: `TORRUST_TRACKER_CONFIG_TOML=$(cat tracker-tracker.toml)`). - `USER_ID` - The user id for the runtime crated `torrust` user. Please Note: This user id should match the ownership of the host-mapped volumes, (default `1000`). - `UDP_PORT` - The port for the UDP tracker. This should match the port used in the configuration, (default `6969`). - `HTTP_PORT` - The port for the HTTP tracker. This should match the port used in the configuration, (default `7070`). - `API_PORT` - The port for the tracker API. This should match the port used in the configuration, (default `1212`). - +- `HEALTH_CHECK_API_PORT` - The port for the Health Check API. This should match the port used in the configuration, (default `1313`). ### Sockets + Socket ports used internally within the container can be mapped to with the `--publish` argument. The format is: `--publish [optional_host_ip]:[host_port]:[container_port]/[optional_protocol]`, for example: `--publish 127.0.0.1:8080:80/tcp`. 
@@ -163,7 +173,8 @@ The default ports can be mapped with the following: > NOTE: Inside the container it is necessary to expose a socket with the wildcard address `0.0.0.0` so that it may be accessible from the host. Verify that the configuration that the sockets are wildcard. -### Volumes +### Host-mapped Volumes + By default the container will use install volumes for `/var/lib/torrust/tracker`, `/var/log/torrust/tracker`, and `/etc/torrust/tracker`, however for better administration it good to make these volumes host-mapped. The argument to host-map volumes is `--volume`, with the format: `--volume=[host-src:]container-dest[:]`. @@ -176,10 +187,9 @@ The default mapping can be supplied with the following arguments: --volume ./storage/tracker/etc:/etc/torrust/tracker:Z \ ``` - Please not the `:Z` at the end of the podman `--volume` mapping arguments, this is to give read-write permission on SELinux enabled systemd, if this doesn't work on your system, you can use `:rw` instead. -## Complete Example: +## Complete Example ### With Docker @@ -195,7 +205,7 @@ mkdir -p ./storage/tracker/lib/ ./storage/tracker/log/ ./storage/tracker/etc/ ## Run Torrust Tracker Container Image docker run -it \ - --env TORRUST_TRACKER_API_ADMIN_TOKEN="MySecretToken" \ + --env TORRUST_TRACKER_CONFIG_OVERRIDE_HTTP_API__ACCESS_TOKENS__ADMIN="MySecretToken" \ --env USER_ID="$(id -u)" \ --publish 0.0.0.0:7070:7070/tcp \ --publish 0.0.0.0:6969:6969/udp \ @@ -217,7 +227,7 @@ mkdir -p ./storage/tracker/lib/ ./storage/tracker/log/ ./storage/tracker/etc/ ## Run Torrust Tracker Container Image podman run -it \ - --env TORRUST_TRACKER_API_ADMIN_TOKEN="MySecretToken" \ + --env TORRUST_TRACKER_CONFIG_OVERRIDE_HTTP_API__ACCESS_TOKENS__ADMIN="MySecretToken" \ --env USER_ID="$(id -u)" \ --publish 0.0.0.0:7070:7070/tcp \ --publish 0.0.0.0:6969:6969/udp \ @@ -225,7 +235,7 @@ podman run -it \ --volume ./storage/tracker/lib:/var/lib/torrust/tracker:Z \ --volume ./storage/tracker/log:/var/log/torrust/tracker:Z \ 
--volume ./storage/tracker/etc:/etc/torrust/tracker:Z \ - torrust-tracker:release + docker.io/torrust-tracker:release ``` ## Docker Compose @@ -233,8 +243,9 @@ podman run -it \ The docker-compose configuration includes the MySQL service configuration. If you want to use MySQL instead of SQLite you should verify the `/etc/torrust/tracker/tracker.toml` (i.e `./storage/tracker/etc/tracker.toml`) configuration: ```toml -db_driver = "MySQL" -db_path = "mysql://db_user:db_user_secret_password@mysql:3306/torrust_tracker" +[core.database] +driver = "mysql" +path = "mysql://db_user:db_user_secret_password@mysql:3306/torrust_tracker" ``` ### Build and Run: @@ -245,7 +256,7 @@ docker build --target release --tag torrust-tracker:release --file Containerfile mkdir -p ./storage/tracker/lib/ ./storage/tracker/log/ ./storage/tracker/etc/ USER_ID=$(id -u) \ - TORRUST_TRACKER_API_ADMIN_TOKEN="MySecretToken" \ + TORRUST_TRACKER_CONFIG_OVERRIDE_HTTP_API__ACCESS_TOKENS__ADMIN="MySecretToken" \ docker compose up --build ``` @@ -256,7 +267,7 @@ $ docker ps CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 06feacb91a9e torrust-tracker "cargo run" 18 minutes ago Up 4 seconds 0.0.0.0:1212->1212/tcp, :::1212->1212/tcp, 0.0.0.0:7070->7070/tcp, :::7070->7070/tcp, 0.0.0.0:6969->6969/udp, :::6969->6969/udp torrust-tracker-1 34d29e792ee2 mysql:8.0 "docker-entrypoint.s…" 18 minutes ago Up 5 seconds (healthy) 0.0.0.0:3306->3306/tcp, :::3306->3306/tcp, 33060/tcp torrust-mysql-1 -``` +``` And you should be able to use the application, for example making a request to the API: @@ -319,24 +330,23 @@ The storage folder must contain your certificates: ```s storage/tracker/lib/tls - ├── localhost.crt - └── localhost.key + ├── localhost.crt + └── localhost.key +storage/http_api/lib/tls + ├── localhost.crt + └── localhost.key ``` You have not enabled it in your `tracker.toml` file: ```toml +[http_trackers.tsl_config] +ssl_cert_path = "./storage/tracker/lib/tls/localhost.crt" +ssl_key_path = 
"./storage/tracker/lib/tls/localhost.key" -[[http_trackers]] -# ... -ssl_enabled = true -# ... - -[http_api] -# ... -ssl_enabled = true -# ... - +[http_api.tsl_config] +ssl_cert_path = "./storage/http_api/lib/tls/localhost.crt" +ssl_key_path = "./storage/http_api/lib/tls/localhost.key" ``` > NOTE: you can enable it independently for each HTTP tracker or the API. diff --git a/LICENSE-AGPL_3_0 b/docs/licenses/LICENSE-AGPL_3_0 similarity index 100% rename from LICENSE-AGPL_3_0 rename to docs/licenses/LICENSE-AGPL_3_0 diff --git a/LICENSE-MIT_0 b/docs/licenses/LICENSE-MIT_0 similarity index 100% rename from LICENSE-MIT_0 rename to docs/licenses/LICENSE-MIT_0 diff --git a/docs/media/flamegraph.svg b/docs/media/flamegraph.svg new file mode 100644 index 000000000..58387ee06 --- /dev/null +++ b/docs/media/flamegraph.svg @@ -0,0 +1,491 @@ +Flame Graph Reset ZoomSearch merge_sched_in (93 samples, 0.02%)event_sched_in (68 samples, 0.02%)perf_ibs_add (50 samples, 0.01%)perf_ibs_start (41 samples, 0.01%)ctx_sched_in (117 samples, 0.03%)visit_groups_merge.constprop.0.isra.0 (114 samples, 0.03%)finish_task_switch.isra.0 (122 samples, 0.03%)__perf_event_task_sched_in (119 samples, 0.03%)profiling (170 samples, 0.04%)ret_from_fork (126 samples, 0.03%)schedule_tail (126 samples, 0.03%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (81 samples, 0.02%)[[vdso]] (750 samples, 0.20%)<torrust_tracker::shared::crypto::ephemeral_instance_keys::RANDOM_SEED as core::ops::deref::Deref>::deref::__stability::LAZY (889 samples, 0.23%)<alloc::collections::btree::map::Values<K,V> as core::iter::traits::iterator::Iterator>::next (99 samples, 0.03%)<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (94 samples, 0.02%)alloc::collections::btree::navigate::LazyLeafRange<alloc::collections::btree::node::marker::Immut,K,V>::next_unchecked (84 samples, 0.02%)alloc::collections::btree::navigate::LazyLeafRange<BorrowType,K,V>::init_front (62 samples, 
0.02%)[[vdso]] (968 samples, 0.26%)__GI___clock_gettime (58 samples, 0.02%)__memcpy_avx512_unaligned_erms (143 samples, 0.04%)_int_free (38 samples, 0.01%)_int_malloc (178 samples, 0.05%)core::ptr::drop_in_place<[core::option::Option<core::task::wake::Waker>: 32]> (39 samples, 0.01%)epoll_wait (676 samples, 0.18%)tokio::runtime::context::with_scheduler (85 samples, 0.02%)core::option::Option<T>::map (65 samples, 0.02%)mio::sys::unix::selector::epoll::Selector::select::{{closure}} (60 samples, 0.02%)mio::poll::Poll::poll (84 samples, 0.02%)mio::sys::unix::selector::epoll::Selector::select (84 samples, 0.02%)tokio::runtime::io::driver::Driver::turn (164 samples, 0.04%)tokio::runtime::io::scheduled_io::ScheduledIo::set_readiness (48 samples, 0.01%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (76 samples, 0.02%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (50 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::wake (113 samples, 0.03%)tokio::runtime::scheduler::multi_thread::idle::Idle::notify_should_wakeup (415 samples, 0.11%)core::sync::atomic::AtomicUsize::fetch_add (410 samples, 0.11%)core::sync::atomic::atomic_add (410 samples, 0.11%)tokio::runtime::scheduler::multi_thread::idle::Idle::worker_to_notify (475 samples, 0.13%)tokio::runtime::driver::Handle::unpark (40 samples, 0.01%)tokio::runtime::driver::IoHandle::unpark (40 samples, 0.01%)__entry_text_start (99 samples, 0.03%)tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (180 samples, 0.05%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (169 samples, 0.04%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark_condvar (124 samples, 0.03%)tokio::loom::std::mutex::Mutex<T>::lock (123 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::task::Schedule for alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>::release (60 samples, 
0.02%)tokio::runtime::task::list::OwnedTasks<S>::remove (53 samples, 0.01%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::remove (53 samples, 0.01%)tokio::runtime::scheduler::multi_thread::park::Inner::park_condvar (122 samples, 0.03%)tokio::loom::std::mutex::Mutex<T>::lock (75 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (287 samples, 0.08%)tokio::runtime::scheduler::multi_thread::park::Parker::park (232 samples, 0.06%)tokio::runtime::scheduler::multi_thread::park::Inner::park (232 samples, 0.06%)core::cell::RefCell<T>::borrow_mut (67 samples, 0.02%)core::cell::RefCell<T>::try_borrow_mut (67 samples, 0.02%)core::cell::BorrowRefMut::new (67 samples, 0.02%)tokio::runtime::coop::budget (86 samples, 0.02%)tokio::runtime::coop::with_budget (86 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}} (80 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (372 samples, 0.10%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_searching (240 samples, 0.06%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::transition_worker_from_searching (133 samples, 0.04%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_parked_local (103 samples, 0.03%)tokio::runtime::task::harness::Harness<T,S>::dealloc (46 samples, 0.01%)core::mem::drop (41 samples, 0.01%)core::ptr::drop_in_place<alloc::boxed::Box<tokio::runtime::task::core::Cell<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}},alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>>> (41 samples, 0.01%)tokio::runtime::scheduler::multi_thread::stats::Stats::end_processing_scheduled_tasks (77 samples, 0.02%)<T as core::slice::cmp::SliceContains>::slice_contains::{{closure}} (210 samples, 
0.06%)core::cmp::impls::<impl core::cmp::PartialEq for usize>::eq (210 samples, 0.06%)core::slice::<impl [T]>::contains (521 samples, 0.14%)<T as core::slice::cmp::SliceContains>::slice_contains (521 samples, 0.14%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::any (521 samples, 0.14%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (125 samples, 0.03%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (125 samples, 0.03%)tokio::runtime::scheduler::multi_thread::idle::Idle::is_parked (586 samples, 0.15%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_parked (617 samples, 0.16%)tokio::runtime::scheduler::multi_thread::worker::Context::park (767 samples, 0.20%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_to_parked (115 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_if_work_pending (78 samples, 0.02%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::is_empty (56 samples, 0.01%)tokio::runtime::scheduler::multi_thread::queue::Inner<T>::is_empty (52 samples, 0.01%)tokio::runtime::scheduler::multi_thread::queue::Inner<T>::len (45 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::next_local_task (38 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::next_task (105 samples, 0.03%)core::num::<impl u32>::wrapping_add (129 samples, 0.03%)core::sync::atomic::AtomicU64::compare_exchange (138 samples, 0.04%)core::sync::atomic::atomic_compare_exchange (138 samples, 0.04%)tokio::runtime::scheduler::multi_thread::queue::pack (311 samples, 0.08%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (745 samples, 0.20%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (778 samples, 0.21%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (821 samples, 
0.22%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (2,036 samples, 0.54%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (2,036 samples, 0.54%)tokio::runtime::scheduler::multi_thread::worker::run (2,036 samples, 0.54%)tokio::runtime::context::runtime::enter_runtime (2,036 samples, 0.54%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (2,036 samples, 0.54%)tokio::runtime::context::set_scheduler (2,036 samples, 0.54%)std::thread::local::LocalKey<T>::with (2,036 samples, 0.54%)std::thread::local::LocalKey<T>::try_with (2,036 samples, 0.54%)tokio::runtime::context::set_scheduler::{{closure}} (2,036 samples, 0.54%)tokio::runtime::context::scoped::Scoped<T>::set (2,036 samples, 0.54%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (2,036 samples, 0.54%)tokio::runtime::scheduler::multi_thread::worker::Context::run (2,036 samples, 0.54%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (2,422 samples, 0.64%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}} (2,422 samples, 0.64%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (58 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (58 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::poll (2,584 samples, 0.68%)tokio::runtime::task::core::Core<T,S>::drop_future_or_output (160 samples, 0.04%)tokio::runtime::task::core::Core<T,S>::set_stage (138 samples, 0.04%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (2,724 samples, 0.72%)tokio::runtime::task::harness::poll_future::{{closure}} (2,724 samples, 0.72%)tokio::runtime::task::core::Core<T,S>::store_output (140 samples, 0.04%)tokio::runtime::task::harness::poll_future (2,796 samples, 0.74%)std::panic::catch_unwind (2,788 samples, 0.74%)std::panicking::try (2,788 samples, 0.74%)std::panicking::try::do_call (2,784 samples, 
0.74%)core::mem::manually_drop::ManuallyDrop<T>::take (60 samples, 0.02%)core::ptr::read (60 samples, 0.02%)tokio::runtime::task::raw::poll (2,887 samples, 0.76%)tokio::runtime::task::harness::Harness<T,S>::poll (2,876 samples, 0.76%)tokio::runtime::task::harness::Harness<T,S>::poll_inner (2,876 samples, 0.76%)tokio::runtime::task::state::State::transition_to_running (74 samples, 0.02%)tokio::runtime::task::state::State::fetch_update_action (74 samples, 0.02%)core::array::<impl core::default::Default for [T: 32]>::default (83 samples, 0.02%)tokio::runtime::time::wheel::Wheel::poll (58 samples, 0.02%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process_at_time (325 samples, 0.09%)tokio::runtime::time::Driver::park_internal (147 samples, 0.04%)tokio::runtime::time::wheel::Wheel::next_expiration (53 samples, 0.01%)torrust_tracker::servers::udp::logging::log_request (43 samples, 0.01%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (112 samples, 0.03%)alloc::vec::from_elem (42 samples, 0.01%)<u8 as alloc::vec::spec_from_elem::SpecFromElem>::from_elem (42 samples, 0.01%)alloc::raw_vec::RawVec<T,A>::with_capacity_zeroed_in (42 samples, 0.01%)alloc::raw_vec::RawVec<T,A>::try_allocate_in (42 samples, 0.01%)<alloc::alloc::Global as core::alloc::Allocator>::allocate_zeroed (42 samples, 0.01%)alloc::alloc::Global::alloc_impl (42 samples, 0.01%)alloc::alloc::alloc_zeroed (42 samples, 0.01%)__rdl_alloc_zeroed (42 samples, 0.01%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc_zeroed (42 samples, 0.01%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (318 samples, 0.08%)torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (73 samples, 0.02%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl 
torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::update_torrent_with_peer_and_get_stats (61 samples, 0.02%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents (85 samples, 0.02%)std::sync::rwlock::RwLock<T>::read (80 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::read (63 samples, 0.02%)[[heap]] (8,241 samples, 2.18%)[..[[vdso]] (1,241 samples, 0.33%)<alloc::collections::btree::map::Values<K,V> as core::iter::traits::iterator::Iterator>::next (96 samples, 0.03%)<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (69 samples, 0.02%)alloc::collections::btree::navigate::LazyLeafRange<alloc::collections::btree::node::marker::Immut,K,V>::next_unchecked (43 samples, 0.01%)<alloc::string::String as core::fmt::Write>::write_char (41 samples, 0.01%)<alloc::string::String as core::fmt::Write>::write_str (379 samples, 0.10%)alloc::string::String::push_str (45 samples, 0.01%)alloc::vec::Vec<T,A>::extend_from_slice (45 samples, 0.01%)<alloc::vec::Vec<T,A> as alloc::vec::spec_extend::SpecExtend<&T,core::slice::iter::Iter<T>>>::spec_extend (45 samples, 0.01%)alloc::vec::Vec<T,A>::append_elements (45 samples, 0.01%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter::SpecFromIter<T,I>>::from_iter (66 samples, 0.02%)core::num::<impl u64>::rotate_left (48 samples, 0.01%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::c_rounds (148 samples, 0.04%)core::num::<impl u64>::wrapping_add (42 samples, 0.01%)core::hash::sip::u8to64_le (134 samples, 0.04%)<core::hash::sip::Hasher<S> as 
core::hash::Hasher>::write (506 samples, 0.13%)<core::net::socket_addr::SocketAddr as core::hash::Hash>::hash (54 samples, 0.01%)<tokio::future::poll_fn::PollFn<F> as core::future::future::Future>::poll (49 samples, 0.01%)core::ops::function::FnMut::call_mut (41 samples, 0.01%)tokio::runtime::coop::poll_proceed (41 samples, 0.01%)tokio::runtime::context::budget (41 samples, 0.01%)std::thread::local::LocalKey<T>::try_with (41 samples, 0.01%)tokio::io::ready::Ready::intersection (48 samples, 0.01%)tokio::io::ready::Ready::from_interest (46 samples, 0.01%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (297 samples, 0.08%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::io::scheduled_io::Waiters>> (83 samples, 0.02%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (83 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::unlock (67 samples, 0.02%)core::result::Result<T,E>::is_err (375 samples, 0.10%)core::result::Result<T,E>::is_ok (375 samples, 0.10%)tokio::loom::std::mutex::Mutex<T>::lock (493 samples, 0.13%)std::sync::mutex::Mutex<T>::lock (463 samples, 0.12%)std::sys::sync::mutex::futex::Mutex::lock (443 samples, 0.12%)core::sync::atomic::AtomicU32::compare_exchange (51 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (51 samples, 0.01%)<tokio::runtime::io::scheduled_io::Readiness as core::ops::drop::Drop>::drop (1,400 samples, 0.37%)<torrust_tracker_primitives::info_hash::InfoHash as core::fmt::Display>::fmt (50 samples, 0.01%)[[vdso]] (3,493 samples, 0.92%)[profiling] (68 samples, 0.02%)core::fmt::write (51 samples, 0.01%)__GI___clock_gettime (73 samples, 0.02%)__GI___libc_free (449 samples, 0.12%)arena_for_chunk (85 samples, 0.02%)arena_for_chunk (71 samples, 0.02%)heap_for_ptr (67 samples, 0.02%)heap_max_size (49 samples, 0.01%)__GI___libc_malloc (293 samples, 0.08%)__GI___lll_lock_wait_private (144 samples, 0.04%)futex_wait (95 samples, 0.03%)__GI___lll_lock_wake_private (479 
samples, 0.13%)__GI___pthread_disable_asynccancel (90 samples, 0.02%)__GI_getsockname (1,281 samples, 0.34%)__libc_calloc (42 samples, 0.01%)__libc_recvfrom (121 samples, 0.03%)__libc_sendto (602 samples, 0.16%)__memchr_evex (56 samples, 0.01%)__memcmp_evex_movbe (1,539 samples, 0.41%)__memcpy_avx512_unaligned_erms (1,154 samples, 0.30%)__memset_avx512_unaligned_erms (1,515 samples, 0.40%)__posix_memalign (131 samples, 0.03%)__posix_memalign (85 samples, 0.02%)_mid_memalign (85 samples, 0.02%)_int_free (1,524 samples, 0.40%)_int_malloc (1,484 samples, 0.39%)_int_memalign (156 samples, 0.04%)alloc::collections::btree::map::BTreeMap<K,V,A>::entry (82 samples, 0.02%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (64 samples, 0.02%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (64 samples, 0.02%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (64 samples, 0.02%)alloc::collections::btree::map::BTreeMap<K,V,A>::insert (161 samples, 0.04%)alloc::raw_vec::RawVec<T,A>::grow_one (44 samples, 0.01%)alloc::raw_vec::RawVec<T,A>::reserve::do_reserve_and_handle (146 samples, 0.04%)alloc::raw_vec::RawVec<T,A>::grow_amortized (101 samples, 0.03%)alloc::raw_vec::finish_grow (199 samples, 0.05%)core::ptr::drop_in_place<aquatic_udp_protocol::response::Response> (166 samples, 0.04%)core::ptr::drop_in_place<tokio::net::udp::UdpSocket::send_to<&core::net::socket_addr::SocketAddr>::{{closure}}> (57 samples, 0.02%)core::ptr::drop_in_place<tokio::runtime::task::core::Stage<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}}>> (373 samples, 0.10%)core::ptr::drop_in_place<torrust_tracker::servers::udp::server::Udp::send_response::{{closure}}> (61 samples, 0.02%)malloc_consolidate (373 samples, 
0.10%)<ppv_lite86::soft::x2<W,G> as core::ops::arith::AddAssign>::add_assign (62 samples, 0.02%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as core::ops::arith::AddAssign>::add_assign (62 samples, 0.02%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as core::ops::arith::Add>::add (62 samples, 0.02%)core::core_arch::x86::avx2::_mm256_add_epi32 (62 samples, 0.02%)<ppv_lite86::soft::x2<W,G> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right16 (46 samples, 0.01%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right16 (46 samples, 0.01%)core::core_arch::x86::avx2::_mm256_shuffle_epi8 (46 samples, 0.01%)<ppv_lite86::soft::x2<W,G> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right20 (40 samples, 0.01%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right20 (40 samples, 0.01%)rand_chacha::guts::round (244 samples, 0.06%)<ppv_lite86::soft::x2<W,G> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right25 (45 samples, 0.01%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right25 (45 samples, 0.01%)core::core_arch::x86::avx2::_mm256_or_si256 (45 samples, 0.01%)rand_chacha::guts::refill_wide::impl_avx2 (346 samples, 0.09%)rand_chacha::guts::refill_wide::fn_impl (345 samples, 0.09%)rand_chacha::guts::refill_wide_impl (345 samples, 0.09%)tokio::runtime::context::with_scheduler (45 samples, 0.01%)tokio::runtime::io::driver::Driver::turn (50 samples, 0.01%)__entry_text_start (235 samples, 0.06%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (2,041 samples, 0.54%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (957 samples, 0.25%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (255 samples, 0.07%)core::sync::atomic::AtomicUsize::fetch_add (100 samples, 0.03%)core::sync::atomic::atomic_add (100 
samples, 0.03%)tokio::runtime::scheduler::multi_thread::idle::Idle::worker_to_notify (155 samples, 0.04%)tokio::runtime::scheduler::multi_thread::idle::Idle::notify_should_wakeup (108 samples, 0.03%)tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (84 samples, 0.02%)tokio::runtime::task::core::Header::get_owner_id (38 samples, 0.01%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with (38 samples, 0.01%)tokio::runtime::task::core::Header::get_owner_id::{{closure}} (38 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::task::Schedule for alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>::release (43 samples, 0.01%)tokio::runtime::task::list::OwnedTasks<S>::remove (39 samples, 0.01%)torrust_tracker::servers::udp::handlers::RequestId::make (173 samples, 0.05%)__entry_text_start (171 samples, 0.05%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (293 samples, 0.08%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (287 samples, 0.08%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (230 samples, 0.06%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (230 samples, 0.06%)tokio::runtime::task::core::Core<T,S>::set_stage (456 samples, 0.12%)core::sync::atomic::AtomicUsize::fetch_xor (54 samples, 0.01%)core::sync::atomic::atomic_xor (54 samples, 0.01%)tokio::runtime::task::harness::Harness<T,S>::complete (73 samples, 0.02%)tokio::runtime::task::state::State::transition_to_complete (57 samples, 0.02%)std::sync::poison::Flag::done (103 samples, 0.03%)core::ptr::drop_in_place<tokio::util::sharded_list::ShardGuard<tokio::runtime::task::Task<alloc::sync::Arc<tokio::runtime::scheduler::current_thread::Handle>>,tokio::runtime::task::core::Header>> (136 samples, 
0.04%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::util::linked_list::LinkedList<tokio::runtime::task::Task<alloc::sync::Arc<tokio::runtime::scheduler::current_thread::Handle>>,tokio::runtime::task::core::Header>>> (136 samples, 0.04%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (136 samples, 0.04%)tokio::util::sharded_list::ShardGuard<L,<L as tokio::util::linked_list::Link>::Target>::push (242 samples, 0.06%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::push_front (60 samples, 0.02%)core::result::Result<T,E>::is_err (101 samples, 0.03%)core::result::Result<T,E>::is_ok (101 samples, 0.03%)tokio::runtime::task::list::OwnedTasks<S>::bind_inner (599 samples, 0.16%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::lock_shard (255 samples, 0.07%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::shard_inner (255 samples, 0.07%)tokio::loom::std::mutex::Mutex<T>::lock (252 samples, 0.07%)std::sync::mutex::Mutex<T>::lock (252 samples, 0.07%)std::sys::sync::mutex::futex::Mutex::lock (251 samples, 0.07%)core::sync::atomic::AtomicU32::compare_exchange (150 samples, 0.04%)core::sync::atomic::atomic_compare_exchange (150 samples, 0.04%)std::sync::poison::Flag::done (100 samples, 0.03%)std::thread::panicking (55 samples, 0.01%)std::panicking::panicking (55 samples, 0.01%)std::panicking::panic_count::count_is_zero (55 samples, 0.01%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::scheduler::multi_thread::worker::Synced>> (173 samples, 0.05%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (173 samples, 0.05%)std::sys::sync::mutex::futex::Mutex::unlock (73 samples, 0.02%)<T as core::slice::cmp::SliceContains>::slice_contains::{{closure}} (293 samples, 0.08%)core::cmp::impls::<impl core::cmp::PartialEq for usize>::eq (293 samples, 0.08%)core::slice::<impl [T]>::contains (631 samples, 0.17%)<T 
as core::slice::cmp::SliceContains>::slice_contains (631 samples, 0.17%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::any (631 samples, 0.17%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (134 samples, 0.04%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (134 samples, 0.04%)tokio::runtime::scheduler::multi_thread::idle::Idle::is_parked (844 samples, 0.22%)tokio::loom::std::mutex::Mutex<T>::lock (38 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_parked (858 samples, 0.23%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::scheduler::multi_thread::worker::Synced>> (189 samples, 0.05%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (189 samples, 0.05%)std::sys::sync::mutex::futex::Mutex::unlock (181 samples, 0.05%)core::sync::atomic::AtomicU32::swap (65 samples, 0.02%)core::sync::atomic::atomic_swap (65 samples, 0.02%)tokio::loom::std::mutex::Mutex<T>::lock (44 samples, 0.01%)std::sync::mutex::Mutex<T>::lock (43 samples, 0.01%)std::sys::sync::mutex::futex::Mutex::lock (38 samples, 0.01%)tokio::runtime::scheduler::multi_thread::idle::Idle::transition_worker_to_parked (253 samples, 0.07%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (42 samples, 0.01%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (42 samples, 0.01%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (48 samples, 0.01%)alloc::sync::Arc<T,A>::inner (48 samples, 0.01%)core::ptr::non_null::NonNull<T>::as_ref (48 samples, 0.01%)core::sync::atomic::AtomicU32::load (44 samples, 0.01%)core::sync::atomic::atomic_load (44 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_if_work_pending (356 samples, 0.09%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::is_empty (216 samples, 
0.06%)tokio::runtime::scheduler::multi_thread::queue::Inner<T>::is_empty (168 samples, 0.04%)tokio::runtime::scheduler::multi_thread::queue::Inner<T>::len (98 samples, 0.03%)core::sync::atomic::AtomicU64::load (54 samples, 0.01%)core::sync::atomic::atomic_load (54 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::park (1,635 samples, 0.43%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_to_parked (667 samples, 0.18%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (66 samples, 0.02%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (61 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (96 samples, 0.03%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (1,751 samples, 0.46%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (1,751 samples, 0.46%)tokio::runtime::scheduler::multi_thread::worker::run (1,751 samples, 0.46%)tokio::runtime::context::runtime::enter_runtime (1,751 samples, 0.46%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (1,751 samples, 0.46%)tokio::runtime::context::set_scheduler (1,751 samples, 0.46%)std::thread::local::LocalKey<T>::with (1,751 samples, 0.46%)std::thread::local::LocalKey<T>::try_with (1,751 samples, 0.46%)tokio::runtime::context::set_scheduler::{{closure}} (1,751 samples, 0.46%)tokio::runtime::context::scoped::Scoped<T>::set (1,751 samples, 0.46%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (1,751 samples, 0.46%)tokio::runtime::scheduler::multi_thread::worker::Context::run (1,751 samples, 0.46%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (1,772 samples, 0.47%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}} (1,772 samples, 0.47%)tokio::runtime::task::raw::poll (1,805 samples, 0.48%)tokio::runtime::task::harness::Harness<T,S>::poll (1,787 samples, 
0.47%)tokio::runtime::task::harness::Harness<T,S>::poll_inner (1,787 samples, 0.47%)tokio::runtime::task::harness::poll_future (1,787 samples, 0.47%)std::panic::catch_unwind (1,787 samples, 0.47%)std::panicking::try (1,787 samples, 0.47%)std::panicking::try::do_call (1,787 samples, 0.47%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (1,787 samples, 0.47%)tokio::runtime::task::harness::poll_future::{{closure}} (1,787 samples, 0.47%)tokio::runtime::task::core::Core<T,S>::poll (1,787 samples, 0.47%)tokio::runtime::time::wheel::level::Level::next_expiration (54 samples, 0.01%)tokio::runtime::time::wheel::Wheel::next_expiration (72 samples, 0.02%)torrust_tracker::core::Tracker::send_stats_event::{{closure}} (50 samples, 0.01%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::c_rounds (38 samples, 0.01%)<std::hash::random::DefaultHasher as core::hash::Hasher>::finish (157 samples, 0.04%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::finish (157 samples, 0.04%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::finish (157 samples, 0.04%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::d_rounds (89 samples, 0.02%)<torrust_tracker_clock::time_extent::TimeExtent as core::hash::Hash>::hash (47 samples, 0.01%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::build (265 samples, 0.07%)torrust_tracker::servers::udp::peer_builder::from_request (70 samples, 0.02%)torrust_tracker::servers::udp::request::AnnounceWrapper::new (101 samples, 0.03%)<alloc::sync::Arc<T,A> as core::clone::Clone>::clone (74 samples, 0.02%)core::sync::atomic::AtomicUsize::fetch_add (48 samples, 0.01%)core::sync::atomic::atomic_add (48 samples, 0.01%)core::ptr::drop_in_place<alloc::sync::Arc<tokio::net::udp::UdpSocket>> (88 samples, 0.02%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (88 samples, 0.02%)core::result::Result<T,E>::map_err (52 samples, 
0.01%)torrust_tracker::core::Tracker::update_torrent_with_peer_and_get_stats::{{closure}} (269 samples, 0.07%)torrust_tracker::core::Tracker::announce::{{closure}} (308 samples, 0.08%)torrust_tracker::servers::udp::handlers::handle_announce::{{closure}} (372 samples, 0.10%)torrust_tracker::servers::udp::handlers::handle_request::{{closure}} (448 samples, 0.12%)core::fmt::num::imp::<impl core::fmt::Display for i32>::fmt (73 samples, 0.02%)core::fmt::num::imp::fmt_u64 (69 samples, 0.02%)core::fmt::num::imp::<impl core::fmt::Display for i64>::fmt (41 samples, 0.01%)<T as alloc::string::ToString>::to_string (182 samples, 0.05%)core::option::Option<T>::expect (91 samples, 0.02%)torrust_tracker_primitives::info_hash::InfoHash::to_hex_string (41 samples, 0.01%)<T as alloc::string::ToString>::to_string (41 samples, 0.01%)torrust_tracker::servers::udp::logging::log_request (450 samples, 0.12%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (1,319 samples, 0.35%)torrust_tracker::servers::udp::logging::log_response (83 samples, 0.02%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (1,808 samples, 0.48%)torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (293 samples, 0.08%)torrust_tracker::servers::udp::server::Udp::send_packet::{{closure}} (283 samples, 0.07%)tokio::net::udp::UdpSocket::send_to::{{closure}} (265 samples, 0.07%)tokio::net::udp::UdpSocket::send_to_addr::{{closure}} (227 samples, 0.06%)tokio::runtime::io::registration::Registration::async_io::{{closure}} (124 samples, 0.03%)tokio::net::udp::UdpSocket::send_to_addr::{{closure}}::{{closure}} (61 samples, 0.02%)mio::net::udp::UdpSocket::send_to (61 samples, 0.02%)mio::io_source::IoSource<T>::do_io (61 samples, 0.02%)mio::sys::unix::stateless_io_source::IoSourceState::do_io (61 samples, 0.02%)mio::net::udp::UdpSocket::send_to::{{closure}} (61 samples, 0.02%)std::net::udp::UdpSocket::send_to (61 samples, 
0.02%)std::sys_common::net::UdpSocket::send_to (61 samples, 0.02%)std::sys::pal::unix::cvt (61 samples, 0.02%)<isize as std::sys::pal::unix::IsMinusOne>::is_minus_one (61 samples, 0.02%)<core::iter::adapters::filter::Filter<I,P> as core::iter::traits::iterator::Iterator>::count::to_usize::{{closure}} (84 samples, 0.02%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_stats::{{closure}} (84 samples, 0.02%)torrust_tracker_primitives::peer::Peer::is_seeder (84 samples, 0.02%)<core::iter::adapters::filter::Filter<I,P> as core::iter::traits::iterator::Iterator>::count (173 samples, 0.05%)core::iter::traits::iterator::Iterator::sum (173 samples, 0.05%)<usize as core::iter::traits::accum::Sum>::sum (173 samples, 0.05%)<core::iter::adapters::map::Map<I,F> as core::iter::traits::iterator::Iterator>::fold (173 samples, 0.05%)core::iter::traits::iterator::Iterator::fold (173 samples, 0.05%)core::iter::adapters::map::map_fold::{{closure}} (85 samples, 0.02%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_stats (337 samples, 0.09%)alloc::collections::btree::map::BTreeMap<K,V,A>::values (156 samples, 0.04%)core::mem::drop (39 samples, 0.01%)core::ptr::drop_in_place<core::option::Option<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>>> (39 samples, 0.01%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>> (39 samples, 0.01%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (39 samples, 0.01%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::insert_or_update_peer_and_get_stats (750 samples, 
0.20%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer_and_get_stats (691 samples, 0.18%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer (349 samples, 0.09%)core::option::Option<T>::is_some_and (106 samples, 0.03%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer::{{closure}} (105 samples, 0.03%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>> (101 samples, 0.03%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (101 samples, 0.03%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (61 samples, 0.02%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (84 samples, 0.02%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (84 samples, 0.02%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (181 samples, 0.05%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (181 samples, 0.05%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (181 samples, 0.05%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (181 samples, 0.05%)<u8 as core::slice::cmp::SliceOrd>::compare (181 samples, 0.05%)core::cmp::impls::<impl core::cmp::Ord for isize>::cmp (76 samples, 0.02%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (551 samples, 0.15%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (506 samples, 0.13%)alloc::collections::btree::search::<impl 
alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (506 samples, 0.13%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (572 samples, 0.15%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get (751 samples, 0.20%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (64 samples, 0.02%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (83 samples, 0.02%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (83 samples, 0.02%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (216 samples, 0.06%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (216 samples, 0.06%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (216 samples, 0.06%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (216 samples, 0.06%)<u8 as core::slice::cmp::SliceOrd>::compare (216 samples, 0.06%)core::cmp::impls::<impl core::cmp::Ord for isize>::cmp (87 samples, 0.02%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (539 samples, 0.14%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (527 samples, 0.14%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (501 samples, 0.13%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (501 samples, 0.13%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl 
torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::update_torrent_with_peer_and_get_stats (640 samples, 0.17%)core::sync::atomic::AtomicU32::load (117 samples, 0.03%)core::sync::atomic::atomic_load (117 samples, 0.03%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents (167 samples, 0.04%)std::sync::rwlock::RwLock<T>::read (162 samples, 0.04%)std::sys::sync::rwlock::futex::RwLock::read (158 samples, 0.04%)tracing::span::Span::log (82 samples, 0.02%)tracing::span::Span::record_all (143 samples, 0.04%)unlink_chunk (679 samples, 0.18%)<rand::rngs::adapter::reseeding::ReseedingRng<R,Rsdr> as rand_core::RngCore>::next_u32 (71 samples, 0.02%)<rand_core::block::BlockRng<R> as rand_core::RngCore>::next_u32 (71 samples, 0.02%)rand_core::block::BlockRng<R>::generate_and_set (59 samples, 0.02%)rand::rng::Rng::gen (72 samples, 0.02%)rand::distributions::other::<impl rand::distributions::distribution::Distribution<[T: _]> for rand::distributions::Standard>::sample (72 samples, 0.02%)rand::rng::Rng::gen (72 samples, 0.02%)rand::distributions::integer::<impl rand::distributions::distribution::Distribution<u8> for rand::distributions::Standard>::sample (72 samples, 0.02%)<rand::rngs::thread::ThreadRng as rand_core::RngCore>::next_u32 (72 samples, 0.02%)[anon] (31,375 samples, 8.29%)[anon]uuid::v4::<impl uuid::Uuid>::new_v4 (90 samples, 0.02%)uuid::rng::bytes (87 samples, 0.02%)rand::random (87 samples, 0.02%)_int_free (938 samples, 0.25%)tcache_put (62 samples, 0.02%)hashbrown::raw::h2 (40 samples, 
0.01%)hashbrown::raw::RawTable<T,A>::find_or_find_insert_slot (63 samples, 0.02%)hashbrown::raw::RawTableInner::find_or_find_insert_slot_inner (53 samples, 0.01%)hashbrown::map::HashMap<K,V,S,A>::insert (65 samples, 0.02%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (128 samples, 0.03%)[profiling] (1,452 samples, 0.38%)<alloc::collections::btree::map::Values<K,V> as core::iter::traits::iterator::Iterator>::next (104 samples, 0.03%)<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (89 samples, 0.02%)alloc::collections::btree::navigate::LazyLeafRange<alloc::collections::btree::node::marker::Immut,K,V>::next_unchecked (70 samples, 0.02%)alloc::collections::btree::navigate::LazyLeafRange<BorrowType,K,V>::init_front (42 samples, 0.01%)<alloc::string::String as core::fmt::Write>::write_char (41 samples, 0.01%)alloc::vec::Vec<T,A>::reserve (49 samples, 0.01%)alloc::raw_vec::RawVec<T,A>::reserve (45 samples, 0.01%)<alloc::string::String as core::fmt::Write>::write_str (157 samples, 0.04%)alloc::string::String::push_str (107 samples, 0.03%)alloc::vec::Vec<T,A>::extend_from_slice (107 samples, 0.03%)<alloc::vec::Vec<T,A> as alloc::vec::spec_extend::SpecExtend<&T,core::slice::iter::Iter<T>>>::spec_extend (107 samples, 0.03%)alloc::vec::Vec<T,A>::append_elements (107 samples, 0.03%)core::num::<impl u64>::rotate_left (45 samples, 0.01%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::c_rounds (114 samples, 0.03%)core::hash::sip::u8to64_le (102 samples, 0.03%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (388 samples, 0.10%)<core::net::ip_addr::Ipv4Addr as core::hash::Hash>::hash (110 samples, 0.03%)core::array::<impl core::hash::Hash for [T: N]>::hash (109 samples, 0.03%)core::hash::impls::<impl core::hash::Hash for [T]>::hash (109 samples, 0.03%)core::hash::impls::<impl core::hash::Hash for u8>::hash_slice (93 samples, 0.02%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (93 
samples, 0.02%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (93 samples, 0.02%)<core::net::socket_addr::SocketAddrV4 as core::hash::Hash>::hash (135 samples, 0.04%)<core::net::socket_addr::SocketAddr as core::hash::Hash>::hash (202 samples, 0.05%)tokio::runtime::context::CONTEXT::__getit (59 samples, 0.02%)core::cell::Cell<T>::get (59 samples, 0.02%)__entry_text_start (42 samples, 0.01%)<tokio::future::poll_fn::PollFn<F> as core::future::future::Future>::poll (164 samples, 0.04%)core::ops::function::FnMut::call_mut (149 samples, 0.04%)tokio::runtime::coop::poll_proceed (149 samples, 0.04%)tokio::runtime::context::budget (149 samples, 0.04%)std::thread::local::LocalKey<T>::try_with (149 samples, 0.04%)tokio::runtime::context::budget::{{closure}} (73 samples, 0.02%)tokio::runtime::coop::poll_proceed::{{closure}} (73 samples, 0.02%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (218 samples, 0.06%)__entry_text_start (55 samples, 0.01%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::io::scheduled_io::Waiters>> (89 samples, 0.02%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (89 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::unlock (65 samples, 0.02%)core::sync::atomic::AtomicU32::swap (41 samples, 0.01%)core::sync::atomic::atomic_swap (41 samples, 0.01%)std::sync::mutex::MutexGuard<T>::new (41 samples, 0.01%)std::sync::poison::Flag::guard (41 samples, 0.01%)std::thread::panicking (39 samples, 0.01%)std::panicking::panicking (39 samples, 0.01%)std::panicking::panic_count::count_is_zero (39 samples, 0.01%)core::result::Result<T,E>::is_err (337 samples, 0.09%)core::result::Result<T,E>::is_ok (337 samples, 0.09%)core::sync::atomic::AtomicU32::compare_exchange (60 samples, 0.02%)core::sync::atomic::atomic_compare_exchange (60 samples, 0.02%)tokio::loom::std::mutex::Mutex<T>::lock (483 samples, 0.13%)std::sync::mutex::Mutex<T>::lock (456 samples, 
0.12%)std::sys::sync::mutex::futex::Mutex::lock (415 samples, 0.11%)<tokio::runtime::io::scheduled_io::Readiness as core::ops::drop::Drop>::drop (864 samples, 0.23%)__memcpy_avx512_unaligned_erms (223 samples, 0.06%)[profiling] (233 samples, 0.06%)binascii::bin2hex (128 samples, 0.03%)<torrust_tracker_primitives::info_hash::InfoHash as core::fmt::Display>::fmt (404 samples, 0.11%)__entry_text_start (811 samples, 0.21%)[[vdso]] (12,846 samples, 3.40%)[[v..__GI___clock_gettime (141 samples, 0.04%)arena_for_chunk (178 samples, 0.05%)arena_for_chunk (151 samples, 0.04%)heap_for_ptr (116 samples, 0.03%)heap_max_size (50 samples, 0.01%)__GI___libc_free (827 samples, 0.22%)arena_for_chunk (86 samples, 0.02%)arena_for_chunk (67 samples, 0.02%)heap_for_ptr (39 samples, 0.01%)__GI___libc_malloc (618 samples, 0.16%)tcache_get (96 samples, 0.03%)__GI___libc_write (225 samples, 0.06%)__GI___libc_write (234 samples, 0.06%)__GI___lll_lock_wait_private (110 samples, 0.03%)futex_wait (66 samples, 0.02%)__GI___lll_lock_wake_private (138 samples, 0.04%)__GI___pthread_disable_asynccancel (209 samples, 0.06%)__GI___pthread_enable_asynccancel (46 samples, 0.01%)__entry_text_start (46 samples, 0.01%)compiler_builtins::float::conv::int_to_float::u128_to_f64_bits (138 samples, 0.04%)__floattidf (183 samples, 0.05%)compiler_builtins::float::conv::__floattidf (172 samples, 0.05%)exp_inline (152 samples, 0.04%)log_inline (148 samples, 0.04%)__ieee754_pow_fma (333 samples, 0.09%)__libc_calloc (299 samples, 0.08%)__libc_recvfrom (1,422 samples, 0.38%)__libc_sendto (881 samples, 0.23%)__memcmp_evex_movbe (277 samples, 0.07%)__memcpy_avx512_unaligned_erms (4,073 samples, 1.08%)__posix_memalign (367 samples, 0.10%)__posix_memalign (216 samples, 0.06%)_mid_memalign (206 samples, 0.05%)arena_for_chunk (38 samples, 0.01%)__pow (75 samples, 0.02%)__entry_text_start (468 samples, 0.12%)_int_free (2,282 samples, 0.60%)tcache_put (139 samples, 0.04%)_int_malloc (2,521 samples, 0.67%)_int_memalign (241 
samples, 0.06%)alloc::collections::btree::map::BTreeMap<K,V,A>::insert (125 samples, 0.03%)<core::result::Result<T,E> as core::ops::try_trait::Try>::branch (165 samples, 0.04%)alloc::raw_vec::RawVec<T,A>::set_ptr_and_cap (69 samples, 0.02%)alloc::raw_vec::RawVec<T,A>::reserve::do_reserve_and_handle (362 samples, 0.10%)alloc::raw_vec::RawVec<T,A>::grow_amortized (318 samples, 0.08%)alloc::raw_vec::finish_grow (233 samples, 0.06%)core::result::Result<T,E>::map_err (88 samples, 0.02%)core::mem::drop (70 samples, 0.02%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::io::scheduled_io::Waiters>> (70 samples, 0.02%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (70 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::unlock (61 samples, 0.02%)core::sync::atomic::AtomicU32::swap (59 samples, 0.02%)core::sync::atomic::atomic_swap (59 samples, 0.02%)alloc_new_heap (197 samples, 0.05%)tokio::runtime::io::scheduled_io::ScheduledIo::wake (197 samples, 0.05%)core::fmt::Formatter::pad (45 samples, 0.01%)core::fmt::Formatter::pad_integral (93 samples, 0.02%)core::fmt::Formatter::pad_integral::write_prefix (47 samples, 0.01%)core::ptr::drop_in_place<[core::option::Option<core::task::wake::Waker>: 32]> (662 samples, 0.17%)core::ptr::drop_in_place<core::option::Option<core::task::wake::Waker>> (346 samples, 0.09%)core::ptr::drop_in_place<tokio::runtime::task::core::Stage<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}}>> (578 samples, 0.15%)core::ptr::drop_in_place<torrust_tracker::servers::udp::server::Udp::receive_request::{{closure}}> (54 samples, 0.01%)core::str::converts::from_utf8 (81 samples, 0.02%)core::str::validations::run_utf8_validation (70 samples, 0.02%)epoll_wait (87 samples, 0.02%)hashbrown::map::HashMap<K,V,S,A>::insert (52 samples, 0.01%)malloc_consolidate (109 samples, 0.03%)std::sys::pal::unix::time::Timespec::new (76 samples, 0.02%)std::sys::pal::unix::time::Timespec::now (357 samples, 
0.09%)core::cmp::impls::<impl core::cmp::PartialOrd<&B> for &A>::ge (42 samples, 0.01%)core::cmp::PartialOrd::ge (42 samples, 0.01%)std::sys::pal::unix::time::Timespec::sub_timespec (202 samples, 0.05%)std::sys::sync::mutex::futex::Mutex::lock_contended (233 samples, 0.06%)std::sys::sync::mutex::futex::Mutex::spin (67 samples, 0.02%)std::sys_common::net::TcpListener::socket_addr (85 samples, 0.02%)std::sys_common::net::sockname (80 samples, 0.02%)syscall (511 samples, 0.14%)core::ptr::drop_in_place<core::cell::RefMut<core::option::Option<alloc::boxed::Box<tokio::runtime::scheduler::multi_thread::worker::Core>>>> (95 samples, 0.03%)core::ptr::drop_in_place<core::cell::BorrowRefMut> (95 samples, 0.03%)<core::cell::BorrowRefMut as core::ops::drop::Drop>::drop (95 samples, 0.03%)core::cell::Cell<T>::set (95 samples, 0.03%)core::cell::Cell<T>::replace (95 samples, 0.03%)core::mem::replace (95 samples, 0.03%)core::ptr::write (95 samples, 0.03%)tokio::runtime::context::with_scheduler (369 samples, 0.10%)std::thread::local::LocalKey<T>::try_with (256 samples, 0.07%)tokio::runtime::context::with_scheduler::{{closure}} (255 samples, 0.07%)tokio::runtime::context::scoped::Scoped<T>::with (255 samples, 0.07%)tokio::runtime::scheduler::multi_thread::worker::with_current::{{closure}} (255 samples, 0.07%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task::{{closure}} (255 samples, 0.07%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_local (156 samples, 0.04%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_parked_local (52 samples, 0.01%)tokio::io::ready::Ready::from_mio (40 samples, 0.01%)core::sync::atomic::AtomicUsize::compare_exchange (39 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (39 samples, 0.01%)tokio::runtime::io::driver::Driver::turn 
(609 samples, 0.16%)tokio::runtime::io::scheduled_io::ScheduledIo::set_readiness (439 samples, 0.12%)__entry_text_start (200 samples, 0.05%)__entry_text_start (331 samples, 0.09%)__entry_text_start (74 samples, 0.02%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (1,520 samples, 0.40%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (828 samples, 0.22%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (279 samples, 0.07%)core::mem::drop (88 samples, 0.02%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::io::scheduled_io::Waiters>> (88 samples, 0.02%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (88 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::unlock (72 samples, 0.02%)core::sync::atomic::AtomicU32::swap (65 samples, 0.02%)core::sync::atomic::atomic_swap (65 samples, 0.02%)tokio::loom::std::mutex::Mutex<T>::lock (69 samples, 0.02%)std::sync::mutex::Mutex<T>::lock (66 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock (48 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::wake (566 samples, 0.15%)alloc::vec::Vec<T,A>::pop (77 samples, 0.02%)core::ptr::read (48 samples, 0.01%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::scheduler::multi_thread::worker::Synced>> (50 samples, 0.01%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (50 samples, 0.01%)tokio::loom::std::mutex::Mutex<T>::lock (46 samples, 0.01%)std::sync::mutex::Mutex<T>::lock (46 samples, 0.01%)core::sync::atomic::AtomicUsize::fetch_add (4,232 samples, 1.12%)core::sync::atomic::atomic_add (4,232 samples, 1.12%)tokio::runtime::scheduler::multi_thread::idle::Idle::notify_should_wakeup (4,298 samples, 1.14%)tokio::runtime::scheduler::multi_thread::idle::Idle::worker_to_notify (4,674 samples, 1.24%)__entry_text_start (67 samples, 0.02%)tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (210 samples, 
0.06%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (107 samples, 0.03%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark_condvar (84 samples, 0.02%)tokio::loom::std::mutex::Mutex<T>::lock (82 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::next_remote_task (120 samples, 0.03%)tokio::runtime::scheduler::inject::shared::Shared<T>::is_empty (48 samples, 0.01%)tokio::runtime::scheduler::inject::shared::Shared<T>::len (48 samples, 0.01%)core::sync::atomic::AtomicUsize::load (48 samples, 0.01%)core::sync::atomic::atomic_load (48 samples, 0.01%)tokio::runtime::task::core::Header::get_owner_id (61 samples, 0.02%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with (61 samples, 0.02%)tokio::runtime::task::core::Header::get_owner_id::{{closure}} (61 samples, 0.02%)std::sync::poison::Flag::done (462 samples, 0.12%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::util::linked_list::LinkedList<tokio::runtime::task::Task<alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>,tokio::runtime::task::core::Header>>> (540 samples, 0.14%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (540 samples, 0.14%)std::sys::sync::mutex::futex::Mutex::unlock (76 samples, 0.02%)core::sync::atomic::AtomicUsize::fetch_sub (91 samples, 0.02%)core::sync::atomic::atomic_sub (91 samples, 0.02%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::remove (61 samples, 0.02%)core::result::Result<T,E>::is_err (88 samples, 0.02%)core::result::Result<T,E>::is_ok (88 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::task::Schedule for alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>::release (884 samples, 0.23%)tokio::runtime::task::list::OwnedTasks<S>::remove (873 samples, 0.23%)tokio::util::sharded_list::ShardedList<L,<L as 
tokio::util::linked_list::Link>::Target>::remove (796 samples, 0.21%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::shard_inner (102 samples, 0.03%)tokio::loom::std::mutex::Mutex<T>::lock (102 samples, 0.03%)std::sync::mutex::Mutex<T>::lock (102 samples, 0.03%)std::sys::sync::mutex::futex::Mutex::lock (96 samples, 0.03%)core::cell::RefCell<T>::borrow_mut (38 samples, 0.01%)core::cell::RefCell<T>::try_borrow_mut (38 samples, 0.01%)core::cell::BorrowRefMut::new (38 samples, 0.01%)tokio::runtime::scheduler::defer::Defer::wake (86 samples, 0.02%)std::sys::pal::unix::futex::futex_wait (101 samples, 0.03%)tokio::runtime::scheduler::multi_thread::park::Inner::park_condvar (186 samples, 0.05%)std::sync::condvar::Condvar::wait (132 samples, 0.03%)std::sys::sync::condvar::futex::Condvar::wait (130 samples, 0.03%)std::sys::sync::condvar::futex::Condvar::wait_optional_timeout (130 samples, 0.03%)core::sync::atomic::AtomicUsize::compare_exchange (69 samples, 0.02%)core::sync::atomic::atomic_compare_exchange (69 samples, 0.02%)tokio::runtime::scheduler::multi_thread::park::Inner::park_driver (281 samples, 0.07%)tokio::runtime::driver::Driver::park (96 samples, 0.03%)tokio::runtime::driver::TimeDriver::park (96 samples, 0.03%)tokio::runtime::time::Driver::park (91 samples, 0.02%)tokio::runtime::scheduler::multi_thread::park::Parker::park (627 samples, 0.17%)tokio::runtime::scheduler::multi_thread::park::Inner::park (627 samples, 0.17%)tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (1,130 samples, 0.30%)tokio::runtime::scheduler::multi_thread::worker::Core::should_notify_others (62 samples, 0.02%)core::cell::RefCell<T>::borrow_mut (84 samples, 0.02%)core::cell::RefCell<T>::try_borrow_mut (84 samples, 0.02%)core::cell::BorrowRefMut::new (84 samples, 0.02%)core::cell::RefCell<T>::borrow_mut (250 samples, 0.07%)core::cell::RefCell<T>::try_borrow_mut (250 samples, 0.07%)core::cell::BorrowRefMut::new (250 samples, 
0.07%)tokio::runtime::coop::budget (368 samples, 0.10%)tokio::runtime::coop::with_budget (368 samples, 0.10%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}} (318 samples, 0.08%)tokio::runtime::scheduler::multi_thread::stats::Stats::start_poll (82 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (600 samples, 0.16%)tokio::runtime::signal::Driver::process (79 samples, 0.02%)tokio::runtime::io::driver::signal::<impl tokio::runtime::io::driver::Driver>::consume_signal_ready (49 samples, 0.01%)core::ptr::drop_in_place<alloc::sync::Arc<tokio::net::udp::UdpSocket>> (62 samples, 0.02%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (62 samples, 0.02%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker::core::Tracker>> (140 samples, 0.04%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (140 samples, 0.04%)core::sync::atomic::AtomicUsize::fetch_sub (91 samples, 0.02%)core::sync::atomic::atomic_sub (91 samples, 0.02%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (261 samples, 0.07%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (260 samples, 0.07%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (233 samples, 0.06%)tokio::runtime::task::core::Core<T,S>::set_stage (353 samples, 0.09%)core::sync::atomic::AtomicUsize::fetch_xor (127 samples, 0.03%)core::sync::atomic::atomic_xor (127 samples, 0.03%)tokio::runtime::task::state::State::transition_to_complete (135 samples, 0.04%)core::sync::atomic::AtomicUsize::fetch_sub (44 samples, 0.01%)core::sync::atomic::atomic_sub (44 samples, 0.01%)tokio::runtime::task::harness::Harness<T,S>::complete (242 samples, 0.06%)tokio::runtime::task::state::State::transition_to_terminal (67 samples, 0.02%)tokio::runtime::task::harness::Harness<T,S>::dealloc (53 samples, 0.01%)std::sync::poison::Flag::done (203 samples, 
0.05%)core::ptr::drop_in_place<tokio::util::sharded_list::ShardGuard<tokio::runtime::task::Task<alloc::sync::Arc<tokio::runtime::scheduler::current_thread::Handle>>,tokio::runtime::task::core::Header>> (272 samples, 0.07%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::util::linked_list::LinkedList<tokio::runtime::task::Task<alloc::sync::Arc<tokio::runtime::scheduler::current_thread::Handle>>,tokio::runtime::task::core::Header>>> (272 samples, 0.07%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (272 samples, 0.07%)std::sys::sync::mutex::futex::Mutex::unlock (65 samples, 0.02%)core::sync::atomic::AtomicU32::swap (48 samples, 0.01%)core::sync::atomic::atomic_swap (48 samples, 0.01%)core::sync::atomic::AtomicUsize::fetch_add (78 samples, 0.02%)core::sync::atomic::atomic_add (78 samples, 0.02%)<tokio::runtime::task::Task<S> as tokio::util::linked_list::Link>::pointers (39 samples, 0.01%)tokio::runtime::task::core::Header::get_trailer (39 samples, 0.01%)tokio::util::sharded_list::ShardGuard<L,<L as tokio::util::linked_list::Link>::Target>::push (531 samples, 0.14%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::push_front (144 samples, 0.04%)core::result::Result<T,E>::is_err (40 samples, 0.01%)core::result::Result<T,E>::is_ok (40 samples, 0.01%)tokio::runtime::task::list::OwnedTasks<S>::bind_inner (1,157 samples, 0.31%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::lock_shard (431 samples, 0.11%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::shard_inner (430 samples, 0.11%)tokio::loom::std::mutex::Mutex<T>::lock (429 samples, 0.11%)std::sync::mutex::Mutex<T>::lock (429 samples, 0.11%)std::sys::sync::mutex::futex::Mutex::lock (425 samples, 0.11%)core::sync::atomic::AtomicU32::compare_exchange (385 samples, 0.10%)core::sync::atomic::atomic_compare_exchange (385 samples, 0.10%)tokio::runtime::task::raw::drop_abort_handle 
(184 samples, 0.05%)tokio::runtime::task::harness::Harness<T,S>::drop_reference (167 samples, 0.04%)tokio::runtime::task::state::State::ref_dec (167 samples, 0.04%)core::sync::atomic::AtomicUsize::compare_exchange (44 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (44 samples, 0.01%)tokio::runtime::task::raw::drop_join_handle_slow (126 samples, 0.03%)tokio::runtime::task::harness::Harness<T,S>::drop_join_handle_slow (117 samples, 0.03%)tokio::runtime::task::state::State::unset_join_interested (76 samples, 0.02%)tokio::runtime::task::state::State::fetch_update (76 samples, 0.02%)core::result::Result<T,E>::is_err (53 samples, 0.01%)core::result::Result<T,E>::is_ok (53 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::maintenance (87 samples, 0.02%)tokio::loom::std::mutex::Mutex<T>::lock (82 samples, 0.02%)std::sync::mutex::Mutex<T>::lock (82 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock (82 samples, 0.02%)tokio::runtime::scheduler::multi_thread::idle::Idle::is_parked (56 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_parked (60 samples, 0.02%)tokio::runtime::scheduler::multi_thread::idle::Idle::transition_worker_to_parked (78 samples, 0.02%)tokio::loom::std::mutex::Mutex<T>::lock (74 samples, 0.02%)std::sync::mutex::Mutex<T>::lock (74 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock (74 samples, 0.02%)core::result::Result<T,E>::is_err (60 samples, 0.02%)core::result::Result<T,E>::is_ok (60 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Context::park (266 samples, 0.07%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_to_parked (91 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::next_task (44 samples, 0.01%)core::sync::atomic::AtomicU64::compare_exchange (98 samples, 0.03%)core::sync::atomic::atomic_compare_exchange (98 samples, 0.03%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (272 samples, 
0.07%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (339 samples, 0.09%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (549 samples, 0.15%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (980 samples, 0.26%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (980 samples, 0.26%)tokio::runtime::scheduler::multi_thread::worker::run (980 samples, 0.26%)tokio::runtime::context::runtime::enter_runtime (980 samples, 0.26%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (980 samples, 0.26%)tokio::runtime::context::set_scheduler (980 samples, 0.26%)std::thread::local::LocalKey<T>::with (980 samples, 0.26%)std::thread::local::LocalKey<T>::try_with (980 samples, 0.26%)tokio::runtime::context::set_scheduler::{{closure}} (980 samples, 0.26%)tokio::runtime::context::scoped::Scoped<T>::set (980 samples, 0.26%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (980 samples, 0.26%)tokio::runtime::scheduler::multi_thread::worker::Context::run (980 samples, 0.26%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (1,050 samples, 0.28%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}} (1,047 samples, 0.28%)tokio::runtime::task::core::Core<T,S>::poll (1,075 samples, 0.28%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (1,099 samples, 0.29%)tokio::runtime::task::harness::poll_future::{{closure}} (1,099 samples, 0.29%)tokio::runtime::task::harness::poll_future (1,117 samples, 0.30%)std::panic::catch_unwind (1,115 samples, 0.29%)std::panicking::try (1,115 samples, 0.29%)std::panicking::try::do_call (1,114 samples, 0.29%)tokio::runtime::task::state::State::transition_to_running (377 samples, 0.10%)tokio::runtime::task::state::State::fetch_update_action (377 samples, 0.10%)tokio::runtime::task::raw::poll (1,566 samples, 0.41%)tokio::runtime::task::harness::Harness<T,S>::poll (1,533 
samples, 0.41%)tokio::runtime::task::harness::Harness<T,S>::poll_inner (1,525 samples, 0.40%)core::array::<impl core::default::Default for [T: 32]>::default (42 samples, 0.01%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process_at_time (190 samples, 0.05%)tokio::loom::std::mutex::Mutex<T>::lock (47 samples, 0.01%)std::sync::mutex::Mutex<T>::lock (47 samples, 0.01%)tokio::runtime::time::source::TimeSource::instant_to_tick (43 samples, 0.01%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process (73 samples, 0.02%)tokio::runtime::time::source::TimeSource::now (50 samples, 0.01%)tokio::runtime::time::Driver::park_internal (346 samples, 0.09%)core::num::<impl u64>::rotate_right (51 samples, 0.01%)tokio::runtime::time::wheel::level::Level::next_occupied_slot (441 samples, 0.12%)tokio::runtime::time::wheel::level::slot_range (99 samples, 0.03%)core::num::<impl usize>::pow (99 samples, 0.03%)tokio::runtime::time::wheel::level::level_range (154 samples, 0.04%)tokio::runtime::time::wheel::level::slot_range (146 samples, 0.04%)core::num::<impl usize>::pow (146 samples, 0.04%)tokio::runtime::time::wheel::level::Level::next_expiration (833 samples, 0.22%)tokio::runtime::time::wheel::level::slot_range (161 samples, 0.04%)core::num::<impl usize>::pow (161 samples, 0.04%)tokio::runtime::time::wheel::Wheel::next_expiration (1,138 samples, 0.30%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::is_empty (55 samples, 0.01%)core::option::Option<T>::is_some (55 samples, 0.01%)torrust_tracker::core::Tracker::get_torrent_peers_for_peer (112 samples, 0.03%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::get_peers_for_client (79 samples, 0.02%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry 
for torrust_tracker_torrent_repository::entry::Torrent>::get_peers_for_client (48 samples, 0.01%)core::iter::traits::iterator::Iterator::collect (41 samples, 0.01%)<alloc::vec::Vec<T> as core::iter::traits::collect::FromIterator<T>>::from_iter (41 samples, 0.01%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter::SpecFromIter<T,I>>::from_iter (41 samples, 0.01%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter_nested::SpecFromIterNested<T,I>>::from_iter (41 samples, 0.01%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::build (234 samples, 0.06%)std::hash::random::DefaultHasher::new (108 samples, 0.03%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::get_last_time_extent (98 samples, 0.03%)torrust_tracker_clock::time_extent::Make::now (75 samples, 0.02%)<alloc::sync::Arc<T,A> as core::clone::Clone>::clone (415 samples, 0.11%)core::sync::atomic::AtomicUsize::fetch_add (129 samples, 0.03%)core::sync::atomic::atomic_add (129 samples, 0.03%)core::ptr::drop_in_place<alloc::sync::Arc<tokio::net::udp::UdpSocket>> (112 samples, 0.03%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (112 samples, 0.03%)core::sync::atomic::AtomicUsize::fetch_sub (49 samples, 0.01%)core::sync::atomic::atomic_sub (49 samples, 0.01%)torrust_tracker::servers::udp::server::Udp::process_request::{{closure}} (1,399 samples, 0.37%)<F as core::future::into_future::IntoFuture>::into_future (40 samples, 0.01%)<alloc::sync::Arc<T,A> as core::clone::Clone>::clone (330 samples, 0.09%)core::sync::atomic::AtomicUsize::fetch_add (210 samples, 0.06%)core::sync::atomic::atomic_add (210 samples, 0.06%)torrust_tracker::servers::udp::handlers::handle_packet (80 samples, 0.02%)core::ptr::drop_in_place<alloc::sync::Arc<tokio::net::udp::UdpSocket>> (212 samples, 0.06%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (212 samples, 0.06%)torrust_tracker::core::Tracker::update_torrent_with_peer_and_get_stats::{{closure}} (321 samples, 
0.08%)torrust_tracker::core::Tracker::announce::{{closure}} (434 samples, 0.11%)torrust_tracker::servers::udp::handlers::handle_announce::{{closure}} (635 samples, 0.17%)torrust_tracker::servers::udp::handlers::handle_connect::{{closure}} (56 samples, 0.01%)torrust_tracker::servers::udp::handlers::handle_request::{{closure}} (986 samples, 0.26%)core::fmt::Formatter::new (50 samples, 0.01%)core::intrinsics::copy_nonoverlapping (39 samples, 0.01%)core::fmt::num::imp::<impl core::fmt::Display for i32>::fmt (208 samples, 0.05%)core::fmt::num::imp::fmt_u64 (188 samples, 0.05%)core::fmt::num::imp::<impl core::fmt::Display for i64>::fmt (131 samples, 0.03%)core::fmt::num::imp::fmt_u64 (119 samples, 0.03%)<T as alloc::string::ToString>::to_string (426 samples, 0.11%)core::option::Option<T>::expect (74 samples, 0.02%)torrust_tracker::servers::udp::logging::map_action_name (51 samples, 0.01%)torrust_tracker::servers::udp::logging::log_request (679 samples, 0.18%)<T as alloc::string::ToString>::to_string (106 samples, 0.03%)core::fmt::num::imp::<impl core::fmt::Display for i32>::fmt (72 samples, 0.02%)core::fmt::num::imp::fmt_u64 (58 samples, 0.02%)core::option::Option<T>::expect (38 samples, 0.01%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (2,539 samples, 0.67%)torrust_tracker::servers::udp::logging::log_response (198 samples, 0.05%)alloc::vec::from_elem (583 samples, 0.15%)<u8 as alloc::vec::spec_from_elem::SpecFromElem>::from_elem (583 samples, 0.15%)alloc::raw_vec::RawVec<T,A>::with_capacity_zeroed_in (583 samples, 0.15%)alloc::raw_vec::RawVec<T,A>::try_allocate_in (583 samples, 0.15%)<alloc::alloc::Global as core::alloc::Allocator>::allocate_zeroed (583 samples, 0.15%)alloc::alloc::Global::alloc_impl (583 samples, 0.15%)alloc::alloc::alloc_zeroed (583 samples, 0.15%)__rdl_alloc_zeroed (583 samples, 0.15%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc_zeroed (583 samples, 0.15%)__entry_text_start 
(110 samples, 0.03%)__entry_text_start (278 samples, 0.07%)std::sys::pal::unix::cvt (338 samples, 0.09%)<isize as std::sys::pal::unix::IsMinusOne>::is_minus_one (338 samples, 0.09%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (6,133 samples, 1.62%)torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (2,253 samples, 0.60%)torrust_tracker::servers::udp::server::Udp::send_packet::{{closure}} (1,179 samples, 0.31%)tokio::net::udp::UdpSocket::send_to::{{closure}} (1,082 samples, 0.29%)tokio::net::udp::UdpSocket::send_to_addr::{{closure}} (1,028 samples, 0.27%)tokio::runtime::io::registration::Registration::async_io::{{closure}} (922 samples, 0.24%)tokio::net::udp::UdpSocket::send_to_addr::{{closure}}::{{closure}} (716 samples, 0.19%)mio::net::udp::UdpSocket::send_to (681 samples, 0.18%)mio::io_source::IoSource<T>::do_io (681 samples, 0.18%)mio::sys::unix::stateless_io_source::IoSourceState::do_io (681 samples, 0.18%)mio::net::udp::UdpSocket::send_to::{{closure}} (681 samples, 0.18%)std::net::udp::UdpSocket::send_to (681 samples, 0.18%)std::sys_common::net::UdpSocket::send_to (661 samples, 0.17%)<alloc::sync::Arc<T,A> as core::clone::Clone>::clone (38 samples, 0.01%)core::sync::atomic::AtomicUsize::fetch_add (38 samples, 0.01%)core::sync::atomic::atomic_add (38 samples, 0.01%)alloc::vec::Vec<T>::with_capacity (46 samples, 0.01%)alloc::vec::Vec<T,A>::with_capacity_in (46 samples, 0.01%)tokio::net::udp::UdpSocket::readable::{{closure}} (474 samples, 0.13%)tokio::net::udp::UdpSocket::ready::{{closure}} (454 samples, 0.12%)torrust_tracker::servers::udp::server::Udp::receive_request::{{closure}} (655 samples, 0.17%)tokio::net::udp::UdpSocket::recv_buf_from::{{closure}} (79 samples, 0.02%)tokio::runtime::io::registration::Registration::async_io::{{closure}} (58 samples, 0.02%)__rdl_alloc (49 samples, 0.01%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (49 samples, 
0.01%)std::sys::pal::unix::alloc::aligned_malloc (49 samples, 0.01%)core::option::Option<T>::map (299 samples, 0.08%)tokio::task::spawn::spawn_inner::{{closure}} (299 samples, 0.08%)tokio::runtime::scheduler::Handle::spawn (299 samples, 0.08%)tokio::runtime::scheduler::multi_thread::handle::Handle::spawn (299 samples, 0.08%)tokio::runtime::scheduler::multi_thread::handle::Handle::bind_new_task (299 samples, 0.08%)tokio::runtime::task::list::OwnedTasks<S>::bind (287 samples, 0.08%)tokio::runtime::task::new_task (278 samples, 0.07%)tokio::runtime::task::raw::RawTask::new (278 samples, 0.07%)tokio::runtime::task::core::Cell<T,S>::new (278 samples, 0.07%)alloc::boxed::Box<T>::new (56 samples, 0.01%)alloc::alloc::exchange_malloc (56 samples, 0.01%)<alloc::alloc::Global as core::alloc::Allocator>::allocate (56 samples, 0.01%)alloc::alloc::Global::alloc_impl (56 samples, 0.01%)alloc::alloc::alloc (56 samples, 0.01%)torrust_tracker::servers::udp::server::Udp::run_with_graceful_shutdown::{{closure}}::{{closure}} (1,073 samples, 0.28%)torrust_tracker::servers::udp::server::Udp::run_udp_server::{{closure}} (1,073 samples, 0.28%)torrust_tracker::servers::udp::server::Udp::spawn_request_processor (300 samples, 0.08%)tokio::task::spawn::spawn (300 samples, 0.08%)tokio::task::spawn::spawn_inner (300 samples, 0.08%)tokio::runtime::context::current::with_current (300 samples, 0.08%)std::thread::local::LocalKey<T>::try_with (300 samples, 0.08%)tokio::runtime::context::current::with_current::{{closure}} (300 samples, 0.08%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_stats (81 samples, 0.02%)alloc::collections::btree::map::BTreeMap<K,V,A>::values (62 samples, 0.02%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for 
alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::insert_or_update_peer_and_get_stats (353 samples, 0.09%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer_and_get_stats (273 samples, 0.07%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer (191 samples, 0.05%)core::option::Option<T>::is_some_and (46 samples, 0.01%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer::{{closure}} (45 samples, 0.01%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>> (44 samples, 0.01%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (44 samples, 0.01%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get (61 samples, 0.02%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::update_torrent_with_peer_and_get_stats (48 samples, 0.01%)core::sync::atomic::AtomicU32::load (46 samples, 0.01%)core::sync::atomic::atomic_load (46 samples, 
0.01%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents (74 samples, 0.02%)std::sync::rwlock::RwLock<T>::read (68 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::read (65 samples, 0.02%)tracing::span::Span::log (64 samples, 0.02%)core::fmt::Arguments::new_v1 (39 samples, 0.01%)tracing_core::span::Record::is_empty (67 samples, 0.02%)tracing_core::field::ValueSet::is_empty (67 samples, 0.02%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::all (43 samples, 0.01%)tracing::span::Span::record_all (253 samples, 0.07%)unlink_chunk (517 samples, 0.14%)uuid::builder::Builder::with_variant (112 samples, 0.03%)__entry_text_start (86 samples, 0.02%)uuid::builder::Builder::from_random_bytes (150 samples, 0.04%)uuid::builder::Builder::with_version (38 samples, 0.01%)__entry_text_start (187 samples, 0.05%)<rand::rngs::adapter::reseeding::ReseedingRng<R,Rsdr> as rand_core::RngCore>::next_u32 (433 samples, 0.11%)<rand_core::block::BlockRng<R> as rand_core::RngCore>::next_u32 (433 samples, 0.11%)rand::rng::Rng::gen (445 samples, 0.12%)rand::distributions::other::<impl rand::distributions::distribution::Distribution<[T: _]> for rand::distributions::Standard>::sample (445 samples, 0.12%)rand::rng::Rng::gen (445 samples, 0.12%)rand::distributions::integer::<impl rand::distributions::distribution::Distribution<u8> for rand::distributions::Standard>::sample (445 samples, 0.12%)<rand::rngs::thread::ThreadRng as rand_core::RngCore>::next_u32 (445 samples, 0.12%)[unknown] (62,585 samples, 16.54%)[unknown]uuid::v4::<impl uuid::Uuid>::new_v4 (699 samples, 0.18%)uuid::rng::bytes (533 samples, 0.14%)rand::random (533 samples, 0.14%)__entry_text_start (59 samples, 0.02%)__GI___libc_malloc (138 samples, 0.04%)__memcpy_avx512_unaligned_erms (107 samples, 0.03%)_int_free (89 
samples, 0.02%)<core::result::Result<T,E> as core::ops::try_trait::Try>::branch (41 samples, 0.01%)alloc::raw_vec::RawVec<T,A>::reserve::do_reserve_and_handle (73 samples, 0.02%)alloc::raw_vec::RawVec<T,A>::grow_amortized (64 samples, 0.02%)__perf_event_task_sched_in (42 samples, 0.01%)ctx_sched_in (42 samples, 0.01%)visit_groups_merge.constprop.0.isra.0 (42 samples, 0.01%)__x64_sys_futex (45 samples, 0.01%)do_futex (45 samples, 0.01%)futex_wait (45 samples, 0.01%)futex_wait_queue (45 samples, 0.01%)schedule (45 samples, 0.01%)__schedule (45 samples, 0.01%)finish_task_switch.isra.0 (45 samples, 0.01%)std::sys::pal::unix::futex::futex_wait (48 samples, 0.01%)syscall (48 samples, 0.01%)entry_SYSCALL_64_after_hwframe (48 samples, 0.01%)do_syscall_64 (48 samples, 0.01%)tokio::loom::std::mutex::Mutex<T>::lock (50 samples, 0.01%)std::sync::mutex::Mutex<T>::lock (50 samples, 0.01%)std::sys::sync::mutex::futex::Mutex::lock (50 samples, 0.01%)std::sys::sync::mutex::futex::Mutex::lock_contended (50 samples, 0.01%)[[vdso]] (49 samples, 0.01%)[[vdso]] (43 samples, 0.01%)[[vdso]] (1,196 samples, 0.32%)__pow (1,270 samples, 0.34%)std::f64::<impl f64>::powf (1,334 samples, 0.35%)std::time::Instant::now (43 samples, 0.01%)std::sys::pal::unix::time::Instant::now (43 samples, 0.01%)tokio::runtime::scheduler::multi_thread::stats::Stats::end_processing_scheduled_tasks (1,559 samples, 0.41%)tokio::runtime::scheduler::multi_thread::stats::Stats::start_processing_scheduled_tasks (47 samples, 0.01%)std::time::Instant::now (45 samples, 0.01%)std::sys::pal::unix::time::Instant::now (45 samples, 0.01%)std::sys::pal::unix::time::Timespec::now (40 samples, 0.01%)ep_item_poll.isra.0 (48 samples, 0.01%)ep_send_events (86 samples, 0.02%)__x64_sys_epoll_wait (135 samples, 0.04%)do_epoll_wait (131 samples, 0.03%)ep_poll (124 samples, 0.03%)mio::poll::Poll::poll (144 samples, 0.04%)mio::sys::unix::selector::epoll::Selector::select (144 samples, 0.04%)epoll_wait (143 samples, 
0.04%)entry_SYSCALL_64_after_hwframe (140 samples, 0.04%)do_syscall_64 (137 samples, 0.04%)tokio::runtime::io::driver::Driver::turn (157 samples, 0.04%)tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (169 samples, 0.04%)tokio::runtime::scheduler::multi_thread::park::Parker::park_timeout (168 samples, 0.04%)tokio::runtime::driver::Driver::park_timeout (168 samples, 0.04%)tokio::runtime::driver::TimeDriver::park_timeout (168 samples, 0.04%)tokio::runtime::time::Driver::park_timeout (168 samples, 0.04%)tokio::runtime::time::Driver::park_internal (161 samples, 0.04%)tokio::runtime::scheduler::multi_thread::worker::Context::maintenance (244 samples, 0.06%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (149 samples, 0.04%)alloc::sync::Arc<T,A>::inner (149 samples, 0.04%)core::ptr::non_null::NonNull<T>::as_ref (149 samples, 0.04%)core::result::Result<T,E>::is_ok (44 samples, 0.01%)core::sync::atomic::AtomicUsize::compare_exchange (43 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (43 samples, 0.01%)core::bool::<impl bool>::then (63 samples, 0.02%)__x64_sys_futex (297 samples, 0.08%)futex_setup_timer (44 samples, 0.01%)_raw_spin_unlock (43 samples, 0.01%)futex_unqueue (124 samples, 0.03%)__futex_queue (105 samples, 0.03%)plist_add (143 samples, 0.04%)_raw_spin_lock (582 samples, 0.15%)clear_buddies (87 samples, 0.02%)__update_load_avg_cfs_rq (68 samples, 0.02%)__update_load_avg_se (74 samples, 0.02%)clear_buddies (130 samples, 0.03%)update_cfs_group (489 samples, 0.13%)reweight_entity (204 samples, 0.05%)__calc_delta (373 samples, 0.10%)__cgroup_account_cputime (50 samples, 0.01%)cpuacct_charge (423 samples, 0.11%)update_curr (1,525 samples, 0.40%)update_min_vruntime (71 samples, 0.02%)__update_load_avg_cfs_rq (489 samples, 0.13%)__update_load_avg_se (418 samples, 0.11%)update_load_avg (1,435 samples, 0.38%)dequeue_entity (4,560 samples, 1.21%)update_min_vruntime (146 samples, 0.04%)update_cfs_group (222 samples, 
0.06%)update_curr (42 samples, 0.01%)dequeue_task_fair (5,294 samples, 1.40%)update_min_vruntime (65 samples, 0.02%)dequeue_task (5,351 samples, 1.41%)dequeue_task_fair (63 samples, 0.02%)_raw_spin_unlock (82 samples, 0.02%)__rcu_read_unlock (179 samples, 0.05%)perf_ibs_add (421 samples, 0.11%)perf_ibs_start (364 samples, 0.10%)perf_event_update_userpage (129 samples, 0.03%)event_sched_in (736 samples, 0.19%)merge_sched_in (889 samples, 0.23%)perf_pmu_nop_int (88 samples, 0.02%)ctx_sched_in (1,272 samples, 0.34%)visit_groups_merge.constprop.0.isra.0 (1,233 samples, 0.33%)rb_next (161 samples, 0.04%)perf_ctx_enable (43 samples, 0.01%)perf_ctx_sched_task_cb (68 samples, 0.02%)perf_pmu_nop_void (75 samples, 0.02%)__perf_event_task_sched_in (1,616 samples, 0.43%)__rcu_read_unlock (40 samples, 0.01%)_raw_spin_unlock (100 samples, 0.03%)finish_task_switch.isra.0 (2,202 samples, 0.58%)pick_next_task_fair (308 samples, 0.08%)newidle_balance (193 samples, 0.05%)__rcu_read_lock (39 samples, 0.01%)pick_next_task_idle (192 samples, 0.05%)__update_idle_core (128 samples, 0.03%)put_prev_entity (53 samples, 0.01%)check_cfs_rq_runtime (38 samples, 0.01%)check_spread.isra.0 (133 samples, 0.04%)pick_next_task (1,105 samples, 0.29%)put_prev_task_fair (422 samples, 0.11%)put_prev_entity (169 samples, 0.04%)__rcu_read_unlock (39 samples, 0.01%)_raw_spin_lock (91 samples, 0.02%)perf_ibs_del (737 samples, 0.19%)perf_ibs_stop (677 samples, 0.18%)native_read_msr (236 samples, 0.06%)event_sched_out (785 samples, 0.21%)__pmu_ctx_sched_out (1,001 samples, 0.26%)group_sched_out (928 samples, 0.25%)perf_ibs_del (73 samples, 0.02%)ctx_sched_out (1,285 samples, 0.34%)sched_clock_cpu (225 samples, 0.06%)sched_clock (194 samples, 0.05%)native_sched_clock (194 samples, 0.05%)perf_ctx_disable (116 samples, 0.03%)perf_ctx_sched_task_cb (85 samples, 0.02%)perf_pmu_nop_void (71 samples, 0.02%)__perf_event_task_sched_out (1,815 samples, 0.48%)perf_event_context_sched_out (1,620 samples, 
0.43%)prepare_task_switch (1,888 samples, 0.50%)psi_group_change (82 samples, 0.02%)psi_group_change (1,810 samples, 0.48%)record_times (50 samples, 0.01%)record_times (90 samples, 0.02%)psi_task_switch (2,481 samples, 0.66%)sched_clock_cpu (300 samples, 0.08%)sched_clock (256 samples, 0.07%)native_sched_clock (256 samples, 0.07%)put_prev_task_fair (111 samples, 0.03%)rcu_note_context_switch (60 samples, 0.02%)__schedule (14,876 samples, 3.93%)__sc..update_rq_clock (114 samples, 0.03%)sched_clock_cpu (81 samples, 0.02%)sched_clock (66 samples, 0.02%)native_sched_clock (66 samples, 0.02%)futex_wait_queue (15,388 samples, 4.07%)fute..schedule (15,047 samples, 3.98%)sche..__get_user_nocheck_4 (99 samples, 0.03%)_raw_spin_lock (50 samples, 0.01%)futex_hash (258 samples, 0.07%)futex_q_lock (294 samples, 0.08%)futex_q_unlock (47 samples, 0.01%)futex_wait_setup (1,137 samples, 0.30%)get_futex_key (137 samples, 0.04%)get_futex_key (71 samples, 0.02%)futex_wait (17,298 samples, 4.57%)futex..schedule (42 samples, 0.01%)do_futex (17,499 samples, 4.62%)do_fu..__x64_sys_futex (17,785 samples, 4.70%)__x64..do_futex (50 samples, 0.01%)__put_user_8 (417 samples, 0.11%)__get_user_8 (293 samples, 0.08%)__rseq_handle_notify_resume (2,024 samples, 0.53%)rseq_ip_fixup (472 samples, 0.12%)rseq_get_rseq_cs.isra.0 (88 samples, 0.02%)blkcg_maybe_throttle_current (123 samples, 0.03%)mem_cgroup_handle_over_high (234 samples, 0.06%)exit_to_user_mode_loop (2,785 samples, 0.74%)mem_cgroup_handle_over_high (59 samples, 0.02%)exit_to_user_mode_prepare (3,301 samples, 0.87%)switch_fpu_return (62 samples, 0.02%)fpregs_assert_state_consistent (42 samples, 0.01%)do_syscall_64 (21,396 samples, 5.65%)do_sysc..syscall_exit_to_user_mode (3,407 samples, 0.90%)entry_SYSCALL_64_after_hwframe (21,799 samples, 5.76%)entry_S..std::sys::pal::unix::futex::futex_wait (22,733 samples, 6.01%)std::sys..syscall (22,455 samples, 5.93%)syscallsyscall_return_via_sysret (192 samples, 
0.05%)core::result::Result<T,E>::is_err (236 samples, 0.06%)core::result::Result<T,E>::is_ok (236 samples, 0.06%)std::sync::condvar::Condvar::wait (22,990 samples, 6.08%)std::syn..std::sys::sync::condvar::futex::Condvar::wait (22,989 samples, 6.08%)std::sys..std::sys::sync::condvar::futex::Condvar::wait_optional_timeout (22,989 samples, 6.08%)std::sys..std::sys::sync::mutex::futex::Mutex::lock (256 samples, 0.07%)tokio::runtime::scheduler::multi_thread::park::Inner::park_condvar (23,237 samples, 6.14%)tokio::r..tokio::loom::std::mutex::Mutex<T>::lock (175 samples, 0.05%)std::sync::mutex::Mutex<T>::lock (86 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock (82 samples, 0.02%)core::sync::atomic::AtomicU32::compare_exchange (82 samples, 0.02%)core::sync::atomic::atomic_compare_exchange (82 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (127 samples, 0.03%)tokio::runtime::scheduler::multi_thread::park::Parker::park (102 samples, 0.03%)tokio::runtime::scheduler::multi_thread::park::Inner::park (102 samples, 0.03%)core::array::<impl core::default::Default for [T: 32]>::default (58 samples, 0.02%)tokio::runtime::time::wheel::level::Level::next_occupied_slot (104 samples, 0.03%)tokio::runtime::time::wheel::level::level_range (41 samples, 0.01%)tokio::runtime::time::wheel::level::Level::next_expiration (204 samples, 0.05%)tokio::runtime::time::wheel::level::slot_range (48 samples, 0.01%)core::num::<impl usize>::pow (48 samples, 0.01%)tokio::runtime::time::wheel::Wheel::next_expiration (389 samples, 0.10%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::is_empty (61 samples, 0.02%)core::option::Option<T>::is_some (61 samples, 0.02%)tokio::runtime::time::wheel::level::Level::next_expiration (60 samples, 0.02%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process_at_time (597 samples, 0.16%)tokio::runtime::time::wheel::Wheel::poll_at (82 samples, 
0.02%)tokio::runtime::time::wheel::Wheel::next_expiration (70 samples, 0.02%)core::option::Option<T>::map (163 samples, 0.04%)<mio::event::events::Iter as core::iter::traits::iterator::Iterator>::next (174 samples, 0.05%)core::result::Result<T,E>::map (82 samples, 0.02%)mio::sys::unix::selector::epoll::Selector::select::{{closure}} (82 samples, 0.02%)alloc::vec::Vec<T,A>::set_len (51 samples, 0.01%)[[vdso]] (152 samples, 0.04%)__GI___pthread_disable_asynccancel (86 samples, 0.02%)__x64_sys_epoll_wait (99 samples, 0.03%)__fget_light (521 samples, 0.14%)__fdget (595 samples, 0.16%)__rcu_read_unlock (60 samples, 0.02%)__put_user_nocheck_4 (916 samples, 0.24%)__put_user_nocheck_8 (1,566 samples, 0.41%)_raw_write_lock_irq (418 samples, 0.11%)queued_write_lock_slowpath (97 samples, 0.03%)_raw_write_unlock_irq (119 samples, 0.03%)ep_done_scan (55 samples, 0.01%)__pm_relax (54 samples, 0.01%)_raw_write_lock_irq (1,420 samples, 0.38%)queued_write_lock_slowpath (237 samples, 0.06%)_raw_write_unlock_irq (256 samples, 0.07%)_raw_write_lock_irq (1,302 samples, 0.34%)queued_write_lock_slowpath (356 samples, 0.09%)_raw_write_unlock_irq (135 samples, 0.04%)ep_done_scan (1,745 samples, 0.46%)eventfd_poll (107 samples, 0.03%)sock_poll (4,183 samples, 1.11%)udp_poll (3,029 samples, 0.80%)datagram_poll (2,364 samples, 0.62%)ep_item_poll.isra.0 (4,869 samples, 1.29%)udp_poll (92 samples, 0.02%)mutex_lock (697 samples, 0.18%)ep_send_events (9,657 samples, 2.55%)ep..sock_poll (71 samples, 0.02%)mutex_unlock (1,230 samples, 0.33%)_raw_spin_lock_irqsave (40 samples, 0.01%)hrtimer_init_sleeper (55 samples, 0.01%)__hrtimer_init (47 samples, 0.01%)enqueue_hrtimer (76 samples, 0.02%)timerqueue_add (55 samples, 0.01%)__hrtimer_start_range_ns (185 samples, 0.05%)_raw_spin_lock_irqsave (98 samples, 0.03%)__raw_spin_lock_irqsave (98 samples, 0.03%)hrtimer_start_range_ns (349 samples, 0.09%)rb_erase (52 samples, 0.01%)__remove_hrtimer (120 samples, 0.03%)_raw_spin_lock_irqsave (125 samples, 
0.03%)__raw_spin_lock_irqsave (123 samples, 0.03%)hrtimer_try_to_cancel (380 samples, 0.10%)_raw_spin_lock (103 samples, 0.03%)clear_buddies (64 samples, 0.02%)__update_load_avg_cfs_rq (53 samples, 0.01%)__update_load_avg_se (58 samples, 0.02%)clear_buddies (87 samples, 0.02%)update_cfs_group (299 samples, 0.08%)reweight_entity (137 samples, 0.04%)__calc_delta (268 samples, 0.07%)__cgroup_account_cputime (46 samples, 0.01%)cpuacct_charge (227 samples, 0.06%)update_curr (885 samples, 0.23%)update_min_vruntime (41 samples, 0.01%)__update_load_avg_cfs_rq (348 samples, 0.09%)__update_load_avg_se (251 samples, 0.07%)update_load_avg (968 samples, 0.26%)dequeue_entity (2,967 samples, 0.78%)update_min_vruntime (92 samples, 0.02%)update_cfs_group (119 samples, 0.03%)dequeue_task_fair (3,442 samples, 0.91%)dequeue_task (3,487 samples, 0.92%)dequeue_task_fair (42 samples, 0.01%)_raw_spin_unlock (55 samples, 0.01%)perf_ibs_add (51 samples, 0.01%)perf_ibs_start (42 samples, 0.01%)event_sched_in (79 samples, 0.02%)merge_sched_in (116 samples, 0.03%)ctx_sched_in (250 samples, 0.07%)visit_groups_merge.constprop.0.isra.0 (226 samples, 0.06%)rb_next (57 samples, 0.02%)perf_ctx_sched_task_cb (49 samples, 0.01%)perf_pmu_nop_void (45 samples, 0.01%)__perf_event_task_sched_in (505 samples, 0.13%)_raw_spin_unlock (65 samples, 0.02%)finish_task_switch.isra.0 (876 samples, 0.23%)newidle_balance (124 samples, 0.03%)pick_next_task_fair (200 samples, 0.05%)pick_next_task_idle (154 samples, 0.04%)__update_idle_core (95 samples, 0.03%)put_prev_entity (44 samples, 0.01%)check_spread.isra.0 (78 samples, 0.02%)pick_next_task (735 samples, 0.19%)put_prev_task_fair (269 samples, 0.07%)put_prev_entity (106 samples, 0.03%)_raw_spin_lock (72 samples, 0.02%)perf_ibs_del (525 samples, 0.14%)perf_ibs_stop (478 samples, 0.13%)native_read_msr (186 samples, 0.05%)event_sched_out (557 samples, 0.15%)__pmu_ctx_sched_out (682 samples, 0.18%)group_sched_out (639 samples, 0.17%)perf_ibs_del (52 samples, 
0.01%)ctx_sched_out (932 samples, 0.25%)sched_clock_cpu (202 samples, 0.05%)sched_clock (184 samples, 0.05%)native_sched_clock (184 samples, 0.05%)perf_ctx_disable (49 samples, 0.01%)perf_ctx_sched_task_cb (56 samples, 0.01%)__perf_event_task_sched_out (1,260 samples, 0.33%)perf_event_context_sched_out (1,125 samples, 0.30%)prepare_task_switch (1,336 samples, 0.35%)psi_group_change (71 samples, 0.02%)psi_group_change (1,302 samples, 0.34%)record_times (58 samples, 0.02%)record_times (60 samples, 0.02%)psi_task_switch (1,806 samples, 0.48%)sched_clock_cpu (152 samples, 0.04%)sched_clock (134 samples, 0.04%)native_sched_clock (134 samples, 0.04%)put_prev_task_fair (57 samples, 0.02%)__schedule (9,010 samples, 2.38%)__..update_rq_clock (151 samples, 0.04%)sched_clock_cpu (126 samples, 0.03%)sched_clock (113 samples, 0.03%)native_sched_clock (113 samples, 0.03%)schedule_hrtimeout_range (10,138 samples, 2.68%)sc..schedule_hrtimeout_range_clock (10,108 samples, 2.67%)sc..schedule (9,099 samples, 2.40%)sc..ktime_get_ts64 (80 samples, 0.02%)read_tsc (561 samples, 0.15%)select_estimate_accuracy (800 samples, 0.21%)ep_poll (25,387 samples, 6.71%)ep_pollschedule_hrtimeout_range (48 samples, 0.01%)do_epoll_wait (26,191 samples, 6.92%)do_epoll_..fput (140 samples, 0.04%)ktime_get_ts64 (245 samples, 0.06%)read_tsc (625 samples, 0.17%)__x64_sys_epoll_wait (27,553 samples, 7.28%)__x64_sys_..__put_user_8 (200 samples, 0.05%)__get_user_8 (189 samples, 0.05%)__rseq_handle_notify_resume (1,198 samples, 0.32%)rseq_ip_fixup (299 samples, 0.08%)rseq_get_rseq_cs.isra.0 (38 samples, 0.01%)mem_cgroup_handle_over_high (38 samples, 0.01%)exit_to_user_mode_loop (1,413 samples, 0.37%)exit_to_user_mode_prepare (1,731 samples, 0.46%)syscall_exit_to_user_mode (1,821 samples, 0.48%)do_syscall_64 (29,522 samples, 7.80%)do_syscall_..entry_SYSCALL_64_after_hwframe (29,687 samples, 7.85%)entry_SYSCA..syscall_return_via_sysret (120 samples, 0.03%)epoll_wait (30,473 samples, 
8.05%)epoll_waitmio::poll::Poll::poll (30,680 samples, 8.11%)mio::poll::..mio::sys::unix::selector::epoll::Selector::select (30,680 samples, 8.11%)mio::sys::u..tokio::io::ready::Ready::from_mio (42 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::set_readiness (398 samples, 0.11%)tokio::util::bit::Pack::pack (313 samples, 0.08%)core::result::Result<T,E>::is_err (61 samples, 0.02%)core::result::Result<T,E>::is_ok (61 samples, 0.02%)tokio::loom::std::mutex::Mutex<T>::lock (119 samples, 0.03%)std::sync::mutex::Mutex<T>::lock (117 samples, 0.03%)std::sys::sync::mutex::futex::Mutex::lock (92 samples, 0.02%)tokio::runtime::io::driver::Driver::turn (31,755 samples, 8.39%)tokio::runti..tokio::runtime::io::scheduled_io::ScheduledIo::wake (430 samples, 0.11%)__GI___clock_gettime (38 samples, 0.01%)std::sys::pal::unix::time::Timespec::now (41 samples, 0.01%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process (52 samples, 0.01%)tokio::runtime::time::source::TimeSource::now (52 samples, 0.01%)tokio::time::clock::Clock::now (43 samples, 0.01%)tokio::time::clock::now (43 samples, 0.01%)std::time::Instant::now (43 samples, 0.01%)std::sys::pal::unix::time::Instant::now (43 samples, 0.01%)std::sys::pal::unix::time::Timespec::now (45 samples, 0.01%)tokio::runtime::time::source::TimeSource::now (56 samples, 0.01%)tokio::time::clock::Clock::now (49 samples, 0.01%)tokio::time::clock::now (49 samples, 0.01%)std::time::Instant::now (49 samples, 0.01%)std::sys::pal::unix::time::Instant::now (49 samples, 0.01%)tokio::runtime::time::Driver::park_internal (31,983 samples, 8.45%)tokio::runti..tokio::runtime::scheduler::multi_thread::park::Inner::park_driver (32,791 samples, 8.67%)tokio::runti..tokio::runtime::driver::Driver::park (32,760 samples, 8.66%)tokio::runti..tokio::runtime::driver::TimeDriver::park (32,760 samples, 8.66%)tokio::runti..tokio::runtime::time::Driver::park (32,760 samples, 
8.66%)tokio::runti..tokio::runtime::scheduler::multi_thread::park::Parker::park (56,209 samples, 14.86%)tokio::runtime::schedul..tokio::runtime::scheduler::multi_thread::park::Inner::park (56,209 samples, 14.86%)tokio::runtime::schedul..tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (56,281 samples, 14.87%)tokio::runtime::schedul..core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::scheduler::multi_thread::worker::Synced>> (121 samples, 0.03%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (121 samples, 0.03%)std::sys::sync::mutex::futex::Mutex::unlock (115 samples, 0.03%)std::sync::mutex::MutexGuard<T>::new (121 samples, 0.03%)std::sync::poison::Flag::guard (117 samples, 0.03%)std::thread::panicking (114 samples, 0.03%)std::panicking::panicking (114 samples, 0.03%)std::panicking::panic_count::count_is_zero (114 samples, 0.03%)core::sync::atomic::AtomicUsize::load (108 samples, 0.03%)core::sync::atomic::atomic_load (108 samples, 0.03%)core::result::Result<T,E>::is_err (207 samples, 0.05%)core::result::Result<T,E>::is_ok (207 samples, 0.05%)core::sync::atomic::AtomicU32::compare_exchange (128 samples, 0.03%)core::sync::atomic::atomic_compare_exchange (128 samples, 0.03%)tokio::loom::std::mutex::Mutex<T>::lock (599 samples, 0.16%)std::sync::mutex::Mutex<T>::lock (594 samples, 0.16%)std::sys::sync::mutex::futex::Mutex::lock (473 samples, 0.13%)std::sys::sync::mutex::futex::Mutex::lock_contended (134 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::spin (96 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Core::maintenance (806 samples, 0.21%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::scheduler::multi_thread::worker::Synced>> (80 samples, 0.02%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (80 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::unlock (48 samples, 0.01%)<T as core::slice::cmp::SliceContains>::slice_contains::{{closure}} (589 
samples, 0.16%)core::cmp::impls::<impl core::cmp::PartialEq for usize>::eq (589 samples, 0.16%)core::slice::<impl [T]>::contains (1,373 samples, 0.36%)<T as core::slice::cmp::SliceContains>::slice_contains (1,373 samples, 0.36%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::any (1,373 samples, 0.36%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (309 samples, 0.08%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (309 samples, 0.08%)core::result::Result<T,E>::is_err (50 samples, 0.01%)core::result::Result<T,E>::is_ok (50 samples, 0.01%)core::sync::atomic::AtomicU32::compare_exchange (59 samples, 0.02%)core::sync::atomic::atomic_compare_exchange (59 samples, 0.02%)tokio::runtime::scheduler::multi_thread::idle::Idle::is_parked (1,625 samples, 0.43%)tokio::loom::std::mutex::Mutex<T>::lock (169 samples, 0.04%)std::sync::mutex::Mutex<T>::lock (168 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock (136 samples, 0.04%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_parked (1,675 samples, 0.44%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::scheduler::multi_thread::worker::Synced>> (95 samples, 0.03%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (95 samples, 0.03%)std::sys::sync::mutex::futex::Mutex::unlock (88 samples, 0.02%)core::result::Result<T,E>::is_err (163 samples, 0.04%)core::result::Result<T,E>::is_ok (163 samples, 0.04%)tokio::loom::std::mutex::Mutex<T>::lock (342 samples, 0.09%)std::sync::mutex::Mutex<T>::lock (341 samples, 0.09%)std::sys::sync::mutex::futex::Mutex::lock (318 samples, 0.08%)std::sys::sync::mutex::futex::Mutex::lock_contended (117 samples, 0.03%)std::sys::sync::mutex::futex::Mutex::spin (81 samples, 0.02%)tokio::runtime::scheduler::multi_thread::idle::Idle::transition_worker_to_parked (512 samples, 0.14%)tokio::runtime::scheduler::multi_thread::idle::State::dec_num_unparked (67 samples, 
0.02%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::is_empty (57 samples, 0.02%)tokio::runtime::scheduler::multi_thread::queue::Inner<T>::is_empty (46 samples, 0.01%)std::sys::sync::mutex::futex::Mutex::lock_contended (61 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::spin (53 samples, 0.01%)tokio::loom::std::mutex::Mutex<T>::lock (88 samples, 0.02%)std::sync::mutex::Mutex<T>::lock (88 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock (78 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_if_work_pending (287 samples, 0.08%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_parked_local (201 samples, 0.05%)tokio::runtime::scheduler::multi_thread::idle::Idle::worker_to_notify (201 samples, 0.05%)tokio::runtime::scheduler::multi_thread::idle::Idle::notify_should_wakeup (100 samples, 0.03%)core::sync::atomic::AtomicUsize::fetch_add (98 samples, 0.03%)core::sync::atomic::atomic_add (98 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Context::park (60,093 samples, 15.88%)tokio::runtime::schedule..tokio::runtime::scheduler::multi_thread::worker::Core::transition_to_parked (898 samples, 0.24%)core::cell::RefCell<T>::borrow_mut (81 samples, 0.02%)core::cell::RefCell<T>::try_borrow_mut (81 samples, 0.02%)core::cell::BorrowRefMut::new (81 samples, 0.02%)tokio::runtime::context::budget (60 samples, 0.02%)std::thread::local::LocalKey<T>::try_with (60 samples, 0.02%)[[vdso]] (62 samples, 0.02%)__memcpy_avx512_unaligned_erms (434 samples, 0.11%)__memcpy_avx512_unaligned_erms (591 samples, 0.16%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (596 samples, 0.16%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (596 samples, 0.16%)std::panic::catch_unwind (1,053 samples, 0.28%)std::panicking::try (1,053 samples, 0.28%)std::panicking::try::do_call (1,053 samples, 
0.28%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (1,053 samples, 0.28%)core::ops::function::FnOnce::call_once (1,053 samples, 0.28%)tokio::runtime::task::harness::Harness<T,S>::complete::{{closure}} (1,053 samples, 0.28%)tokio::runtime::task::core::Core<T,S>::drop_future_or_output (1,053 samples, 0.28%)tokio::runtime::task::core::Core<T,S>::set_stage (1,041 samples, 0.28%)<core::option::Option<T> as core::ops::try_trait::Try>::branch (38 samples, 0.01%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::remove (76 samples, 0.02%)core::result::Result<T,E>::is_err (750 samples, 0.20%)core::result::Result<T,E>::is_ok (750 samples, 0.20%)tokio::runtime::task::harness::Harness<T,S>::complete (2,086 samples, 0.55%)tokio::runtime::task::harness::Harness<T,S>::release (1,033 samples, 0.27%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::task::Schedule for alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>::release (1,023 samples, 0.27%)tokio::runtime::task::list::OwnedTasks<S>::remove (1,023 samples, 0.27%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::remove (892 samples, 0.24%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::shard_inner (788 samples, 0.21%)tokio::loom::std::mutex::Mutex<T>::lock (775 samples, 0.20%)std::sync::mutex::Mutex<T>::lock (775 samples, 0.20%)std::sys::sync::mutex::futex::Mutex::lock (771 samples, 0.20%)core::cell::RefCell<T>::borrow_mut (42 samples, 0.01%)core::cell::RefCell<T>::try_borrow_mut (42 samples, 0.01%)core::cell::BorrowRefMut::new (42 samples, 0.01%)tokio::runtime::coop::budget (46 samples, 0.01%)tokio::runtime::coop::with_budget (46 samples, 0.01%)tokio::runtime::scheduler::multi_thread::stats::Stats::start_poll (86 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (212 samples, 
0.06%)<alloc::sync::Arc<T,A> as core::clone::Clone>::clone (71 samples, 0.02%)<alloc::sync::Arc<T,A> as core::clone::Clone>::clone (139 samples, 0.04%)core::sync::atomic::AtomicUsize::fetch_add (115 samples, 0.03%)core::sync::atomic::atomic_add (115 samples, 0.03%)__memcpy_avx512_unaligned_erms (138 samples, 0.04%)core::ptr::drop_in_place<alloc::sync::Arc<tokio::net::udp::UdpSocket>> (174 samples, 0.05%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (174 samples, 0.05%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker::core::Tracker>> (282 samples, 0.07%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (282 samples, 0.07%)<std::io::cursor::Cursor<T> as std::io::Read>::read_exact (53 samples, 0.01%)std::io::cursor::Cursor<T>::remaining_slice (58 samples, 0.02%)<std::io::cursor::Cursor<T> as std::io::Read>::read_exact (103 samples, 0.03%)std::io::impls::<impl std::io::Read for &[u8]>::read_exact (45 samples, 0.01%)byteorder::io::ReadBytesExt::read_i32 (108 samples, 0.03%)std::io::cursor::Cursor<T>::remaining_slice (55 samples, 0.01%)byteorder::io::ReadBytesExt::read_i64 (57 samples, 0.02%)<std::io::cursor::Cursor<T> as std::io::Read>::read_exact (57 samples, 0.02%)aquatic_udp_protocol::request::Request::from_bytes (1,438 samples, 0.38%)__GI___lll_lock_wait_private (55 samples, 0.01%)futex_wait (38 samples, 0.01%)__x64_sys_futex (128 samples, 0.03%)_raw_spin_lock (67 samples, 0.02%)futex_hash (251 samples, 0.07%)_raw_spin_lock (190 samples, 0.05%)native_queued_spin_lock_slowpath (188 samples, 0.05%)futex_wake_mark (67 samples, 0.02%)get_futex_key (210 samples, 0.06%)_raw_spin_lock_irqsave (45 samples, 0.01%)__raw_spin_lock_irqsave (44 samples, 0.01%)__smp_call_single_queue (117 samples, 0.03%)send_call_function_single_ipi (107 samples, 0.03%)native_send_call_func_single_ipi (64 samples, 0.02%)default_send_IPI_single_phys (64 samples, 0.02%)__default_send_IPI_dest_field (54 samples, 0.01%)llist_add_batch (86 samples, 0.02%)ttwu_queue_wakelist (259 
samples, 0.07%)futex_wake (1,916 samples, 0.51%)wake_up_q (702 samples, 0.19%)try_to_wake_up (671 samples, 0.18%)do_futex (2,387 samples, 0.63%)__x64_sys_futex (2,616 samples, 0.69%)futex_wake (53 samples, 0.01%)do_futex (38 samples, 0.01%)exit_to_user_mode_prepare (140 samples, 0.04%)do_syscall_64 (2,881 samples, 0.76%)syscall_exit_to_user_mode (189 samples, 0.05%)entry_SYSCALL_64_after_hwframe (3,055 samples, 0.81%)__GI___lll_lock_wake_private (3,294 samples, 0.87%)__x64_sys_futex (43 samples, 0.01%)plist_add (66 samples, 0.02%)update_cfs_group (57 samples, 0.02%)__calc_delta (40 samples, 0.01%)cpuacct_charge (53 samples, 0.01%)update_curr (190 samples, 0.05%)__update_load_avg_cfs_rq (50 samples, 0.01%)__update_load_avg_se (43 samples, 0.01%)update_load_avg (158 samples, 0.04%)dequeue_entity (565 samples, 0.15%)dequeue_task_fair (664 samples, 0.18%)dequeue_task (672 samples, 0.18%)ctx_sched_in (59 samples, 0.02%)visit_groups_merge.constprop.0.isra.0 (57 samples, 0.02%)__perf_event_task_sched_in (108 samples, 0.03%)finish_task_switch.isra.0 (172 samples, 0.05%)pick_next_task_fair (43 samples, 0.01%)pick_next_task (155 samples, 0.04%)put_prev_task_fair (50 samples, 0.01%)perf_ibs_del (92 samples, 0.02%)perf_ibs_stop (71 samples, 0.02%)event_sched_out (103 samples, 0.03%)__pmu_ctx_sched_out (133 samples, 0.04%)group_sched_out (125 samples, 0.03%)ctx_sched_out (177 samples, 0.05%)prepare_task_switch (276 samples, 0.07%)__perf_event_task_sched_out (237 samples, 0.06%)perf_event_context_sched_out (197 samples, 0.05%)psi_group_change (234 samples, 0.06%)psi_task_switch (343 samples, 0.09%)__schedule (1,782 samples, 0.47%)futex_wait_queue (1,981 samples, 0.52%)schedule (1,806 samples, 0.48%)__get_user_nocheck_4 (110 samples, 0.03%)futex_hash (112 samples, 0.03%)futex_q_lock (233 samples, 0.06%)_raw_spin_lock (85 samples, 0.02%)native_queued_spin_lock_slowpath (85 samples, 0.02%)futex_q_unlock (740 samples, 0.20%)futex_wait_setup (1,346 samples, 0.36%)futex_wait (3,533 
samples, 0.93%)do_futex (3,584 samples, 0.95%)__x64_sys_futex (3,670 samples, 0.97%)__put_user_8 (52 samples, 0.01%)__rseq_handle_notify_resume (254 samples, 0.07%)exit_to_user_mode_loop (337 samples, 0.09%)exit_to_user_mode_prepare (451 samples, 0.12%)do_syscall_64 (4,206 samples, 1.11%)syscall_exit_to_user_mode (483 samples, 0.13%)entry_SYSCALL_64_after_hwframe (4,268 samples, 1.13%)__GI___lll_lock_wait_private (4,605 samples, 1.22%)futex_wait (4,488 samples, 1.19%)_int_free (5,829 samples, 1.54%)__GI___libc_free (9,251 samples, 2.44%)__..core::ptr::drop_in_place<torrust_tracker::servers::udp::UdpRequest> (9,429 samples, 2.49%)co..core::ptr::drop_in_place<alloc::vec::Vec<u8>> (9,429 samples, 2.49%)co..core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (9,429 samples, 2.49%)co..<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (9,429 samples, 2.49%)<a..<alloc::alloc::Global as core::alloc::Allocator>::deallocate (9,429 samples, 2.49%)<a..alloc::alloc::dealloc (9,429 samples, 2.49%)al..__rdl_dealloc (9,429 samples, 2.49%)__..std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::dealloc (9,429 samples, 2.49%)st..tracing::span::Span::record_all (174 samples, 0.05%)unlink_chunk (168 samples, 0.04%)core::result::Result<T,E>::expect (227 samples, 0.06%)core::result::Result<T,E>::map_err (66 samples, 0.02%)__GI___clock_gettime (61 samples, 0.02%)std::sys::pal::unix::time::Timespec::now (68 samples, 0.02%)std::time::Instant::elapsed (109 samples, 0.03%)std::time::Instant::now (80 samples, 0.02%)std::sys::pal::unix::time::Instant::now (80 samples, 0.02%)std::sys::pal::unix::cvt (48 samples, 0.01%)__x64_sys_getsockname (580 samples, 0.15%)__sys_getsockname (64 samples, 0.02%)__get_user_4 (812 samples, 0.21%)__put_user_nocheck_4 (1,020 samples, 0.27%)_copy_to_user (40 samples, 0.01%)apparmor_socket_getsockname (82 samples, 0.02%)_raw_spin_lock_bh (1,657 samples, 0.44%)_raw_spin_unlock_bh (55 samples, 
0.01%)ip4_datagram_release_cb (93 samples, 0.02%)dequeue_entity (62 samples, 0.02%)dequeue_task (76 samples, 0.02%)dequeue_task_fair (75 samples, 0.02%)__schedule (179 samples, 0.05%)__lock_sock (196 samples, 0.05%)schedule (180 samples, 0.05%)_raw_spin_lock_bh (83 samples, 0.02%)native_queued_spin_lock_slowpath (78 samples, 0.02%)_raw_spin_unlock_bh (125 samples, 0.03%)__local_bh_enable_ip (71 samples, 0.02%)lock_sock_nested (508 samples, 0.13%)autoremove_wake_function (166 samples, 0.04%)default_wake_function (166 samples, 0.04%)try_to_wake_up (166 samples, 0.04%)__wake_up_common (169 samples, 0.04%)__wake_up (189 samples, 0.05%)__wake_up_common_lock (188 samples, 0.05%)_raw_spin_unlock_bh (139 samples, 0.04%)__local_bh_enable_ip (118 samples, 0.03%)inet_getname (3,442 samples, 0.91%)release_sock (961 samples, 0.25%)ip4_datagram_release_cb (340 samples, 0.09%)lock_sock_nested (41 samples, 0.01%)__check_object_size.part.0 (339 samples, 0.09%)check_stack_object (284 samples, 0.08%)__check_object_size (408 samples, 0.11%)check_stack_object (69 samples, 0.02%)move_addr_to_user (2,340 samples, 0.62%)copy_user_enhanced_fast_string (1,695 samples, 0.45%)security_socket_getsockname (298 samples, 0.08%)apparmor_socket_getsockname (254 samples, 0.07%)aa_sk_perm (204 samples, 0.05%)__fget_light (1,889 samples, 0.50%)__fdget (1,912 samples, 0.51%)__sys_getsockname (10,968 samples, 2.90%)__..sockfd_lookup_light (2,045 samples, 0.54%)fput (545 samples, 0.14%)inet_getname (177 samples, 0.05%)__x64_sys_getsockname (11,723 samples, 3.10%)__x..syscall_enter_from_user_mode (48 samples, 0.01%)fpregs_assert_state_consistent (42 samples, 0.01%)exit_to_user_mode_prepare (363 samples, 0.10%)do_syscall_64 (12,453 samples, 3.29%)do_..syscall_exit_to_user_mode (571 samples, 0.15%)fpregs_assert_state_consistent (53 samples, 0.01%)entry_SYSCALL_64_after_hwframe (13,099 samples, 3.46%)ent..__GI_getsockname (13,459 samples, 
3.56%)__GI..std::sys_common::net::TcpListener::socket_addr::{{closure}} (13,494 samples, 3.57%)std:..tokio::net::udp::UdpSocket::local_addr (13,569 samples, 3.59%)toki..mio::net::udp::UdpSocket::local_addr (13,568 samples, 3.59%)mio:..std::net::tcp::TcpListener::local_addr (13,568 samples, 3.59%)std:..std::sys_common::net::TcpListener::socket_addr (13,567 samples, 3.59%)std:..std::sys_common::net::sockname (13,559 samples, 3.58%)std:..[[vdso]] (338 samples, 0.09%)rand_chacha::guts::ChaCha::pos64 (177 samples, 0.05%)<ppv_lite86::soft::x2<W,G> as core::ops::arith::AddAssign>::add_assign (67 samples, 0.02%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as core::ops::arith::AddAssign>::add_assign (67 samples, 0.02%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as core::ops::arith::Add>::add (67 samples, 0.02%)core::core_arch::x86::avx2::_mm256_add_epi32 (67 samples, 0.02%)<ppv_lite86::soft::x2<W,G> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right16 (40 samples, 0.01%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right16 (40 samples, 0.01%)core::core_arch::x86::avx2::_mm256_shuffle_epi8 (40 samples, 0.01%)core::core_arch::x86::avx2::_mm256_or_si256 (44 samples, 0.01%)<ppv_lite86::soft::x2<W,G> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right20 (52 samples, 0.01%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right20 (52 samples, 0.01%)rand_chacha::guts::round (234 samples, 0.06%)rand_chacha::guts::refill_wide::impl_avx2 (472 samples, 0.12%)rand_chacha::guts::refill_wide::fn_impl (472 samples, 0.12%)rand_chacha::guts::refill_wide_impl (472 samples, 0.12%)<rand_chacha::chacha::ChaCha12Core as rand_core::block::BlockRngCore>::generate (825 samples, 0.22%)rand_chacha::guts::ChaCha::refill4 (825 samples, 0.22%)rand::rng::Rng::gen (912 samples, 0.24%)rand::distributions::other::<impl 
rand::distributions::distribution::Distribution<[T: _]> for rand::distributions::Standard>::sample (912 samples, 0.24%)rand::rng::Rng::gen (912 samples, 0.24%)rand::distributions::integer::<impl rand::distributions::distribution::Distribution<u8> for rand::distributions::Standard>::sample (912 samples, 0.24%)<rand::rngs::thread::ThreadRng as rand_core::RngCore>::next_u32 (912 samples, 0.24%)<rand::rngs::adapter::reseeding::ReseedingRng<R,Rsdr> as rand_core::RngCore>::next_u32 (912 samples, 0.24%)<rand_core::block::BlockRng<R> as rand_core::RngCore>::next_u32 (912 samples, 0.24%)rand_core::block::BlockRng<R>::generate_and_set (848 samples, 0.22%)<rand::rngs::adapter::reseeding::ReseedingCore<R,Rsdr> as rand_core::block::BlockRngCore>::generate (847 samples, 0.22%)torrust_tracker::servers::udp::handlers::RequestId::make (941 samples, 0.25%)uuid::v4::<impl uuid::Uuid>::new_v4 (921 samples, 0.24%)uuid::rng::bytes (921 samples, 0.24%)rand::random (921 samples, 0.24%)std::sync::mutex::Mutex<T>::lock (52 samples, 0.01%)std::sys::sync::mutex::futex::Mutex::lock (44 samples, 0.01%)core::sync::atomic::AtomicU32::compare_exchange (43 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (43 samples, 0.01%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::get_peers_for_client (125 samples, 0.03%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_peers_for_client (68 samples, 0.02%)core::iter::traits::iterator::Iterator::collect (47 samples, 0.01%)<alloc::vec::Vec<T> as core::iter::traits::collect::FromIterator<T>>::from_iter (47 samples, 0.01%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter::SpecFromIter<T,I>>::from_iter (47 samples, 0.01%)<alloc::vec::Vec<T> as 
alloc::vec::spec_from_iter_nested::SpecFromIterNested<T,I>>::from_iter (47 samples, 0.01%)<core::iter::adapters::cloned::Cloned<I> as core::iter::traits::iterator::Iterator>::next (47 samples, 0.01%)<core::iter::adapters::take::Take<I> as core::iter::traits::iterator::Iterator>::next (47 samples, 0.01%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (93 samples, 0.02%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (124 samples, 0.03%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (124 samples, 0.03%)__memcmp_evex_movbe (244 samples, 0.06%)core::cmp::impls::<impl core::cmp::Ord for isize>::cmp (89 samples, 0.02%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (499 samples, 0.13%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (499 samples, 0.13%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (499 samples, 0.13%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (499 samples, 0.13%)<u8 as core::slice::cmp::SliceOrd>::compare (499 samples, 0.13%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (986 samples, 0.26%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (917 samples, 0.24%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (917 samples, 0.24%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (998 samples, 0.26%)torrust_tracker::core::Tracker::get_torrent_peers_for_peer (1,222 samples, 0.32%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for 
torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get (1,079 samples, 0.29%)alloc::collections::btree::map::BTreeMap<K,V,A>::entry (41 samples, 0.01%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (40 samples, 0.01%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (173 samples, 0.05%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (251 samples, 0.07%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (251 samples, 0.07%)core::slice::iter::Iter<T>::post_inc_start (78 samples, 0.02%)core::ptr::non_null::NonNull<T>::add (78 samples, 0.02%)[[vdso]] (78 samples, 0.02%)__memcmp_evex_movbe (402 samples, 0.11%)core::cmp::impls::<impl core::cmp::Ord for isize>::cmp (258 samples, 0.07%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (1,036 samples, 0.27%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (1,036 samples, 0.27%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (1,036 samples, 0.27%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (1,036 samples, 0.27%)<u8 as core::slice::cmp::SliceOrd>::compare (1,036 samples, 0.27%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (1,912 samples, 0.51%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (1,824 samples, 0.48%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (1,824 samples, 0.48%)alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Immut,K,V,Type>::keys (55 samples, 
0.01%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (1,928 samples, 0.51%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_stats (118 samples, 0.03%)alloc::collections::btree::map::BTreeMap<K,V,A>::values (83 samples, 0.02%)__rdl_alloc (40 samples, 0.01%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (40 samples, 0.01%)alloc::sync::Arc<T>::new (70 samples, 0.02%)alloc::boxed::Box<T>::new (70 samples, 0.02%)alloc::alloc::exchange_malloc (53 samples, 0.01%)<alloc::alloc::Global as core::alloc::Allocator>::allocate (53 samples, 0.01%)alloc::alloc::Global::alloc_impl (53 samples, 0.01%)alloc::alloc::alloc (53 samples, 0.01%)core::option::Option<T>::is_some_and (58 samples, 0.02%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer::{{closure}} (58 samples, 0.02%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>> (56 samples, 0.01%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (56 samples, 0.01%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::insert_or_update_peer_and_get_stats (423 samples, 0.11%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer_and_get_stats (391 samples, 0.10%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer (272 samples, 
0.07%)torrust_tracker::core::Tracker::update_torrent_with_peer_and_get_stats::{{closure}} (2,519 samples, 0.67%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::update_torrent_with_peer_and_get_stats (2,482 samples, 0.66%)torrust_tracker::core::Tracker::announce::{{closure}} (3,776 samples, 1.00%)<core::net::socket_addr::SocketAddr as core::hash::Hash>::hash (77 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (79 samples, 0.02%)<core::time::Nanoseconds as core::hash::Hash>::hash (91 samples, 0.02%)core::hash::impls::<impl core::hash::Hash for u32>::hash (91 samples, 0.02%)core::hash::Hasher::write_u32 (91 samples, 0.02%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (91 samples, 0.02%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (91 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (91 samples, 0.02%)<core::time::Duration as core::hash::Hash>::hash (188 samples, 0.05%)core::hash::impls::<impl core::hash::Hash for u64>::hash (97 samples, 0.03%)core::hash::Hasher::write_u64 (97 samples, 0.03%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (97 samples, 0.03%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (97 samples, 0.03%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (102 samples, 0.03%)<torrust_tracker_clock::time_extent::TimeExtent as core::hash::Hash>::hash (292 samples, 0.08%)core::hash::impls::<impl core::hash::Hash for u64>::hash (104 samples, 0.03%)core::hash::Hasher::write_u64 (104 samples, 0.03%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (104 samples, 
0.03%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (104 samples, 0.03%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (78 samples, 0.02%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (78 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (77 samples, 0.02%)core::hash::Hasher::write_length_prefix (81 samples, 0.02%)core::hash::Hasher::write_usize (81 samples, 0.02%)core::array::<impl core::hash::Hash for [T: N]>::hash (206 samples, 0.05%)core::hash::impls::<impl core::hash::Hash for [T]>::hash (206 samples, 0.05%)core::hash::impls::<impl core::hash::Hash for u8>::hash_slice (125 samples, 0.03%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (125 samples, 0.03%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (125 samples, 0.03%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (124 samples, 0.03%)core::hash::sip::u8to64_le (47 samples, 0.01%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::build (589 samples, 0.16%)[[vdso]] (83 samples, 0.02%)core::num::<impl u128>::checked_div (84 samples, 0.02%)_int_free (40 samples, 0.01%)torrust_tracker::servers::udp::connection_cookie::check (748 samples, 0.20%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::get_last_time_extent (154 samples, 0.04%)torrust_tracker_clock::time_extent::Make::now (154 samples, 0.04%)torrust_tracker_clock::clock::working::<impl torrust_tracker_clock::clock::Time for torrust_tracker_clock::clock::Clock<torrust_tracker_clock::clock::working::WorkingClock>>::now (70 samples, 0.02%)std::time::SystemTime::now (66 samples, 0.02%)std::sys::pal::unix::time::SystemTime::now (66 samples, 0.02%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::build (68 samples, 0.02%)core::array::<impl core::hash::Hash for [T: N]>::hash (68 samples, 0.02%)core::hash::impls::<impl core::hash::Hash for [T]>::hash (68 samples, 0.02%)core::hash::impls::<impl core::hash::Hash 
for u8>::hash_slice (68 samples, 0.02%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (68 samples, 0.02%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (68 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (68 samples, 0.02%)torrust_tracker::servers::udp::handlers::handle_announce::{{closure}} (4,754 samples, 1.26%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (80 samples, 0.02%)<core::net::socket_addr::SocketAddrV4 as core::hash::Hash>::hash (43 samples, 0.01%)<core::net::ip_addr::Ipv4Addr as core::hash::Hash>::hash (43 samples, 0.01%)core::array::<impl core::hash::Hash for [T: N]>::hash (42 samples, 0.01%)core::hash::impls::<impl core::hash::Hash for [T]>::hash (42 samples, 0.01%)<core::net::socket_addr::SocketAddr as core::hash::Hash>::hash (126 samples, 0.03%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (86 samples, 0.02%)<core::time::Nanoseconds as core::hash::Hash>::hash (104 samples, 0.03%)core::hash::impls::<impl core::hash::Hash for u32>::hash (104 samples, 0.03%)core::hash::Hasher::write_u32 (104 samples, 0.03%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (104 samples, 0.03%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (104 samples, 0.03%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (123 samples, 0.03%)<core::time::Duration as core::hash::Hash>::hash (232 samples, 0.06%)core::hash::impls::<impl core::hash::Hash for u64>::hash (128 samples, 0.03%)core::hash::Hasher::write_u64 (127 samples, 0.03%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (127 samples, 0.03%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (127 samples, 0.03%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (115 samples, 0.03%)core::hash::sip::u8to64_le (46 samples, 0.01%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (117 samples, 0.03%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (117 samples, 
0.03%)<torrust_tracker_clock::time_extent::TimeExtent as core::hash::Hash>::hash (350 samples, 0.09%)core::hash::impls::<impl core::hash::Hash for u64>::hash (118 samples, 0.03%)core::hash::Hasher::write_u64 (118 samples, 0.03%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (83 samples, 0.02%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (83 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (82 samples, 0.02%)core::hash::Hasher::write_length_prefix (86 samples, 0.02%)core::hash::Hasher::write_usize (86 samples, 0.02%)core::array::<impl core::hash::Hash for [T: N]>::hash (229 samples, 0.06%)core::hash::impls::<impl core::hash::Hash for [T]>::hash (229 samples, 0.06%)core::hash::impls::<impl core::hash::Hash for u8>::hash_slice (143 samples, 0.04%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (143 samples, 0.04%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (143 samples, 0.04%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (140 samples, 0.04%)core::hash::sip::u8to64_le (44 samples, 0.01%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::build (710 samples, 0.19%)[[vdso]] (68 samples, 0.02%)core::num::<impl u128>::checked_div (70 samples, 0.02%)_int_free (40 samples, 0.01%)torrust_tracker::servers::udp::handlers::handle_connect::{{closure}} (887 samples, 0.23%)torrust_tracker::servers::udp::connection_cookie::make (865 samples, 0.23%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::get_last_time_extent (146 samples, 0.04%)torrust_tracker_clock::time_extent::Make::now (145 samples, 0.04%)torrust_tracker_clock::clock::working::<impl torrust_tracker_clock::clock::Time for torrust_tracker_clock::clock::Clock<torrust_tracker_clock::clock::working::WorkingClock>>::now (75 samples, 0.02%)std::time::SystemTime::now (70 samples, 0.02%)std::sys::pal::unix::time::SystemTime::now (70 samples, 0.02%)hashbrown::raw::RawTable<T,A>::reserve (47 samples, 
0.01%)hashbrown::map::HashMap<K,V,S,A>::insert (59 samples, 0.02%)hashbrown::raw::RawTable<T,A>::find_or_find_insert_slot (49 samples, 0.01%)torrust_tracker::core::ScrapeData::add_file (61 samples, 0.02%)std::collections::hash::map::HashMap<K,V,S>::insert (61 samples, 0.02%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (146 samples, 0.04%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (138 samples, 0.04%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (138 samples, 0.04%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (70 samples, 0.02%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (70 samples, 0.02%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (70 samples, 0.02%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (70 samples, 0.02%)<u8 as core::slice::cmp::SliceOrd>::compare (70 samples, 0.02%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (147 samples, 0.04%)torrust_tracker::servers::udp::handlers::handle_request::{{closure}} (5,947 samples, 1.57%)torrust_tracker::servers::udp::handlers::handle_scrape::{{closure}} (265 samples, 0.07%)torrust_tracker::core::Tracker::scrape::{{closure}} (243 samples, 0.06%)torrust_tracker::core::Tracker::get_swarm_metadata (176 samples, 0.05%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get (156 samples, 0.04%)<alloc::string::String as core::fmt::Write>::write_str (54 samples, 
0.01%)alloc::string::String::push_str (51 samples, 0.01%)alloc::vec::Vec<T,A>::extend_from_slice (51 samples, 0.01%)<alloc::vec::Vec<T,A> as alloc::vec::spec_extend::SpecExtend<&T,core::slice::iter::Iter<T>>>::spec_extend (51 samples, 0.01%)alloc::vec::Vec<T,A>::append_elements (51 samples, 0.01%)core::fmt::num::imp::<impl core::fmt::Display for i32>::fmt (186 samples, 0.05%)core::fmt::num::imp::fmt_u64 (178 samples, 0.05%)core::fmt::num::imp::<impl core::fmt::Display for i64>::fmt (85 samples, 0.02%)core::fmt::num::imp::fmt_u64 (84 samples, 0.02%)<T as alloc::string::ToString>::to_string (319 samples, 0.08%)core::option::Option<T>::expect (71 samples, 0.02%)__GI___libc_free (39 samples, 0.01%)<alloc::alloc::Global as core::alloc::Allocator>::deallocate (112 samples, 0.03%)alloc::alloc::dealloc (112 samples, 0.03%)__rdl_dealloc (112 samples, 0.03%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::dealloc (112 samples, 0.03%)core::ptr::drop_in_place<alloc::string::String> (162 samples, 0.04%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (162 samples, 0.04%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (162 samples, 0.04%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (162 samples, 0.04%)torrust_tracker::servers::udp::logging::map_action_name (46 samples, 0.01%)binascii::bin2hex (128 samples, 0.03%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (45 samples, 0.01%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (45 samples, 0.01%)core::fmt::write (47 samples, 0.01%)core::fmt::Formatter::write_fmt (136 samples, 0.04%)core::str::converts::from_utf8 (64 samples, 0.02%)core::str::validations::run_utf8_validation (52 samples, 0.01%)<torrust_tracker_primitives::info_hash::InfoHash as core::fmt::Display>::fmt (289 samples, 0.08%)torrust_tracker_primitives::info_hash::InfoHash::to_hex_string (293 samples, 0.08%)<T as 
alloc::string::ToString>::to_string (293 samples, 0.08%)torrust_tracker::servers::udp::logging::log_request (1,025 samples, 0.27%)[[vdso]] (111 samples, 0.03%)alloc::raw_vec::finish_grow (120 samples, 0.03%)alloc::raw_vec::RawVec<T,A>::reserve::do_reserve_and_handle (134 samples, 0.04%)alloc::raw_vec::RawVec<T,A>::grow_amortized (134 samples, 0.04%)alloc::vec::Vec<T,A>::reserve (141 samples, 0.04%)alloc::raw_vec::RawVec<T,A>::reserve (141 samples, 0.04%)<alloc::string::String as core::fmt::Write>::write_str (148 samples, 0.04%)alloc::string::String::push_str (145 samples, 0.04%)alloc::vec::Vec<T,A>::extend_from_slice (145 samples, 0.04%)<alloc::vec::Vec<T,A> as alloc::vec::spec_extend::SpecExtend<&T,core::slice::iter::Iter<T>>>::spec_extend (145 samples, 0.04%)alloc::vec::Vec<T,A>::append_elements (145 samples, 0.04%)[[vdso]] (52 samples, 0.01%)core::fmt::num::imp::<impl core::fmt::Display for i32>::fmt (286 samples, 0.08%)core::fmt::num::imp::fmt_u64 (277 samples, 0.07%)<T as alloc::string::ToString>::to_string (328 samples, 0.09%)core::option::Option<T>::expect (43 samples, 0.01%)core::ptr::drop_in_place<alloc::string::String> (51 samples, 0.01%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (51 samples, 0.01%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (51 samples, 0.01%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (51 samples, 0.01%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (33,755 samples, 8.92%)torrust_track..torrust_tracker::servers::udp::logging::log_response (613 samples, 0.16%)futex_wake (46 samples, 0.01%)do_futex (52 samples, 0.01%)__x64_sys_futex (57 samples, 0.02%)__GI___lll_lock_wake_private (63 samples, 0.02%)entry_SYSCALL_64_after_hwframe (60 samples, 0.02%)do_syscall_64 (60 samples, 0.02%)_int_malloc (252 samples, 0.07%)__libc_calloc (366 samples, 0.10%)__memcpy_avx512_unaligned_erms (86 samples, 0.02%)__memset_avx512_unaligned_erms (51 samples, 0.01%)alloc::vec::from_elem (589 samples, 0.16%)<u8 
as alloc::vec::spec_from_elem::SpecFromElem>::from_elem (589 samples, 0.16%)alloc::raw_vec::RawVec<T,A>::with_capacity_zeroed_in (589 samples, 0.16%)alloc::raw_vec::RawVec<T,A>::try_allocate_in (589 samples, 0.16%)<alloc::alloc::Global as core::alloc::Allocator>::allocate_zeroed (562 samples, 0.15%)alloc::alloc::Global::alloc_impl (562 samples, 0.15%)alloc::alloc::alloc_zeroed (562 samples, 0.15%)__rdl_alloc_zeroed (562 samples, 0.15%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc_zeroed (562 samples, 0.15%)byteorder::ByteOrder::write_i32 (106 samples, 0.03%)<byteorder::BigEndian as byteorder::ByteOrder>::write_u32 (106 samples, 0.03%)core::num::<impl u32>::to_be_bytes (106 samples, 0.03%)core::num::<impl u32>::to_be (106 samples, 0.03%)core::num::<impl u32>::swap_bytes (106 samples, 0.03%)byteorder::io::WriteBytesExt::write_i32 (315 samples, 0.08%)std::io::Write::write_all (208 samples, 0.05%)<std::io::cursor::Cursor<alloc::vec::Vec<u8,A>> as std::io::Write>::write (208 samples, 0.05%)std::io::cursor::vec_write (208 samples, 0.05%)std::io::cursor::vec_write_unchecked (132 samples, 0.03%)core::ptr::mut_ptr::<impl *mut T>::copy_from (132 samples, 0.03%)core::intrinsics::copy (132 samples, 0.03%)aquatic_udp_protocol::response::Response::write (634 samples, 0.17%)byteorder::io::WriteBytesExt::write_i64 (65 samples, 0.02%)std::io::Write::write_all (43 samples, 0.01%)<std::io::cursor::Cursor<alloc::vec::Vec<u8,A>> as std::io::Write>::write (43 samples, 0.01%)std::io::cursor::vec_write (43 samples, 0.01%)std::io::cursor::vec_write_unchecked (43 samples, 0.01%)core::ptr::mut_ptr::<impl *mut T>::copy_from (43 samples, 0.01%)core::intrinsics::copy (43 samples, 0.01%)_int_free (514 samples, 0.14%)__GI___libc_free (669 samples, 0.18%)<alloc::alloc::Global as core::alloc::Allocator>::deallocate (685 samples, 0.18%)alloc::alloc::dealloc (685 samples, 0.18%)__rdl_dealloc (685 samples, 0.18%)std::sys::pal::unix::alloc::<impl 
core::alloc::global::GlobalAlloc for std::alloc::System>::dealloc (685 samples, 0.18%)core::ptr::drop_in_place<std::io::cursor::Cursor<alloc::vec::Vec<u8>>> (701 samples, 0.19%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (701 samples, 0.19%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (701 samples, 0.19%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (701 samples, 0.19%)std::io::cursor::Cursor<T>::new (38 samples, 0.01%)core::ptr::drop_in_place<tokio::net::udp::UdpSocket::send_to<&core::net::socket_addr::SocketAddr>::{{closure}}> (48 samples, 0.01%)<F as core::future::into_future::IntoFuture>::into_future (45 samples, 0.01%)tokio::io::ready::Ready::intersection (115 samples, 0.03%)tokio::io::ready::Ready::from_interest (112 samples, 0.03%)tokio::io::interest::Interest::is_readable (90 samples, 0.02%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (369 samples, 0.10%)__GI___pthread_disable_asynccancel (147 samples, 0.04%)__x64_sys_sendto (620 samples, 0.16%)__sys_sendto (40 samples, 0.01%)__check_object_size.part.0 (98 samples, 0.03%)check_stack_object (69 samples, 0.02%)__check_object_size (126 samples, 0.03%)_copy_from_user (248 samples, 0.07%)move_addr_to_kernel (1,485 samples, 0.39%)copy_user_enhanced_fast_string (840 samples, 0.22%)apparmor_socket_sendmsg (984 samples, 0.26%)__rcu_read_lock (164 samples, 0.04%)__rcu_read_unlock (68 samples, 0.02%)inet_send_prepare (44 samples, 0.01%)ip_make_skb (46 samples, 0.01%)ip_route_output_flow (54 samples, 0.01%)security_sk_classify_flow (48 samples, 0.01%)__ip_append_data (56 samples, 0.01%)__rcu_read_lock (60 samples, 0.02%)__rcu_read_unlock (54 samples, 0.01%)__check_object_size (55 samples, 0.01%)alloc_skb_with_frags (38 samples, 0.01%)__check_object_size.part.0 (42 samples, 0.01%)__check_heap_object (206 samples, 0.05%)__virt_addr_valid (473 samples, 0.13%)check_heap_object (1,244 samples, 0.33%)__check_object_size.part.0 (1,487 samples, 
0.39%)is_vmalloc_addr (68 samples, 0.02%)__check_object_size (1,571 samples, 0.42%)check_stack_object (57 samples, 0.02%)_copy_from_iter (469 samples, 0.12%)ip_generic_getfrag (3,554 samples, 0.94%)copy_user_enhanced_fast_string (1,329 samples, 0.35%)__build_skb_around (233 samples, 0.06%)__kmalloc_node_track_caller (74 samples, 0.02%)___slab_alloc (129 samples, 0.03%)memcg_slab_post_alloc_hook (87 samples, 0.02%)__kmem_cache_alloc_node (1,044 samples, 0.28%)kmalloc_slab (40 samples, 0.01%)memset_erms (331 samples, 0.09%)__kmalloc_node_track_caller (1,662 samples, 0.44%)__kmem_cache_alloc_node (57 samples, 0.02%)kmalloc_reserve (1,865 samples, 0.49%)kmalloc_size_roundup (156 samples, 0.04%)kmalloc_slab (94 samples, 0.02%)___slab_alloc (69 samples, 0.02%)memcg_slab_post_alloc_hook (59 samples, 0.02%)kmem_cache_alloc_node (700 samples, 0.19%)memset_erms (98 samples, 0.03%)__alloc_skb (4,155 samples, 1.10%)alloc_skb_with_frags (4,425 samples, 1.17%)kmem_cache_alloc_node (48 samples, 0.01%)__ip_append_data (11,490 samples, 3.04%)__i..sock_alloc_send_pskb (6,335 samples, 1.67%)skb_set_owner_w (1,576 samples, 0.42%)__ip_select_ident (1,244 samples, 0.33%)ipv4_mtu (151 samples, 0.04%)__ip_make_skb (3,290 samples, 0.87%)siphash_3u32 (619 samples, 0.16%)ip_generic_getfrag (113 samples, 0.03%)ip_setup_cork (313 samples, 0.08%)ipv4_mtu (121 samples, 0.03%)ipv4_mtu (67 samples, 0.02%)ip_make_skb (15,572 samples, 4.12%)ip_m..__rcu_read_lock (38 samples, 0.01%)__rcu_read_unlock (50 samples, 0.01%)__mkroute_output (806 samples, 0.21%)fib_table_lookup (1,252 samples, 0.33%)fib_lookup_good_nhc (418 samples, 0.11%)ip_route_output_key_hash_rcu (2,298 samples, 0.61%)xfrm_lookup_route (258 samples, 0.07%)xfrm_lookup_with_ifid (213 samples, 0.06%)ip_route_output_flow (3,011 samples, 0.80%)security_sk_classify_flow (115 samples, 0.03%)ip_send_check (954 samples, 0.25%)ipv4_conntrack_defrag (148 samples, 0.04%)ipv4_conntrack_local (711 samples, 0.19%)ipv4_conntrack_defrag (219 samples, 
0.06%)get_l4proto (223 samples, 0.06%)hash_conntrack_raw (101 samples, 0.03%)__nf_ct_refresh_acct (57 samples, 0.02%)nf_conntrack_handle_packet (676 samples, 0.18%)nf_conntrack_udp_packet (578 samples, 0.15%)nf_ct_get_tuple (65 samples, 0.02%)__nf_conntrack_find_get (1,301 samples, 0.34%)__rcu_read_lock (58 samples, 0.02%)hash_conntrack_raw (427 samples, 0.11%)nf_ct_get_tuple (421 samples, 0.11%)nf_conntrack_in (4,938 samples, 1.31%)resolve_normal_ct (3,247 samples, 0.86%)siphash_4u64 (690 samples, 0.18%)ipv4_conntrack_local (5,156 samples, 1.36%)nf_nat_inet_fn (43 samples, 0.01%)nf_hook_slow (6,435 samples, 1.70%)nf_nat_ipv4_local_fn (718 samples, 0.19%)nf_nat_inet_fn (362 samples, 0.10%)__ip_local_out (8,585 samples, 2.27%)_..nf_nat_ipv4_local_fn (174 samples, 0.05%)__rcu_read_lock (88 samples, 0.02%)__rcu_read_unlock (218 samples, 0.06%)__ip_finish_output (43 samples, 0.01%)__x86_indirect_thunk_rax (62 samples, 0.02%)apparmor_ip_postroute (470 samples, 0.12%)__usecs_to_jiffies (74 samples, 0.02%)_raw_spin_lock_irq (128 samples, 0.03%)_raw_spin_unlock_irq (76 samples, 0.02%)__netif_receive_skb_core.constprop.0 (52 samples, 0.01%)__netif_receive_skb_core.constprop.0 (564 samples, 0.15%)__rcu_read_unlock (54 samples, 0.01%)__rcu_read_unlock (89 samples, 0.02%)__rcu_read_lock (67 samples, 0.02%)__rcu_read_lock (57 samples, 0.02%)__rcu_read_unlock (105 samples, 0.03%)raw_local_deliver (389 samples, 0.10%)raw_v4_input (221 samples, 0.06%)raw_v4_input (38 samples, 0.01%)__udp4_lib_lookup (49 samples, 0.01%)__udp4_lib_lookup (504 samples, 0.13%)udp4_lib_lookup2 (352 samples, 0.09%)udp4_csum_init (812 samples, 0.21%)__udp_enqueue_schedule_skb (59 samples, 0.02%)__rcu_read_lock (61 samples, 0.02%)mem_cgroup_charge_skmem (57 samples, 0.02%)__sk_mem_raise_allocated (100 samples, 0.03%)__udp_enqueue_schedule_skb (1,313 samples, 0.35%)sock_def_readable (155 samples, 0.04%)__x86_indirect_thunk_rax (44 samples, 0.01%)__xfrm_policy_check2.constprop.0 (40 samples, 
0.01%)_raw_spin_lock (493 samples, 0.13%)_raw_spin_unlock (136 samples, 0.04%)ipv4_pktinfo_prepare (380 samples, 0.10%)dst_release (281 samples, 0.07%)security_sock_rcv_skb (39 samples, 0.01%)apparmor_socket_sock_rcv_skb (106 samples, 0.03%)sk_filter_trim_cap (322 samples, 0.09%)security_sock_rcv_skb (72 samples, 0.02%)skb_pull_rcsum (53 samples, 0.01%)__udp4_lib_rcv (5,414 samples, 1.43%)udp_unicast_rcv_skb (3,792 samples, 1.00%)udp_queue_rcv_skb (3,762 samples, 0.99%)udp_queue_rcv_one_skb (3,636 samples, 0.96%)sock_def_readable (148 samples, 0.04%)udp4_csum_init (57 samples, 0.02%)ip_protocol_deliver_rcu (6,453 samples, 1.71%)udp_rcv (5,665 samples, 1.50%)udp_unicast_rcv_skb (127 samples, 0.03%)raw_local_deliver (40 samples, 0.01%)ip_local_deliver_finish (6,677 samples, 1.76%)nf_confirm (192 samples, 0.05%)nf_confirm (221 samples, 0.06%)nf_nat_inet_fn (41 samples, 0.01%)nf_hook_slow (767 samples, 0.20%)nf_nat_ipv4_local_in (316 samples, 0.08%)nf_nat_inet_fn (175 samples, 0.05%)ip_local_deliver (8,248 samples, 2.18%)i..nf_nat_ipv4_local_in (181 samples, 0.05%)ip_local_deliver_finish (75 samples, 0.02%)ip_rcv_core (309 samples, 0.08%)ip_rcv_finish_core.constprop.0 (355 samples, 0.09%)ip_sabotage_in (306 samples, 0.08%)ipv4_conntrack_defrag (138 samples, 0.04%)ipv4_conntrack_in (323 samples, 0.09%)ip_sabotage_in (69 samples, 0.02%)ipv4_conntrack_defrag (108 samples, 0.03%)ipv4_conntrack_in (282 samples, 0.07%)nf_conntrack_in (278 samples, 0.07%)nf_conntrack_in (57 samples, 0.02%)nf_hook_slow (1,381 samples, 0.36%)nf_nat_ipv4_pre_routing (432 samples, 0.11%)nf_nat_inet_fn (255 samples, 0.07%)ip_rcv (11,869 samples, 3.14%)ip_..nf_nat_ipv4_pre_routing (201 samples, 0.05%)__netif_receive_skb_one_core (12,879 samples, 3.40%)__n..nf_hook_slow (90 samples, 0.02%)__netif_receive_skb (12,980 samples, 3.43%)__n..__netif_receive_skb_one_core (61 samples, 0.02%)_raw_spin_lock_irq (716 samples, 0.19%)__napi_poll (14,705 samples, 3.89%)__na..process_backlog (14,356 samples, 
3.79%)proc.._raw_spin_unlock_irq (225 samples, 0.06%)net_rx_action (15,607 samples, 4.12%)net_..process_backlog (195 samples, 0.05%)__do_softirq (16,392 samples, 4.33%)__do_..__x86_indirect_thunk_rax (38 samples, 0.01%)__local_bh_enable_ip (17,356 samples, 4.59%)__loc..do_softirq.part.0 (17,063 samples, 4.51%)do_so..net_rx_action (146 samples, 0.04%)do_softirq.part.0 (41 samples, 0.01%)__local_bh_enable_ip (78 samples, 0.02%)enqueue_to_backlog (49 samples, 0.01%)__raise_softirq_irqoff (57 samples, 0.02%)_raw_spin_unlock_irqrestore (46 samples, 0.01%)_raw_spin_lock_irqsave (321 samples, 0.08%)__raw_spin_lock_irqsave (320 samples, 0.08%)enqueue_to_backlog (898 samples, 0.24%)_raw_spin_unlock_irqrestore (120 samples, 0.03%)ktime_get_with_offset (165 samples, 0.04%)__netif_rx (2,105 samples, 0.56%)netif_rx_internal (1,977 samples, 0.52%)read_tsc (703 samples, 0.19%)__rcu_read_lock (82 samples, 0.02%)__rcu_read_unlock (38 samples, 0.01%)eth_type_trans (85 samples, 0.02%)skb_clone_tx_timestamp (158 samples, 0.04%)__wake_up_common (61 samples, 0.02%)_raw_read_unlock_irqrestore (65 samples, 0.02%)__task_rq_lock (52 samples, 0.01%)raw_spin_rq_lock_nested (48 samples, 0.01%)_raw_spin_lock (48 samples, 0.01%)native_queued_spin_lock_slowpath (48 samples, 0.01%)_raw_spin_lock_irqsave (273 samples, 0.07%)__raw_spin_lock_irqsave (273 samples, 0.07%)select_task_rq_fair (164 samples, 0.04%)wake_affine (42 samples, 0.01%)__smp_call_single_queue (224 samples, 0.06%)send_call_function_single_ipi (214 samples, 0.06%)native_send_call_func_single_ipi (117 samples, 0.03%)default_send_IPI_single_phys (115 samples, 0.03%)__default_send_IPI_dest_field (105 samples, 0.03%)llist_add_batch (149 samples, 0.04%)sched_clock_cpu (45 samples, 0.01%)sched_clock (44 samples, 0.01%)native_sched_clock (44 samples, 0.01%)ttwu_queue_wakelist (575 samples, 0.15%)try_to_wake_up (1,751 samples, 0.46%)default_wake_function (1,771 samples, 0.47%)__wake_up_common (1,874 samples, 
0.50%)ep_autoremove_wake_function (1,787 samples, 0.47%)_raw_spin_lock_irqsave (179 samples, 0.05%)__raw_spin_lock_irqsave (177 samples, 0.05%)native_queued_spin_lock_slowpath (49 samples, 0.01%)__wake_up_common_lock (2,109 samples, 0.56%)__wake_up (2,115 samples, 0.56%)__raw_read_lock_irqsave (1,681 samples, 0.44%)queued_read_lock_slowpath (112 samples, 0.03%)_raw_read_lock_irqsave (1,685 samples, 0.45%)__wake_up_common (6,023 samples, 1.59%)ep_poll_callback (5,577 samples, 1.47%)_raw_read_unlock_irqrestore (549 samples, 0.15%)__raw_spin_lock_irqsave (2,457 samples, 0.65%)native_queued_spin_lock_slowpath (2,074 samples, 0.55%)_raw_spin_lock_irqsave (2,473 samples, 0.65%)_raw_spin_unlock_irqrestore (128 samples, 0.03%)__wake_up_common_lock (8,884 samples, 2.35%)_..ep_poll_callback (159 samples, 0.04%)loopback_xmit (13,069 samples, 3.45%)loo..sock_wfree (10,251 samples, 2.71%)so..__wake_up_sync_key (9,015 samples, 2.38%)__.._raw_spin_unlock_irqrestore (50 samples, 0.01%)skb_clone_tx_timestamp (43 samples, 0.01%)dev_hard_start_xmit (14,617 samples, 3.86%)dev_..sock_wfree (177 samples, 0.05%)loopback_xmit (247 samples, 0.07%)netdev_core_pick_tx (51 samples, 0.01%)qdisc_pkt_len_init (42 samples, 0.01%)netif_skb_features (232 samples, 0.06%)skb_network_protocol (98 samples, 0.03%)validate_xmit_skb (501 samples, 0.13%)validate_xmit_xfrm (46 samples, 0.01%)__dev_queue_xmit (16,029 samples, 4.24%)__dev..ip_finish_output2 (34,592 samples, 9.14%)ip_finish_out..neigh_hh_output (16,177 samples, 4.28%)neigh..ip_skb_dst_mtu (147 samples, 0.04%)__ip_finish_output (35,105 samples, 9.28%)__ip_finish_o..ip_finish_output (35,746 samples, 9.45%)ip_finish_outp..nf_confirm (341 samples, 0.09%)nf_confirm (376 samples, 0.10%)nf_hook_slow (1,736 samples, 0.46%)nf_nat_ipv4_out (902 samples, 0.24%)nf_nat_inet_fn (495 samples, 0.13%)ip_output (40,003 samples, 10.57%)ip_outputnf_nat_ipv4_out (557 samples, 0.15%)ip_send_skb (49,451 samples, 13.07%)ip_send_skbnf_hook_slow (95 samples, 
0.03%)udp_send_skb (50,114 samples, 13.24%)udp_send_skbudp4_hwcsum (70 samples, 0.02%)inet_sendmsg (72,507 samples, 19.16%)inet_sendmsgudp_sendmsg (71,624 samples, 18.93%)udp_sendmsgaa_sk_perm (44 samples, 0.01%)security_socket_sendmsg (892 samples, 0.24%)apparmor_socket_sendmsg (549 samples, 0.15%)aa_sk_perm (524 samples, 0.14%)sock_sendmsg (74,871 samples, 19.79%)sock_sendmsgudp_sendmsg (43 samples, 0.01%)__fget_light (1,626 samples, 0.43%)__fdget (1,664 samples, 0.44%)__sys_sendto (79,939 samples, 21.13%)__sys_sendtosockfd_lookup_light (1,915 samples, 0.51%)__fget_light (125 samples, 0.03%)fput (954 samples, 0.25%)import_single_range (191 samples, 0.05%)sockfd_lookup_light (69 samples, 0.02%)__x64_sys_sendto (81,183 samples, 21.46%)__x64_sys_sendtoexit_to_user_mode_prepare (43 samples, 0.01%)merge_sched_in (55 samples, 0.01%)ctx_sched_in (112 samples, 0.03%)visit_groups_merge.constprop.0.isra.0 (111 samples, 0.03%)rb_next (43 samples, 0.01%)__perf_event_task_sched_in (131 samples, 0.03%)finish_task_switch.isra.0 (140 samples, 0.04%)exit_to_user_mode_loop (146 samples, 0.04%)schedule (145 samples, 0.04%)__schedule (145 samples, 0.04%)exit_to_user_mode_prepare (506 samples, 0.13%)do_syscall_64 (82,006 samples, 21.67%)do_syscall_64syscall_exit_to_user_mode (598 samples, 0.16%)syscall_enter_from_user_mode (42 samples, 0.01%)entry_SYSCALL_64_after_hwframe (82,860 samples, 21.90%)entry_SYSCALL_64_after_hwframesyscall_return_via_sysret (245 samples, 0.06%)__libc_sendto (83,829 samples, 22.16%)__libc_sendtostd::sys::pal::unix::cvt (143 samples, 0.04%)tokio::net::udp::UdpSocket::send_to_addr::{{closure}}::{{closure}} (83,973 samples, 22.19%)tokio::net::udp::UdpSocket::send_to..mio::net::udp::UdpSocket::send_to (83,973 samples, 22.19%)mio::net::udp::UdpSocket::send_tomio::io_source::IoSource<T>::do_io (83,973 samples, 22.19%)mio::io_source::IoSource<T>::do_iomio::sys::unix::stateless_io_source::IoSourceState::do_io (83,973 samples, 
22.19%)mio::sys::unix::stateless_io_source..mio::net::udp::UdpSocket::send_to::{{closure}} (83,973 samples, 22.19%)mio::net::udp::UdpSocket::send_to::..std::net::udp::UdpSocket::send_to (83,973 samples, 22.19%)std::net::udp::UdpSocket::send_tostd::sys_common::net::UdpSocket::send_to (83,973 samples, 22.19%)std::sys_common::net::UdpSocket::se..core::result::Result<T,E>::is_err (368 samples, 0.10%)core::result::Result<T,E>::is_ok (368 samples, 0.10%)<tokio::runtime::io::scheduled_io::Readiness as core::ops::drop::Drop>::drop (420 samples, 0.11%)tokio::loom::std::mutex::Mutex<T>::lock (416 samples, 0.11%)std::sync::mutex::Mutex<T>::lock (415 samples, 0.11%)std::sys::sync::mutex::futex::Mutex::lock (412 samples, 0.11%)torrust_tracker::servers::udp::server::Udp::send_packet::{{closure}} (85,524 samples, 22.60%)torrust_tracker::servers::udp::serve..tokio::net::udp::UdpSocket::send_to::{{closure}} (85,236 samples, 22.53%)tokio::net::udp::UdpSocket::send_to:..tokio::net::udp::UdpSocket::send_to_addr::{{closure}} (84,989 samples, 22.46%)tokio::net::udp::UdpSocket::send_to_..tokio::runtime::io::registration::Registration::async_io::{{closure}} (84,879 samples, 22.43%)tokio::runtime::io::registration::Re..tokio::runtime::io::registration::Registration::readiness::{{closure}} (439 samples, 0.12%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (435 samples, 0.11%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (432 samples, 0.11%)torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (87,857 samples, 23.22%)torrust_tracker::servers::udp::server..torrust_tracker::servers::udp::server::Udp::process_request::{{closure}} (124,991 samples, 33.03%)torrust_tracker::servers::udp::server::Udp::process_r..torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (123,165 samples, 32.55%)torrust_tracker::servers::udp::server::Udp::process_v..<alloc::sync::Arc<T,A> as core::clone::Clone>::clone (238 samples, 
0.06%)core::sync::atomic::AtomicUsize::fetch_add (238 samples, 0.06%)core::sync::atomic::atomic_add (238 samples, 0.06%)futex_hash (62 samples, 0.02%)get_futex_key (42 samples, 0.01%)futex_wake (248 samples, 0.07%)wake_up_q (50 samples, 0.01%)try_to_wake_up (46 samples, 0.01%)do_futex (364 samples, 0.10%)__x64_sys_futex (407 samples, 0.11%)do_syscall_64 (468 samples, 0.12%)entry_SYSCALL_64_after_hwframe (496 samples, 0.13%)__GI___lll_lock_wake_private (552 samples, 0.15%)update_load_avg (52 samples, 0.01%)dequeue_entity (134 samples, 0.04%)dequeue_task (163 samples, 0.04%)dequeue_task_fair (160 samples, 0.04%)ctx_sched_out (43 samples, 0.01%)prepare_task_switch (66 samples, 0.02%)__perf_event_task_sched_out (59 samples, 0.02%)perf_event_context_sched_out (52 samples, 0.01%)psi_group_change (39 samples, 0.01%)psi_task_switch (63 samples, 0.02%)__schedule (390 samples, 0.10%)futex_wait_queue (413 samples, 0.11%)schedule (393 samples, 0.10%)futex_q_unlock (143 samples, 0.04%)futex_wait_setup (230 samples, 0.06%)do_futex (688 samples, 0.18%)futex_wait (678 samples, 0.18%)__x64_sys_futex (696 samples, 0.18%)__rseq_handle_notify_resume (45 samples, 0.01%)exit_to_user_mode_loop (57 samples, 0.02%)exit_to_user_mode_prepare (86 samples, 0.02%)entry_SYSCALL_64_after_hwframe (800 samples, 0.21%)do_syscall_64 (795 samples, 0.21%)syscall_exit_to_user_mode (93 samples, 0.02%)__GI___lll_lock_wait_private (860 samples, 0.23%)futex_wait (844 samples, 0.22%)_int_free (1,310 samples, 0.35%)__GI___libc_free (1,905 samples, 0.50%)syscall (58 samples, 0.02%)tokio::runtime::task::harness::Harness<T,S>::dealloc (47 samples, 0.01%)core::mem::drop (43 samples, 0.01%)core::ptr::drop_in_place<alloc::boxed::Box<tokio::runtime::task::core::Cell<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}},alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>>> (43 samples, 
0.01%)core::ptr::drop_in_place<tokio::runtime::task::core::Cell<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}},alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>> (43 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::task::core::Core<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}},alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>> (43 samples, 0.01%)tokio::runtime::task::raw::drop_abort_handle (152 samples, 0.04%)tokio::runtime::task::harness::Harness<T,S>::drop_reference (139 samples, 0.04%)tokio::runtime::task::state::State::ref_dec (139 samples, 0.04%)core::ptr::drop_in_place<tokio::runtime::task::abort::AbortHandle> (2,198 samples, 0.58%)<tokio::runtime::task::abort::AbortHandle as core::ops::drop::Drop>::drop (2,198 samples, 0.58%)tokio::runtime::task::raw::RawTask::drop_abort_handle (2,195 samples, 0.58%)core::result::Result<T,E>::is_ok (58 samples, 0.02%)tokio::runtime::task::raw::drop_join_handle_slow (47 samples, 0.01%)tokio::runtime::task::harness::Harness<T,S>::drop_join_handle_slow (43 samples, 0.01%)tokio::runtime::task::raw::RawTask::drop_join_handle_slow (78 samples, 0.02%)core::ptr::drop_in_place<tokio::runtime::task::join::JoinHandle<()>> (199 samples, 0.05%)<tokio::runtime::task::join::JoinHandle<T> as core::ops::drop::Drop>::drop (199 samples, 0.05%)tokio::runtime::task::state::State::drop_join_handle_fast (47 samples, 0.01%)core::sync::atomic::AtomicUsize::compare_exchange_weak (47 samples, 0.01%)core::sync::atomic::atomic_compare_exchange_weak (47 samples, 0.01%)ringbuf::ring_buffer::base::RbBase::is_full (57 samples, 0.02%)ringbuf::ring_buffer::base::RbBase::vacant_len (47 samples, 0.01%)ringbuf::consumer::Consumer<T,R>::advance (64 samples, 0.02%)ringbuf::ring_buffer::base::RbRead::advance_head (64 samples, 0.02%)ringbuf::consumer::Consumer<T,R>::as_uninit_slices (38 samples, 0.01%)ringbuf::ring_buffer::base::RbRead::occupied_slices (38 
samples, 0.01%)ringbuf::ring_buffer::rb::Rb::pop (114 samples, 0.03%)ringbuf::consumer::Consumer<T,R>::pop (114 samples, 0.03%)ringbuf::producer::Producer<T,R>::advance (46 samples, 0.01%)ringbuf::ring_buffer::base::RbWrite::advance_tail (46 samples, 0.01%)ringbuf::ring_buffer::rb::Rb::push_overwrite (260 samples, 0.07%)ringbuf::ring_buffer::rb::Rb::push (89 samples, 0.02%)ringbuf::producer::Producer<T,R>::push (89 samples, 0.02%)tokio::runtime::task::state::Snapshot::is_complete (52 samples, 0.01%)tokio::runtime::task::abort::AbortHandle::is_finished (58 samples, 0.02%)tokio::runtime::task::join::JoinHandle<T>::abort_handle (122 samples, 0.03%)tokio::runtime::task::raw::RawTask::ref_inc (122 samples, 0.03%)tokio::runtime::task::state::State::ref_inc (122 samples, 0.03%)core::sync::atomic::AtomicUsize::fetch_add (44 samples, 0.01%)core::sync::atomic::atomic_add (44 samples, 0.01%)dequeue_entity (83 samples, 0.02%)dequeue_task_fair (94 samples, 0.02%)dequeue_task (97 samples, 0.03%)psi_task_switch (52 samples, 0.01%)futex_wait_queue (252 samples, 0.07%)schedule (238 samples, 0.06%)__schedule (236 samples, 0.06%)futex_q_lock (38 samples, 0.01%)futex_q_unlock (146 samples, 0.04%)futex_wait_setup (238 samples, 0.06%)do_futex (520 samples, 0.14%)futex_wait (510 samples, 0.13%)__x64_sys_futex (538 samples, 0.14%)exit_to_user_mode_prepare (51 samples, 0.01%)do_syscall_64 (595 samples, 0.16%)syscall_exit_to_user_mode (52 samples, 0.01%)entry_SYSCALL_64_after_hwframe (603 samples, 0.16%)__GI___lll_lock_wait_private (649 samples, 0.17%)futex_wait (634 samples, 0.17%)futex_hash (66 samples, 0.02%)_raw_spin_lock (46 samples, 0.01%)native_queued_spin_lock_slowpath (46 samples, 0.01%)get_futex_key (40 samples, 0.01%)__smp_call_single_queue (57 samples, 0.02%)send_call_function_single_ipi (54 samples, 0.01%)try_to_wake_up (404 samples, 0.11%)ttwu_queue_wakelist (116 samples, 0.03%)futex_wake (701 samples, 0.19%)wake_up_q (423 samples, 0.11%)do_futex (823 samples, 
0.22%)__x64_sys_futex (865 samples, 0.23%)exit_to_user_mode_prepare (56 samples, 0.01%)do_syscall_64 (958 samples, 0.25%)syscall_exit_to_user_mode (63 samples, 0.02%)entry_SYSCALL_64_after_hwframe (992 samples, 0.26%)__GI___lll_lock_wake_private (1,063 samples, 0.28%)malloc_consolidate (690 samples, 0.18%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (779 samples, 0.21%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (192 samples, 0.05%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (162 samples, 0.04%)_int_malloc (2,142 samples, 0.57%)__GI___libc_malloc (3,995 samples, 1.06%)alloc::vec::Vec<T>::with_capacity (4,013 samples, 1.06%)alloc::vec::Vec<T,A>::with_capacity_in (4,013 samples, 1.06%)alloc::raw_vec::RawVec<T,A>::with_capacity_in (4,006 samples, 1.06%)alloc::raw_vec::RawVec<T,A>::try_allocate_in (4,006 samples, 1.06%)<alloc::alloc::Global as core::alloc::Allocator>::allocate (4,004 samples, 1.06%)alloc::alloc::Global::alloc_impl (4,004 samples, 1.06%)alloc::alloc::alloc (4,004 samples, 1.06%)__rdl_alloc (4,004 samples, 1.06%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (4,004 samples, 1.06%)core::ptr::drop_in_place<alloc::sync::Arc<tokio::net::udp::UdpSocket>> (175 samples, 0.05%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (175 samples, 0.05%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (189 samples, 0.05%)<tokio::runtime::io::scheduled_io::Readiness as core::ops::drop::Drop>::drop (81 samples, 0.02%)tokio::loom::std::mutex::Mutex<T>::lock (51 samples, 0.01%)std::sync::mutex::Mutex<T>::lock (47 samples, 0.01%)std::sys::sync::mutex::futex::Mutex::lock (46 samples, 0.01%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (42 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (144 samples, 
0.04%)tokio::net::udp::UdpSocket::readable::{{closure}} (392 samples, 0.10%)tokio::net::udp::UdpSocket::ready::{{closure}} (390 samples, 0.10%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (181 samples, 0.05%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (172 samples, 0.05%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (135 samples, 0.04%)__GI___pthread_disable_asynccancel (75 samples, 0.02%)__x64_sys_recvfrom (179 samples, 0.05%)__sys_recvfrom (40 samples, 0.01%)__get_user_4 (139 samples, 0.04%)__put_user_nocheck_4 (875 samples, 0.23%)__check_object_size.part.0 (127 samples, 0.03%)check_stack_object (103 samples, 0.03%)__check_object_size (158 samples, 0.04%)move_addr_to_user (955 samples, 0.25%)copy_user_enhanced_fast_string (704 samples, 0.19%)apparmor_socket_recvmsg (80 samples, 0.02%)__check_object_size (53 samples, 0.01%)__skb_recv_udp (43 samples, 0.01%)__check_object_size.part.0 (45 samples, 0.01%)__check_heap_object (63 samples, 0.02%)__check_heap_object (309 samples, 0.08%)__virt_addr_valid (486 samples, 0.13%)check_heap_object (1,230 samples, 0.33%)__check_object_size.part.0 (1,550 samples, 0.41%)is_vmalloc_addr (91 samples, 0.02%)__check_object_size (1,697 samples, 0.45%)check_stack_object (122 samples, 0.03%)__skb_try_recv_from_queue (265 samples, 0.07%)_raw_spin_lock (56 samples, 0.01%)_raw_spin_unlock_bh (498 samples, 0.13%)__local_bh_enable_ip (441 samples, 0.12%)__refill_stock (48 samples, 0.01%)drain_stock (42 samples, 0.01%)__sk_mem_reduce_allocated (74 samples, 0.02%)mem_cgroup_uncharge_skmem (74 samples, 0.02%)refill_stock (61 samples, 0.02%)__skb_recv_udp (1,279 samples, 0.34%)udp_rmem_release (187 samples, 0.05%)_copy_to_iter (408 samples, 0.11%)_raw_spin_lock_bh (603 samples, 0.16%)_raw_spin_unlock_bh (38 samples, 0.01%)copy_user_enhanced_fast_string (2,356 samples, 0.62%)cmpxchg_double_slab.constprop.0.isra.0 (1,503 samples, 0.40%)__slab_free (3,062 
samples, 0.81%)put_cpu_partial (75 samples, 0.02%)__unfreeze_partials (70 samples, 0.02%)cache_from_obj (75 samples, 0.02%)cmpxchg_double_slab.constprop.0.isra.0 (85 samples, 0.02%)kfree_skbmem (3,703 samples, 0.98%)kmem_cache_free (3,665 samples, 0.97%)cmpxchg_double_slab.constprop.0.isra.0 (404 samples, 0.11%)__slab_free (1,382 samples, 0.37%)put_cpu_partial (57 samples, 0.02%)__unfreeze_partials (54 samples, 0.01%)__kmem_cache_free (1,536 samples, 0.41%)__consume_stateless_skb (7,001 samples, 1.85%)_..skb_release_data (3,272 samples, 0.86%)skb_free_head (1,626 samples, 0.43%)kfree (1,609 samples, 0.43%)skb_consume_udp (7,079 samples, 1.87%)s..inet_recvmsg (15,244 samples, 4.03%)inet..udp_recvmsg (15,018 samples, 3.97%)udp_..security_socket_recvmsg (249 samples, 0.07%)apparmor_socket_recvmsg (171 samples, 0.05%)aa_sk_perm (146 samples, 0.04%)sock_recvmsg (15,648 samples, 4.14%)sock_..__fget_light (1,217 samples, 0.32%)__fdget (1,243 samples, 0.33%)__sys_recvfrom (19,882 samples, 5.25%)__sys_..sockfd_lookup_light (1,397 samples, 0.37%)__fget_light (44 samples, 0.01%)fput (482 samples, 0.13%)import_single_range (71 samples, 0.02%)__x64_sys_recvfrom (20,466 samples, 5.41%)__x64_s..exit_to_user_mode_prepare (364 samples, 0.10%)do_syscall_64 (21,043 samples, 5.56%)do_sysc..syscall_exit_to_user_mode (485 samples, 0.13%)entry_SYSCALL_64_after_hwframe (21,266 samples, 5.62%)entry_S..syscall_return_via_sysret (39 samples, 0.01%)__libc_recvfrom (21,805 samples, 5.76%)__libc_..tokio::net::udp::UdpSocket::recv_buf_from::{{closure}}::{{closure}} (21,997 samples, 5.81%)tokio::..mio::net::udp::UdpSocket::recv_from (21,912 samples, 5.79%)mio::ne..mio::io_source::IoSource<T>::do_io (21,912 samples, 5.79%)mio::io..mio::sys::unix::stateless_io_source::IoSourceState::do_io (21,912 samples, 5.79%)mio::sy..mio::net::udp::UdpSocket::recv_from::{{closure}} (21,912 samples, 5.79%)mio::ne..std::net::udp::UdpSocket::recv_from (21,912 samples, 
5.79%)std::ne..std::sys_common::net::UdpSocket::recv_from (21,912 samples, 5.79%)std::sy..std::sys::pal::unix::net::Socket::recv_from (21,912 samples, 5.79%)std::sy..std::sys::pal::unix::net::Socket::recv_from_with_flags (21,912 samples, 5.79%)std::sy..std::sys_common::net::sockaddr_to_addr (45 samples, 0.01%)__GI___libc_malloc (49 samples, 0.01%)_int_malloc (51 samples, 0.01%)core::result::Result<T,E>::is_err (45 samples, 0.01%)core::result::Result<T,E>::is_ok (45 samples, 0.01%)<tokio::runtime::io::scheduled_io::Readiness as core::ops::drop::Drop>::drop (69 samples, 0.02%)tokio::loom::std::mutex::Mutex<T>::lock (63 samples, 0.02%)std::sync::mutex::Mutex<T>::lock (58 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock (58 samples, 0.02%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (282 samples, 0.07%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (238 samples, 0.06%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (119 samples, 0.03%)torrust_tracker::servers::udp::server::Udp::receive_request::{{closure}} (27,792 samples, 7.35%)torrust_tr..tokio::net::udp::UdpSocket::recv_buf_from::{{closure}} (22,942 samples, 6.06%)tokio::n..tokio::runtime::io::registration::Registration::async_io::{{closure}} (22,924 samples, 6.06%)tokio::r..__memcpy_avx512_unaligned_erms (92 samples, 0.02%)__memcpy_avx512_unaligned_erms (2,847 samples, 0.75%)__memcpy_avx512_unaligned_erms (837 samples, 0.22%)core::ptr::drop_in_place<core::cell::RefMut<core::option::Option<alloc::boxed::Box<tokio::runtime::scheduler::multi_thread::worker::Core>>>> (42 samples, 0.01%)core::ptr::drop_in_place<core::cell::BorrowRefMut> (42 samples, 0.01%)<core::cell::BorrowRefMut as core::ops::drop::Drop>::drop (42 samples, 0.01%)core::cell::Cell<T>::set (42 samples, 0.01%)core::cell::Cell<T>::replace (42 samples, 0.01%)core::mem::replace (42 samples, 0.01%)core::ptr::write (42 samples, 0.01%)__x64_sys_futex (80 samples, 0.02%)futex_hash 
(110 samples, 0.03%)futex_wake_mark (43 samples, 0.01%)get_futex_key (112 samples, 0.03%)wake_q_add_safe (39 samples, 0.01%)_raw_spin_lock_irqsave (93 samples, 0.02%)__raw_spin_lock_irqsave (93 samples, 0.02%)select_task_rq_fair (322 samples, 0.09%)wake_affine (103 samples, 0.03%)available_idle_cpu (73 samples, 0.02%)__smp_call_single_queue (251 samples, 0.07%)send_call_function_single_ipi (243 samples, 0.06%)native_send_call_func_single_ipi (94 samples, 0.02%)default_send_IPI_single_phys (94 samples, 0.02%)__default_send_IPI_dest_field (85 samples, 0.02%)llist_add_batch (231 samples, 0.06%)sched_clock_cpu (68 samples, 0.02%)sched_clock (61 samples, 0.02%)native_sched_clock (61 samples, 0.02%)ttwu_queue_wakelist (704 samples, 0.19%)try_to_wake_up (1,611 samples, 0.43%)futex_wake (2,206 samples, 0.58%)wake_up_q (1,679 samples, 0.44%)do_futex (2,433 samples, 0.64%)__x64_sys_futex (2,563 samples, 0.68%)do_futex (62 samples, 0.02%)do_syscall_64 (2,732 samples, 0.72%)syscall_exit_to_user_mode (68 samples, 0.02%)exit_to_user_mode_prepare (58 samples, 0.02%)entry_SYSCALL_64_after_hwframe (2,827 samples, 0.75%)syscall (2,981 samples, 0.79%)core::ptr::drop_in_place<core::option::Option<tokio::runtime::task::Notified<alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>>> (83 samples, 0.02%)core::sync::atomic::AtomicU32::store (58 samples, 0.02%)core::sync::atomic::atomic_store (58 samples, 0.02%)tokio::runtime::scheduler::multi_thread::queue::Local<T>::push_back_finish (83 samples, 0.02%)tokio::runtime::scheduler::multi_thread::queue::Local<T>::push_back_or_overflow (115 samples, 0.03%)tokio::runtime::context::with_scheduler (451 samples, 0.12%)std::thread::local::LocalKey<T>::try_with (403 samples, 0.11%)tokio::runtime::context::with_scheduler::{{closure}} (394 samples, 0.10%)tokio::runtime::context::scoped::Scoped<T>::with (387 samples, 0.10%)tokio::runtime::scheduler::multi_thread::worker::with_current::{{closure}} (385 samples, 
0.10%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task::{{closure}} (381 samples, 0.10%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_local (338 samples, 0.09%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_parked_local (59 samples, 0.02%)alloc::vec::Vec<T,A>::pop (85 samples, 0.02%)core::ptr::read (56 samples, 0.01%)std::sync::mutex::MutexGuard<T>::new (55 samples, 0.01%)std::sync::poison::Flag::guard (55 samples, 0.01%)std::thread::panicking (52 samples, 0.01%)std::panicking::panicking (52 samples, 0.01%)std::panicking::panic_count::count_is_zero (52 samples, 0.01%)tokio::loom::std::mutex::Mutex<T>::lock (156 samples, 0.04%)std::sync::mutex::Mutex<T>::lock (145 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock (90 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock_contended (43 samples, 0.01%)core::sync::atomic::AtomicUsize::fetch_add (2,704 samples, 0.71%)core::sync::atomic::atomic_add (2,704 samples, 0.71%)tokio::runtime::scheduler::multi_thread::idle::Idle::worker_to_notify (3,112 samples, 0.82%)tokio::runtime::scheduler::multi_thread::idle::Idle::notify_should_wakeup (2,853 samples, 0.75%)tokio::runtime::scheduler::multi_thread::idle::State::num_unparked (80 samples, 0.02%)__fget_light (82 samples, 0.02%)__fdget_pos (85 samples, 0.02%)try_to_wake_up (70 samples, 0.02%)__wake_up_common (74 samples, 0.02%)ep_autoremove_wake_function (71 samples, 0.02%)default_wake_function (71 samples, 0.02%)_raw_spin_lock_irqsave (68 samples, 0.02%)__raw_spin_lock_irqsave (68 samples, 0.02%)native_queued_spin_lock_slowpath (58 samples, 0.02%)__wake_up_common_lock (144 samples, 0.04%)__wake_up (145 samples, 0.04%)_raw_read_lock_irqsave (177 samples, 0.05%)__raw_read_lock_irqsave (177 samples, 0.05%)__wake_up_common (458 samples, 
0.12%)ep_poll_callback (447 samples, 0.12%)__wake_up_locked_key (461 samples, 0.12%)_raw_spin_lock_irq (139 samples, 0.04%)eventfd_write (716 samples, 0.19%)copy_user_enhanced_fast_string (76 samples, 0.02%)__x64_sys_write (884 samples, 0.23%)ksys_write (873 samples, 0.23%)vfs_write (758 samples, 0.20%)do_syscall_64 (930 samples, 0.25%)syscall_exit_to_user_mode (40 samples, 0.01%)entry_SYSCALL_64_after_hwframe (950 samples, 0.25%)__GI___libc_write (1,017 samples, 0.27%)__GI___libc_write (1,014 samples, 0.27%)tokio::runtime::driver::Handle::unpark (1,023 samples, 0.27%)tokio::runtime::driver::IoHandle::unpark (1,023 samples, 0.27%)tokio::runtime::io::driver::Handle::unpark (1,023 samples, 0.27%)mio::waker::Waker::wake (1,022 samples, 0.27%)mio::sys::unix::waker::fdbased::Waker::wake (1,022 samples, 0.27%)mio::sys::unix::waker::eventfd::WakerInternal::wake (1,022 samples, 0.27%)<&std::fs::File as std::io::Write>::write (1,021 samples, 0.27%)std::sys::pal::unix::fs::File::write (1,021 samples, 0.27%)std::sys::pal::unix::fd::FileDesc::write (1,021 samples, 0.27%)tokio::runtime::context::with_scheduler (7,649 samples, 2.02%)t..std::thread::local::LocalKey<T>::try_with (7,636 samples, 2.02%)s..tokio::runtime::context::with_scheduler::{{closure}} (7,635 samples, 2.02%)t..tokio::runtime::context::scoped::Scoped<T>::with (7,634 samples, 2.02%)t..tokio::runtime::scheduler::multi_thread::worker::with_current::{{closure}} (7,634 samples, 2.02%)t..tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task::{{closure}} (7,634 samples, 2.02%)t..tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_local (7,591 samples, 2.01%)t..tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_parked_local (7,591 samples, 2.01%)t..tokio::runtime::scheduler::multi_thread::park::Unparker::unpark 
(1,047 samples, 0.28%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (1,043 samples, 0.28%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_option_task_without_yield (7,948 samples, 2.10%)t..tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task (7,946 samples, 2.10%)t..tokio::runtime::scheduler::multi_thread::worker::with_current (7,946 samples, 2.10%)t..tokio::util::sharded_list::ShardGuard<L,<L as tokio::util::linked_list::Link>::Target>::push (90 samples, 0.02%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::push_front (63 samples, 0.02%)core::result::Result<T,E>::is_err (409 samples, 0.11%)core::result::Result<T,E>::is_ok (409 samples, 0.11%)tokio::runtime::task::list::OwnedTasks<S>::bind_inner (992 samples, 0.26%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::lock_shard (797 samples, 0.21%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::shard_inner (781 samples, 0.21%)tokio::loom::std::mutex::Mutex<T>::lock (773 samples, 0.20%)std::sync::mutex::Mutex<T>::lock (773 samples, 0.20%)std::sys::sync::mutex::futex::Mutex::lock (769 samples, 0.20%)core::sync::atomic::AtomicU32::compare_exchange (360 samples, 0.10%)core::sync::atomic::atomic_compare_exchange (360 samples, 0.10%)__memcpy_avx512_unaligned_erms (410 samples, 0.11%)__GI___lll_lock_wake_private (103 samples, 0.03%)__memcpy_avx512_unaligned_erms (180 samples, 0.05%)update_curr (47 samples, 0.01%)update_load_avg (57 samples, 0.02%)dequeue_entity (144 samples, 0.04%)dequeue_task (169 samples, 0.04%)dequeue_task_fair (166 samples, 0.04%)ctx_sched_out (41 samples, 0.01%)prepare_task_switch (61 samples, 0.02%)__perf_event_task_sched_out (56 samples, 0.01%)perf_event_context_sched_out (48 samples, 0.01%)psi_group_change (57 samples, 
0.02%)psi_task_switch (96 samples, 0.03%)__schedule (428 samples, 0.11%)futex_wait_queue (471 samples, 0.12%)schedule (433 samples, 0.11%)futex_q_lock (46 samples, 0.01%)futex_q_unlock (217 samples, 0.06%)futex_wait_setup (344 samples, 0.09%)futex_wait (864 samples, 0.23%)do_futex (877 samples, 0.23%)__x64_sys_futex (885 samples, 0.23%)__rseq_handle_notify_resume (77 samples, 0.02%)exit_to_user_mode_loop (96 samples, 0.03%)exit_to_user_mode_prepare (127 samples, 0.03%)do_syscall_64 (1,027 samples, 0.27%)syscall_exit_to_user_mode (136 samples, 0.04%)entry_SYSCALL_64_after_hwframe (1,035 samples, 0.27%)__GI___lll_lock_wait_private (1,121 samples, 0.30%)futex_wait (1,095 samples, 0.29%)futex_hash (66 samples, 0.02%)__smp_call_single_queue (40 samples, 0.01%)send_call_function_single_ipi (39 samples, 0.01%)futex_wake (608 samples, 0.16%)wake_up_q (362 samples, 0.10%)try_to_wake_up (353 samples, 0.09%)ttwu_queue_wakelist (78 samples, 0.02%)do_futex (729 samples, 0.19%)__x64_sys_futex (747 samples, 0.20%)exit_to_user_mode_prepare (44 samples, 0.01%)do_syscall_64 (822 samples, 0.22%)syscall_exit_to_user_mode (54 samples, 0.01%)entry_SYSCALL_64_after_hwframe (838 samples, 0.22%)__GI___lll_lock_wake_private (916 samples, 0.24%)_int_free (220 samples, 0.06%)__alloc_pages (48 samples, 0.01%)do_anonymous_page (127 samples, 0.03%)vma_alloc_folio (59 samples, 0.02%)__folio_alloc (52 samples, 0.01%)handle_pte_fault (132 samples, 0.03%)__handle_mm_fault (136 samples, 0.04%)handle_mm_fault (153 samples, 0.04%)do_user_addr_fault (170 samples, 0.04%)asm_exc_page_fault (177 samples, 0.05%)exc_page_fault (175 samples, 0.05%)perf_event_mmap_output (55 samples, 0.01%)perf_event_mmap_event (115 samples, 0.03%)perf_iterate_sb (111 samples, 0.03%)perf_iterate_ctx (100 samples, 0.03%)perf_event_mmap (124 samples, 0.03%)mas_preallocate (48 samples, 0.01%)mas_alloc_nodes (48 samples, 0.01%)mas_wr_store_entry.isra.0 (43 samples, 0.01%)mas_store_prealloc (73 samples, 0.02%)__vma_adjust (154 
samples, 0.04%)vma_mas_store (78 samples, 0.02%)vma_merge (173 samples, 0.05%)mprotect_fixup (330 samples, 0.09%)do_mprotect_pkey (384 samples, 0.10%)__x64_sys_mprotect (390 samples, 0.10%)grow_heap (401 samples, 0.11%)__GI_mprotect (399 samples, 0.11%)entry_SYSCALL_64_after_hwframe (395 samples, 0.10%)do_syscall_64 (392 samples, 0.10%)sysmalloc (595 samples, 0.16%)_int_malloc (1,239 samples, 0.33%)unlink_chunk (55 samples, 0.01%)_int_memalign (1,518 samples, 0.40%)core::option::Option<T>::map (14,436 samples, 3.82%)core..tokio::task::spawn::spawn_inner::{{closure}} (14,430 samples, 3.81%)toki..tokio::runtime::scheduler::Handle::spawn (14,425 samples, 3.81%)toki..tokio::runtime::scheduler::multi_thread::handle::Handle::spawn (14,422 samples, 3.81%)toki..tokio::runtime::scheduler::multi_thread::handle::Handle::bind_new_task (14,411 samples, 3.81%)toki..tokio::runtime::task::list::OwnedTasks<S>::bind (5,536 samples, 1.46%)tokio::runtime::task::new_task (4,490 samples, 1.19%)tokio::runtime::task::raw::RawTask::new (4,490 samples, 1.19%)tokio::runtime::task::core::Cell<T,S>::new (4,490 samples, 1.19%)alloc::boxed::Box<T>::new (4,034 samples, 1.07%)alloc::alloc::exchange_malloc (3,715 samples, 0.98%)<alloc::alloc::Global as core::alloc::Allocator>::allocate (3,702 samples, 0.98%)alloc::alloc::Global::alloc_impl (3,702 samples, 0.98%)alloc::alloc::alloc (3,702 samples, 0.98%)__rdl_alloc (3,702 samples, 0.98%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (3,702 samples, 0.98%)std::sys::pal::unix::alloc::aligned_malloc (3,702 samples, 0.98%)__posix_memalign (3,659 samples, 0.97%)__posix_memalign (3,656 samples, 0.97%)_mid_memalign (3,656 samples, 0.97%)tokio::runtime::context::current::with_current (17,481 samples, 4.62%)tokio..std::thread::local::LocalKey<T>::try_with (17,476 samples, 4.62%)std::..tokio::runtime::context::current::with_current::{{closure}} (17,340 samples, 4.58%)tokio..tokio::task::spawn::spawn (17,548 
samples, 4.64%)tokio..tokio::task::spawn::spawn_inner (17,548 samples, 4.64%)tokio..tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (173,936 samples, 45.97%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_muttokio::runtime::task::core::Core<T,S>::poll::{{closure}} (173,935 samples, 45.97%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}}torrust_tracker::servers::udp::server::Udp::run_with_graceful_shutdown::{{closure}}::{{closure}} (48,629 samples, 12.85%)torrust_tracker::se..torrust_tracker::servers::udp::server::Udp::run_udp_server::{{closure}} (48,628 samples, 12.85%)torrust_tracker::se..torrust_tracker::servers::udp::server::Udp::spawn_request_processor (17,569 samples, 4.64%)torru..__memcpy_avx512_unaligned_erms (51 samples, 0.01%)__memcpy_avx512_unaligned_erms (2,052 samples, 0.54%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (2,067 samples, 0.55%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (2,067 samples, 0.55%)tokio::runtime::task::core::Core<T,S>::poll (176,078 samples, 46.54%)tokio::runtime::task::core::Core<T,S>::polltokio::runtime::task::core::Core<T,S>::drop_future_or_output (2,140 samples, 0.57%)tokio::runtime::task::core::Core<T,S>::set_stage (2,138 samples, 0.57%)__memcpy_avx512_unaligned_erms (48 samples, 0.01%)__memcpy_avx512_unaligned_erms (856 samples, 0.23%)__memcpy_avx512_unaligned_erms (1,693 samples, 0.45%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (1,703 samples, 0.45%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (1,702 samples, 0.45%)tokio::runtime::task::core::Core<T,S>::set_stage (2,570 samples, 0.68%)tokio::runtime::task::harness::poll_future (178,737 samples, 47.24%)tokio::runtime::task::harness::poll_futurestd::panic::catch_unwind (178,737 samples, 47.24%)std::panic::catch_unwindstd::panicking::try (178,737 samples, 47.24%)std::panicking::trystd::panicking::try::do_call (178,737 samples, 
47.24%)std::panicking::try::do_call<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (178,737 samples, 47.24%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<..tokio::runtime::task::harness::poll_future::{{closure}} (178,737 samples, 47.24%)tokio::runtime::task::harness::poll_future::{{closure}}tokio::runtime::task::core::Core<T,S>::store_output (2,659 samples, 0.70%)tokio::runtime::task::state::State::transition_to_running (348 samples, 0.09%)tokio::runtime::task::state::State::fetch_update_action (348 samples, 0.09%)tokio::runtime::task::raw::poll (181,363 samples, 47.93%)tokio::runtime::task::raw::polltokio::runtime::task::harness::Harness<T,S>::poll (181,220 samples, 47.90%)tokio::runtime::task::harness::Harness<T,S>::polltokio::runtime::task::harness::Harness<T,S>::poll_inner (179,097 samples, 47.33%)tokio::runtime::task::harness::Harness<T,S>::poll_innertokio::runtime::coop::budget (181,972 samples, 48.09%)tokio::runtime::coop::budgettokio::runtime::coop::with_budget (181,972 samples, 48.09%)tokio::runtime::coop::with_budgettokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}} (181,912 samples, 48.08%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}}tokio::runtime::task::LocalNotified<S>::run (181,909 samples, 48.08%)tokio::runtime::task::LocalNotified<S>::runtokio::runtime::task::raw::RawTask::poll (181,909 samples, 48.08%)tokio::runtime::task::raw::RawTask::polltokio::runtime::scheduler::multi_thread::stats::Stats::start_poll (110 samples, 0.03%)__x64_sys_futex (295 samples, 0.08%)_raw_spin_lock (91 samples, 0.02%)futex_hash (195 samples, 0.05%)futex_wake_mark (84 samples, 0.02%)get_futex_key (163 samples, 0.04%)wake_q_add_safe (121 samples, 0.03%)select_task_rq_fair (60 samples, 0.02%)_raw_spin_lock_irqsave (167 samples, 0.04%)__raw_spin_lock_irqsave (164 samples, 0.04%)available_idle_cpu (62 samples, 0.02%)select_idle_sibling 
(121 samples, 0.03%)select_task_rq_fair (1,048 samples, 0.28%)wake_affine (382 samples, 0.10%)available_idle_cpu (315 samples, 0.08%)__smp_call_single_queue (294 samples, 0.08%)send_call_function_single_ipi (280 samples, 0.07%)native_send_call_func_single_ipi (53 samples, 0.01%)default_send_IPI_single_phys (53 samples, 0.01%)__default_send_IPI_dest_field (49 samples, 0.01%)llist_add_batch (533 samples, 0.14%)sched_clock_cpu (152 samples, 0.04%)sched_clock (144 samples, 0.04%)native_sched_clock (143 samples, 0.04%)ttwu_queue_wakelist (1,388 samples, 0.37%)try_to_wake_up (3,169 samples, 0.84%)futex_wake (4,300 samples, 1.14%)wake_up_q (3,312 samples, 0.88%)do_futex (4,709 samples, 1.24%)__x64_sys_futex (4,862 samples, 1.28%)do_futex (47 samples, 0.01%)exit_to_user_mode_prepare (62 samples, 0.02%)do_syscall_64 (5,079 samples, 1.34%)syscall_exit_to_user_mode (90 samples, 0.02%)entry_SYSCALL_64_after_hwframe (5,409 samples, 1.43%)syscall (5,655 samples, 1.49%)tokio::loom::std::mutex::Mutex<T>::lock (75 samples, 0.02%)std::sync::mutex::Mutex<T>::lock (71 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock (49 samples, 0.01%)core::sync::atomic::AtomicUsize::fetch_add (1,216 samples, 0.32%)core::sync::atomic::atomic_add (1,216 samples, 0.32%)tokio::runtime::scheduler::multi_thread::idle::Idle::worker_to_notify (1,348 samples, 0.36%)tokio::runtime::scheduler::multi_thread::idle::Idle::notify_should_wakeup (1,238 samples, 0.33%)fput (40 samples, 0.01%)__fget_light (155 samples, 0.04%)__fdget_pos (165 samples, 0.04%)_raw_spin_lock_irqsave (39 samples, 0.01%)__raw_spin_lock_irqsave (39 samples, 0.01%)__wake_up_common (172 samples, 0.05%)ep_autoremove_wake_function (163 samples, 0.04%)default_wake_function (162 samples, 0.04%)try_to_wake_up (159 samples, 0.04%)ttwu_queue_wakelist (43 samples, 0.01%)__wake_up_common_lock (308 samples, 0.08%)_raw_spin_lock_irqsave (131 samples, 0.03%)__raw_spin_lock_irqsave (131 samples, 0.03%)native_queued_spin_lock_slowpath (121 samples, 
0.03%)__wake_up (311 samples, 0.08%)_raw_read_lock_irqsave (369 samples, 0.10%)__raw_read_lock_irqsave (368 samples, 0.10%)__wake_up_common (972 samples, 0.26%)ep_poll_callback (937 samples, 0.25%)_raw_read_unlock_irqrestore (39 samples, 0.01%)__wake_up_locked_key (989 samples, 0.26%)_copy_from_user (42 samples, 0.01%)_raw_spin_lock_irq (428 samples, 0.11%)eventfd_write (1,641 samples, 0.43%)copy_user_enhanced_fast_string (139 samples, 0.04%)ksys_write (1,936 samples, 0.51%)vfs_write (1,722 samples, 0.46%)rw_verify_area (42 samples, 0.01%)security_file_permission (38 samples, 0.01%)__x64_sys_write (1,980 samples, 0.52%)exit_to_user_mode_prepare (42 samples, 0.01%)do_syscall_64 (2,071 samples, 0.55%)syscall_exit_to_user_mode (74 samples, 0.02%)entry_SYSCALL_64_after_hwframe (2,113 samples, 0.56%)__GI___libc_write (2,270 samples, 0.60%)__GI___libc_write (2,271 samples, 0.60%)mio::sys::unix::waker::eventfd::WakerInternal::wake (2,284 samples, 0.60%)<&std::fs::File as std::io::Write>::write (2,277 samples, 0.60%)std::sys::pal::unix::fs::File::write (2,277 samples, 0.60%)std::sys::pal::unix::fd::FileDesc::write (2,277 samples, 0.60%)__entry_text_start (206 samples, 0.05%)tokio::runtime::driver::Handle::unpark (2,522 samples, 0.67%)tokio::runtime::driver::IoHandle::unpark (2,522 samples, 0.67%)tokio::runtime::io::driver::Handle::unpark (2,522 samples, 0.67%)mio::waker::Waker::wake (2,518 samples, 0.67%)mio::sys::unix::waker::fdbased::Waker::wake (2,518 samples, 0.67%)tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (234 samples, 0.06%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (234 samples, 0.06%)tokio::runtime::driver::Handle::unpark (233 samples, 0.06%)tokio::runtime::driver::IoHandle::unpark (233 samples, 0.06%)tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (2,547 samples, 0.67%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (2,543 samples, 
0.67%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (191,736 samples, 50.67%)tokio::runtime::scheduler::multi_thread::worker::Context::run_tasktokio::runtime::scheduler::multi_thread::worker::Core::transition_from_searching (9,564 samples, 2.53%)to..tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::transition_worker_from_searching (9,562 samples, 2.53%)to..tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_parked_local (9,559 samples, 2.53%)to..tokio::runtime::scheduler::multi_thread::worker::Core::tune_global_queue_interval (67 samples, 0.02%)tokio::runtime::scheduler::multi_thread::stats::Stats::tuned_global_queue_interval (65 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::next_task (162 samples, 0.04%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (201 samples, 0.05%)alloc::sync::Arc<T,A>::inner (201 samples, 0.05%)core::ptr::non_null::NonNull<T>::as_ref (201 samples, 0.05%)core::iter::range::<impl core::iter::traits::iterator::Iterator for core::ops::range::Range<A>>::next (327 samples, 0.09%)<core::ops::range::Range<T> as core::iter::range::RangeIteratorImpl>::spec_next (327 samples, 0.09%)core::cmp::impls::<impl core::cmp::PartialOrd for usize>::lt (326 samples, 0.09%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (127 samples, 0.03%)alloc::sync::Arc<T,A>::inner (127 samples, 0.03%)core::ptr::non_null::NonNull<T>::as_ref (127 samples, 0.03%)core::num::<impl u32>::wrapping_sub (314 samples, 0.08%)core::sync::atomic::AtomicU64::load (129 samples, 0.03%)core::sync::atomic::atomic_load (129 samples, 0.03%)tokio::loom::std::atomic_u32::AtomicU32::unsync_load (110 samples, 0.03%)core::sync::atomic::AtomicU32::load (110 samples, 0.03%)core::sync::atomic::atomic_load (110 samples, 0.03%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (168 samples, 
0.04%)alloc::sync::Arc<T,A>::inner (168 samples, 0.04%)core::ptr::non_null::NonNull<T>::as_ref (168 samples, 0.04%)core::num::<impl u32>::wrapping_add (92 samples, 0.02%)core::num::<impl u32>::wrapping_sub (172 samples, 0.05%)core::sync::atomic::AtomicU32::load (163 samples, 0.04%)core::sync::atomic::atomic_load (163 samples, 0.04%)core::sync::atomic::AtomicU64::load (405 samples, 0.11%)core::sync::atomic::atomic_load (405 samples, 0.11%)tokio::runtime::scheduler::multi_thread::queue::pack (166 samples, 0.04%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (2,781 samples, 0.73%)tokio::runtime::scheduler::multi_thread::queue::unpack (540 samples, 0.14%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (3,784 samples, 1.00%)tokio::runtime::scheduler::multi_thread::queue::unpack (134 samples, 0.04%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_to_searching (423 samples, 0.11%)tokio::runtime::scheduler::multi_thread::idle::Idle::transition_worker_to_searching (99 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (8,170 samples, 2.16%)t..tokio::util::rand::FastRand::fastrand_n (71 samples, 0.02%)tokio::util::rand::FastRand::fastrand (71 samples, 0.02%)std::panic::catch_unwind (262,281 samples, 69.32%)std::panic::catch_unwindstd::panicking::try (262,281 samples, 69.32%)std::panicking::trystd::panicking::try::do_call (262,281 samples, 69.32%)std::panicking::try::do_call<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (262,281 samples, 69.32%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_oncestd::thread::Builder::spawn_unchecked_::{{closure}}::{{closure}} (262,281 samples, 69.32%)std::thread::Builder::spawn_unchecked_::{{closure}}::{{closure}}std::sys_common::backtrace::__rust_begin_short_backtrace (262,281 samples, 
69.32%)std::sys_common::backtrace::__rust_begin_short_backtracetokio::runtime::blocking::pool::Spawner::spawn_thread::{{closure}} (262,281 samples, 69.32%)tokio::runtime::blocking::pool::Spawner::spawn_thread::{{closure}}tokio::runtime::blocking::pool::Inner::run (262,281 samples, 69.32%)tokio::runtime::blocking::pool::Inner::runtokio::runtime::blocking::pool::Task::run (262,211 samples, 69.30%)tokio::runtime::blocking::pool::Task::runtokio::runtime::task::UnownedTask<S>::run (262,211 samples, 69.30%)tokio::runtime::task::UnownedTask<S>::runtokio::runtime::task::raw::RawTask::poll (262,211 samples, 69.30%)tokio::runtime::task::raw::RawTask::polltokio::runtime::task::raw::poll (262,211 samples, 69.30%)tokio::runtime::task::raw::polltokio::runtime::task::harness::Harness<T,S>::poll (262,211 samples, 69.30%)tokio::runtime::task::harness::Harness<T,S>::polltokio::runtime::task::harness::Harness<T,S>::poll_inner (262,211 samples, 69.30%)tokio::runtime::task::harness::Harness<T,S>::poll_innertokio::runtime::task::harness::poll_future (262,210 samples, 69.30%)tokio::runtime::task::harness::poll_futurestd::panic::catch_unwind (262,210 samples, 69.30%)std::panic::catch_unwindstd::panicking::try (262,210 samples, 69.30%)std::panicking::trystd::panicking::try::do_call (262,210 samples, 69.30%)std::panicking::try::do_call<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (262,210 samples, 69.30%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_oncetokio::runtime::task::harness::poll_future::{{closure}} (262,210 samples, 69.30%)tokio::runtime::task::harness::poll_future::{{closure}}tokio::runtime::task::core::Core<T,S>::poll (262,210 samples, 69.30%)tokio::runtime::task::core::Core<T,S>::polltokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (262,210 samples, 69.30%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_muttokio::runtime::task::core::Core<T,S>::poll::{{closure}} (262,210 samples, 
69.30%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}}<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (262,210 samples, 69.30%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::polltokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (262,210 samples, 69.30%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}}tokio::runtime::scheduler::multi_thread::worker::run (262,210 samples, 69.30%)tokio::runtime::scheduler::multi_thread::worker::runtokio::runtime::context::runtime::enter_runtime (262,210 samples, 69.30%)tokio::runtime::context::runtime::enter_runtimetokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (262,210 samples, 69.30%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}tokio::runtime::context::set_scheduler (262,210 samples, 69.30%)tokio::runtime::context::set_schedulerstd::thread::local::LocalKey<T>::with (262,210 samples, 69.30%)std::thread::local::LocalKey<T>::withstd::thread::local::LocalKey<T>::try_with (262,210 samples, 69.30%)std::thread::local::LocalKey<T>::try_withtokio::runtime::context::set_scheduler::{{closure}} (262,210 samples, 69.30%)tokio::runtime::context::set_scheduler::{{closure}}tokio::runtime::context::scoped::Scoped<T>::set (262,210 samples, 69.30%)tokio::runtime::context::scoped::Scoped<T>::settokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (262,210 samples, 69.30%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}}tokio::runtime::scheduler::multi_thread::worker::Context::run (262,210 samples, 69.30%)tokio::runtime::scheduler::multi_thread::worker::Context::run<alloc::boxed::Box<F,A> as core::ops::function::FnOnce<Args>>::call_once (262,282 samples, 69.32%)<alloc::boxed::Box<F,A> as core::ops::function::FnOnce<Args>>::call_once<alloc::boxed::Box<F,A> as core::ops::function::FnOnce<Args>>::call_once (262,282 samples, 
69.32%)<alloc::boxed::Box<F,A> as core::ops::function::FnOnce<Args>>::call_oncecore::ops::function::FnOnce::call_once{{vtable.shim}} (262,282 samples, 69.32%)core::ops::function::FnOnce::call_once{{vtable.shim}}std::thread::Builder::spawn_unchecked_::{{closure}} (262,282 samples, 69.32%)std::thread::Builder::spawn_unchecked_::{{closure}}clone3 (262,316 samples, 69.33%)clone3start_thread (262,316 samples, 69.33%)start_threadstd::sys::pal::unix::thread::Thread::new::thread_start (262,299 samples, 69.32%)std::sys::pal::unix::thread::Thread::new::thread_startcore::fmt::Formatter::pad_integral (390 samples, 0.10%)core::fmt::Formatter::pad_integral::write_prefix (126 samples, 0.03%)core::fmt::Formatter::pad_integral (41 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::task::core::Stage<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}}>> (113 samples, 0.03%)__x64_sys_futex (173 samples, 0.05%)__x64_sys_getsockname (51 samples, 0.01%)__x64_sys_sendto (43 samples, 0.01%)__x64_sys_epoll_wait (165 samples, 0.04%)read_tsc (70 samples, 0.02%)futex_hash (127 samples, 0.03%)futex_wait_setup (101 samples, 0.03%)get_futex_key (39 samples, 0.01%)futex_wait (270 samples, 0.07%)futex_wake (506 samples, 0.13%)get_futex_key (181 samples, 0.05%)do_futex (1,060 samples, 0.28%)__x64_sys_futex (1,255 samples, 0.33%)__fget_light (87 samples, 0.02%)__fdget (88 samples, 0.02%)__sys_getsockname (168 samples, 0.04%)sockfd_lookup_light (98 samples, 0.03%)__x64_sys_getsockname (171 samples, 0.05%)__fget_light (55 samples, 0.01%)__fdget (57 samples, 0.02%)__sys_recvfrom (187 samples, 0.05%)sockfd_lookup_light (72 samples, 0.02%)__x64_sys_recvfrom (198 samples, 0.05%)__sys_sendto (165 samples, 0.04%)sockfd_lookup_light (51 samples, 0.01%)__x64_sys_sendto (177 samples, 0.05%)do_futex (48 samples, 0.01%)do_syscall_64 (2,325 samples, 0.61%)syscall_enter_from_user_mode (175 samples, 0.05%)entry_SYSCALL_64_after_hwframe (3,067 samples, 0.81%)syscall_enter_from_user_mode (207 
samples, 0.05%)entry_SYSCALL_64_safe_stack (222 samples, 0.06%)rand_chacha::guts::round (69 samples, 0.02%)rand_chacha::guts::refill_wide::impl_avx2 (80 samples, 0.02%)rand_chacha::guts::refill_wide::fn_impl (80 samples, 0.02%)rand_chacha::guts::refill_wide_impl (80 samples, 0.02%)core::cell::RefCell<T>::borrow_mut (113 samples, 0.03%)core::cell::RefCell<T>::try_borrow_mut (113 samples, 0.03%)core::cell::BorrowRefMut::new (113 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (156 samples, 0.04%)tokio::runtime::coop::budget (154 samples, 0.04%)tokio::runtime::coop::with_budget (154 samples, 0.04%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}} (136 samples, 0.04%)std::sys::pal::unix::time::Timespec::now (305 samples, 0.08%)std::sys::pal::unix::time::Timespec::sub_timespec (148 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock_contended (110 samples, 0.03%)std::sys_common::net::TcpListener::socket_addr (40 samples, 0.01%)syscall (90 samples, 0.02%)syscall_return_via_sysret (133 samples, 0.04%)std::sync::poison::Flag::done (42 samples, 0.01%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::util::linked_list::LinkedList<tokio::runtime::task::Task<alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>,tokio::runtime::task::core::Header>>> (47 samples, 0.01%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (47 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::task::Schedule for alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>::release (71 samples, 0.02%)tokio::runtime::task::list::OwnedTasks<S>::remove (71 samples, 0.02%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::remove (58 samples, 0.02%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (113 samples, 
0.03%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (113 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::run (113 samples, 0.03%)tokio::runtime::context::runtime::enter_runtime (113 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (113 samples, 0.03%)tokio::runtime::context::set_scheduler (113 samples, 0.03%)std::thread::local::LocalKey<T>::with (113 samples, 0.03%)std::thread::local::LocalKey<T>::try_with (113 samples, 0.03%)tokio::runtime::context::set_scheduler::{{closure}} (113 samples, 0.03%)tokio::runtime::context::scoped::Scoped<T>::set (113 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (113 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Context::run (113 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (59 samples, 0.02%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (55 samples, 0.01%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (53 samples, 0.01%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (116 samples, 0.03%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}} (116 samples, 0.03%)tokio::runtime::task::harness::poll_future (119 samples, 0.03%)std::panic::catch_unwind (119 samples, 0.03%)std::panicking::try (119 samples, 0.03%)std::panicking::try::do_call (119 samples, 0.03%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (119 samples, 0.03%)tokio::runtime::task::harness::poll_future::{{closure}} (119 samples, 0.03%)tokio::runtime::task::core::Core<T,S>::poll (119 samples, 0.03%)tokio::runtime::task::raw::poll (136 samples, 0.04%)tokio::runtime::task::harness::Harness<T,S>::poll (123 samples, 0.03%)tokio::runtime::task::harness::Harness<T,S>::poll_inner (122 samples, 0.03%)torrust_tracker::bootstrap::logging::INIT (43 samples, 0.01%)__memcpy_avx512_unaligned_erms (652 samples, 
0.17%)__entry_text_start (57 samples, 0.02%)_int_free (317 samples, 0.08%)_int_malloc (313 samples, 0.08%)torrust_tracker::servers::udp::logging::log_request::__CALLSITE::META (1,319 samples, 0.35%)__GI___lll_lock_wait_private (265 samples, 0.07%)futex_wait (154 samples, 0.04%)futex_fatal_error (55 samples, 0.01%)__memcpy_avx512_unaligned_erms (1,079 samples, 0.29%)_int_free (67 samples, 0.02%)_int_malloc (120 samples, 0.03%)torrust_tracker::servers::udp::logging::log_request::__CALLSITE (1,582 samples, 0.42%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (122 samples, 0.03%)torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (59 samples, 0.02%)<tokio::runtime::io::scheduled_io::Readiness as core::ops::drop::Drop>::drop (45 samples, 0.01%)__GI___libc_malloc (123 samples, 0.03%)_int_malloc (143 samples, 0.04%)alloc::vec::Vec<T>::with_capacity (237 samples, 0.06%)alloc::vec::Vec<T,A>::with_capacity_in (237 samples, 0.06%)alloc::raw_vec::RawVec<T,A>::with_capacity_in (234 samples, 0.06%)alloc::raw_vec::RawVec<T,A>::try_allocate_in (234 samples, 0.06%)<alloc::alloc::Global as core::alloc::Allocator>::allocate (231 samples, 0.06%)alloc::alloc::Global::alloc_impl (231 samples, 0.06%)alloc::alloc::alloc (231 samples, 0.06%)__rdl_alloc (231 samples, 0.06%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (231 samples, 0.06%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (207 samples, 0.05%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (207 samples, 0.05%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (207 samples, 0.05%)__entry_text_start (140 samples, 0.04%)tokio::net::udp::UdpSocket::recv_buf_from::{{closure}}::{{closure}} (286 samples, 0.08%)mio::net::udp::UdpSocket::recv_from (273 samples, 0.07%)mio::io_source::IoSource<T>::do_io (273 samples, 
0.07%)mio::sys::unix::stateless_io_source::IoSourceState::do_io (273 samples, 0.07%)mio::net::udp::UdpSocket::recv_from::{{closure}} (273 samples, 0.07%)std::net::udp::UdpSocket::recv_from (273 samples, 0.07%)std::sys_common::net::UdpSocket::recv_from (273 samples, 0.07%)std::sys::pal::unix::net::Socket::recv_from (273 samples, 0.07%)std::sys::pal::unix::net::Socket::recv_from_with_flags (273 samples, 0.07%)core::mem::zeroed (131 samples, 0.03%)core::mem::maybe_uninit::MaybeUninit<T>::zeroed (131 samples, 0.03%)core::ptr::mut_ptr::<impl *mut T>::write_bytes (131 samples, 0.03%)core::intrinsics::write_bytes (131 samples, 0.03%)__entry_text_start (131 samples, 0.03%)torrust_tracker::servers::udp::server::Udp::receive_request::{{closure}} (639 samples, 0.17%)tokio::net::udp::UdpSocket::recv_buf_from::{{closure}} (360 samples, 0.10%)tokio::runtime::io::registration::Registration::async_io::{{closure}} (355 samples, 0.09%)__entry_text_start (171 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_option_task_without_yield (186 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task (186 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::with_current (186 samples, 0.05%)tokio::runtime::context::with_scheduler (185 samples, 0.05%)std::thread::local::LocalKey<T>::try_with (185 samples, 0.05%)tokio::runtime::context::with_scheduler::{{closure}} (185 samples, 0.05%)tokio::runtime::context::scoped::Scoped<T>::with (185 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::with_current::{{closure}} (185 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task::{{closure}} (185 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::<impl 
tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_local (185 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_parked_local (185 samples, 0.05%)tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (185 samples, 0.05%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (185 samples, 0.05%)tokio::runtime::driver::Handle::unpark (185 samples, 0.05%)tokio::runtime::driver::IoHandle::unpark (185 samples, 0.05%)tokio::runtime::io::driver::Handle::unpark (185 samples, 0.05%)mio::waker::Waker::wake (185 samples, 0.05%)mio::sys::unix::waker::fdbased::Waker::wake (185 samples, 0.05%)tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (182 samples, 0.05%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (182 samples, 0.05%)tokio::runtime::driver::Handle::unpark (178 samples, 0.05%)tokio::runtime::driver::IoHandle::unpark (178 samples, 0.05%)torrust_tracker::servers::udp::server::Udp::run_with_graceful_shutdown::{{closure}}::{{closure}} (1,051 samples, 0.28%)torrust_tracker::servers::udp::server::Udp::run_udp_server::{{closure}} (1,045 samples, 0.28%)torrust_tracker::servers::udp::server::Udp::spawn_request_processor (319 samples, 0.08%)tokio::task::spawn::spawn (319 samples, 0.08%)tokio::task::spawn::spawn_inner (319 samples, 0.08%)tokio::runtime::context::current::with_current (319 samples, 0.08%)std::thread::local::LocalKey<T>::try_with (319 samples, 0.08%)tokio::runtime::context::current::with_current::{{closure}} (319 samples, 0.08%)core::option::Option<T>::map (319 samples, 0.08%)tokio::task::spawn::spawn_inner::{{closure}} (319 samples, 0.08%)tokio::runtime::scheduler::Handle::spawn (319 samples, 0.08%)tokio::runtime::scheduler::multi_thread::handle::Handle::spawn (319 samples, 0.08%)tokio::runtime::scheduler::multi_thread::handle::Handle::bind_new_task (319 samples, 0.08%)tokio::runtime::task::list::OwnedTasks<S>::bind (133 
samples, 0.04%)tokio::runtime::task::new_task (77 samples, 0.02%)tokio::runtime::task::raw::RawTask::new (77 samples, 0.02%)tokio::runtime::task::core::Cell<T,S>::new (77 samples, 0.02%)alloc::boxed::Box<T>::new (40 samples, 0.01%)all (378,369 samples, 100%)tokio-runtime-w (378,173 samples, 99.95%)tokio-runtime-w \ No newline at end of file diff --git a/docs/media/flamegraph_generated_without_sudo.svg b/docs/media/flamegraph_generated_without_sudo.svg new file mode 100644 index 000000000..84c00ffe3 --- /dev/null +++ b/docs/media/flamegraph_generated_without_sudo.svg @@ -0,0 +1,491 @@ +Flame Graph Reset ZoomSearch [unknown] (188 samples, 0.14%)[unknown] (187 samples, 0.14%)[unknown] (186 samples, 0.14%)[unknown] (178 samples, 0.14%)[unknown] (172 samples, 0.13%)[unknown] (158 samples, 0.12%)[unknown] (158 samples, 0.12%)[unknown] (125 samples, 0.10%)[unknown] (102 samples, 0.08%)[unknown] (93 samples, 0.07%)[unknown] (92 samples, 0.07%)[unknown] (41 samples, 0.03%)[unknown] (38 samples, 0.03%)[unknown] (38 samples, 0.03%)[unknown] (29 samples, 0.02%)[unknown] (25 samples, 0.02%)[unknown] (21 samples, 0.02%)[unknown] (18 samples, 0.01%)[unknown] (15 samples, 0.01%)__GI___mmap64 (18 samples, 0.01%)__GI___mmap64 (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (17 samples, 0.01%)profiling (214 samples, 0.16%)clone3 (22 samples, 0.02%)start_thread (22 samples, 0.02%)std::sys::pal::unix::thread::Thread::new::thread_start (20 samples, 0.02%)std::sys::pal::unix::stack_overflow::Handler::new (20 samples, 0.02%)std::sys::pal::unix::stack_overflow::imp::make_handler (20 samples, 0.02%)std::sys::pal::unix::stack_overflow::imp::get_stack (19 samples, 0.01%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (30 samples, 0.02%)[[vdso]] (93 samples, 
0.07%)<torrust_tracker::shared::crypto::ephemeral_instance_keys::RANDOM_SEED as core::ops::deref::Deref>::deref::__stability::LAZY (143 samples, 0.11%)<alloc::collections::btree::map::Values<K,V> as core::iter::traits::iterator::Iterator>::next (31 samples, 0.02%)<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (28 samples, 0.02%)alloc::collections::btree::navigate::LazyLeafRange<alloc::collections::btree::node::marker::Immut,K,V>::next_unchecked (28 samples, 0.02%)alloc::collections::btree::navigate::LazyLeafRange<BorrowType,K,V>::init_front (21 samples, 0.02%)[[vdso]] (91 samples, 0.07%)__GI___clock_gettime (14 samples, 0.01%)_int_malloc (53 samples, 0.04%)epoll_wait (254 samples, 0.19%)tokio::runtime::context::with_scheduler (28 samples, 0.02%)std::thread::local::LocalKey<T>::try_with (14 samples, 0.01%)tokio::runtime::context::with_scheduler::{{closure}} (14 samples, 0.01%)core::option::Option<T>::map (17 samples, 0.01%)mio::sys::unix::selector::epoll::Selector::select::{{closure}} (17 samples, 0.01%)mio::poll::Poll::poll (27 samples, 0.02%)mio::sys::unix::selector::epoll::Selector::select (27 samples, 0.02%)tokio::runtime::io::driver::Driver::turn (54 samples, 0.04%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (26 samples, 0.02%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (17 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::wake (41 samples, 0.03%)tokio::runtime::scheduler::multi_thread::idle::Idle::worker_to_notify (71 samples, 0.05%)tokio::runtime::scheduler::multi_thread::idle::Idle::notify_should_wakeup (65 samples, 0.05%)core::sync::atomic::AtomicUsize::fetch_add (65 samples, 0.05%)core::sync::atomic::atomic_add (65 samples, 0.05%)tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (31 samples, 0.02%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (21 samples, 
0.02%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark_condvar (18 samples, 0.01%)tokio::loom::std::mutex::Mutex<T>::lock (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::park::Inner::park_condvar (49 samples, 0.04%)tokio::loom::std::mutex::Mutex<T>::lock (33 samples, 0.03%)std::sync::mutex::Mutex<T>::lock (16 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (93 samples, 0.07%)tokio::runtime::scheduler::multi_thread::park::Parker::park (75 samples, 0.06%)tokio::runtime::scheduler::multi_thread::park::Inner::park (75 samples, 0.06%)core::cell::RefCell<T>::borrow_mut (18 samples, 0.01%)core::cell::RefCell<T>::try_borrow_mut (18 samples, 0.01%)core::cell::BorrowRefMut::new (18 samples, 0.01%)tokio::runtime::coop::budget (26 samples, 0.02%)tokio::runtime::coop::with_budget (26 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}} (21 samples, 0.02%)tokio::runtime::scheduler::multi_thread::stats::Stats::start_poll (26 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (96 samples, 0.07%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_searching (27 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::transition_worker_from_searching (18 samples, 0.01%)<core::result::Result<T,E> as core::ops::try_trait::Try>::branch (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::stats::Stats::end_processing_scheduled_tasks (35 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Context::maintenance (14 samples, 0.01%)<T as core::slice::cmp::SliceContains>::slice_contains::{{closure}} (90 samples, 0.07%)core::cmp::impls::<impl core::cmp::PartialEq for usize>::eq (90 samples, 0.07%)core::slice::<impl [T]>::contains (220 samples, 0.17%)<T as core::slice::cmp::SliceContains>::slice_contains (220 samples, 0.17%)<core::slice::iter::Iter<T> as 
core::iter::traits::iterator::Iterator>::any (220 samples, 0.17%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (54 samples, 0.04%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (54 samples, 0.04%)tokio::runtime::scheduler::multi_thread::idle::Idle::is_parked (240 samples, 0.18%)tokio::runtime::scheduler::multi_thread::idle::Idle::unpark_worker_by_id (20 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_parked (265 samples, 0.20%)tokio::runtime::scheduler::multi_thread::worker::Context::park (284 samples, 0.22%)core::option::Option<T>::or_else (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::next_local_task::{{closure}} (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::queue::Local<T>::pop (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::next_local_task (22 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::next_task (40 samples, 0.03%)core::iter::range::<impl core::iter::traits::iterator::Iterator for core::ops::range::Range<A>>::next (17 samples, 0.01%)<core::ops::range::Range<T> as core::iter::range::RangeIteratorImpl>::spec_next (17 samples, 0.01%)core::num::<impl u32>::wrapping_add (17 samples, 0.01%)core::sync::atomic::AtomicU64::compare_exchange (26 samples, 0.02%)core::sync::atomic::atomic_compare_exchange (26 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (129 samples, 0.10%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (128 samples, 0.10%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (119 samples, 0.09%)tokio::runtime::scheduler::multi_thread::queue::pack (39 samples, 0.03%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (613 samples, 0.47%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (613 samples, 
0.47%)tokio::runtime::scheduler::multi_thread::worker::run (613 samples, 0.47%)tokio::runtime::context::runtime::enter_runtime (613 samples, 0.47%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (613 samples, 0.47%)tokio::runtime::context::set_scheduler (613 samples, 0.47%)std::thread::local::LocalKey<T>::with (613 samples, 0.47%)std::thread::local::LocalKey<T>::try_with (613 samples, 0.47%)tokio::runtime::context::set_scheduler::{{closure}} (613 samples, 0.47%)tokio::runtime::context::scoped::Scoped<T>::set (613 samples, 0.47%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (613 samples, 0.47%)tokio::runtime::scheduler::multi_thread::worker::Context::run (613 samples, 0.47%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (777 samples, 0.59%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}} (776 samples, 0.59%)core::ptr::drop_in_place<tokio::runtime::task::core::TaskIdGuard> (16 samples, 0.01%)<tokio::runtime::task::core::TaskIdGuard as core::ops::drop::Drop>::drop (16 samples, 0.01%)tokio::runtime::context::set_current_task_id (16 samples, 0.01%)std::thread::local::LocalKey<T>::try_with (16 samples, 0.01%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (20 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (20 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::poll (835 samples, 0.64%)tokio::runtime::task::core::Core<T,S>::drop_future_or_output (56 samples, 0.04%)tokio::runtime::task::core::Core<T,S>::set_stage (46 samples, 0.04%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (897 samples, 0.68%)tokio::runtime::task::harness::poll_future::{{closure}} (897 samples, 0.68%)tokio::runtime::task::core::Core<T,S>::store_output (62 samples, 0.05%)tokio::runtime::task::harness::poll_future (930 samples, 0.71%)std::panic::catch_unwind (927 samples, 0.71%)std::panicking::try (927 samples, 0.71%)std::panicking::try::do_call (925 
samples, 0.70%)core::mem::manually_drop::ManuallyDrop<T>::take (28 samples, 0.02%)core::ptr::read (28 samples, 0.02%)tokio::runtime::task::raw::poll (938 samples, 0.71%)tokio::runtime::task::harness::Harness<T,S>::poll (934 samples, 0.71%)tokio::runtime::task::harness::Harness<T,S>::poll_inner (934 samples, 0.71%)core::array::<impl core::default::Default for [T: 32]>::default (26 samples, 0.02%)tokio::runtime::time::Inner::lock (16 samples, 0.01%)tokio::loom::std::mutex::Mutex<T>::lock (16 samples, 0.01%)std::sync::mutex::Mutex<T>::lock (16 samples, 0.01%)std::sys::sync::mutex::futex::Mutex::lock (15 samples, 0.01%)core::sync::atomic::AtomicU32::compare_exchange (15 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (15 samples, 0.01%)tokio::runtime::time::wheel::Wheel::poll (25 samples, 0.02%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process_at_time (98 samples, 0.07%)tokio::runtime::time::Driver::park_internal (51 samples, 0.04%)tokio::runtime::time::wheel::Wheel::next_expiration (15 samples, 0.01%)<F as core::future::into_future::IntoFuture>::into_future (16 samples, 0.01%)torrust_tracker::servers::udp::logging::log_request (24 samples, 0.02%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (46 samples, 0.04%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (131 samples, 0.10%)torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (24 samples, 0.02%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::update_torrent_with_peer_and_get_stats (14 samples, 0.01%)core::sync::atomic::AtomicU32::load (14 samples, 
0.01%)core::sync::atomic::atomic_load (14 samples, 0.01%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents (39 samples, 0.03%)std::sync::rwlock::RwLock<T>::read (34 samples, 0.03%)std::sys::sync::rwlock::futex::RwLock::read (32 samples, 0.02%)[[heap]] (2,361 samples, 1.80%)[..[[vdso]] (313 samples, 0.24%)<alloc::collections::btree::map::Values<K,V> as core::iter::traits::iterator::Iterator>::next (41 samples, 0.03%)<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (28 samples, 0.02%)alloc::collections::btree::navigate::LazyLeafRange<alloc::collections::btree::node::marker::Immut,K,V>::next_unchecked (16 samples, 0.01%)<alloc::string::String as core::fmt::Write>::write_str (67 samples, 0.05%)alloc::string::String::push_str (18 samples, 0.01%)alloc::vec::Vec<T,A>::extend_from_slice (18 samples, 0.01%)<alloc::vec::Vec<T,A> as alloc::vec::spec_extend::SpecExtend<&T,core::slice::iter::Iter<T>>>::spec_extend (18 samples, 0.01%)alloc::vec::Vec<T,A>::append_elements (18 samples, 0.01%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter::SpecFromIter<T,I>>::from_iter (36 samples, 0.03%)core::num::<impl u64>::rotate_left (28 samples, 0.02%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::c_rounds (60 samples, 0.05%)core::num::<impl u64>::wrapping_add (14 samples, 0.01%)core::hash::sip::u8to64_le (60 samples, 0.05%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (184 samples, 0.14%)<core::net::socket_addr::SocketAddr as core::hash::Hash>::hash (15 samples, 0.01%)tokio::runtime::context::CONTEXT::__getit (19 samples, 0.01%)core::cell::Cell<T>::get (17 samples, 0.01%)<tokio::future::poll_fn::PollFn<F> as core::future::future::Future>::poll (26 samples, 0.02%)core::ops::function::FnMut::call_mut (21 samples, 
0.02%)tokio::runtime::coop::poll_proceed (21 samples, 0.02%)tokio::runtime::context::budget (21 samples, 0.02%)std::thread::local::LocalKey<T>::try_with (21 samples, 0.02%)[unknown] (18 samples, 0.01%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (195 samples, 0.15%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::io::scheduled_io::Waiters>> (14 samples, 0.01%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (14 samples, 0.01%)core::result::Result<T,E>::is_err (18 samples, 0.01%)core::result::Result<T,E>::is_ok (18 samples, 0.01%)tokio::loom::std::mutex::Mutex<T>::lock (51 samples, 0.04%)std::sync::mutex::Mutex<T>::lock (46 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock (39 samples, 0.03%)core::sync::atomic::AtomicU32::compare_exchange (19 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (19 samples, 0.01%)<tokio::runtime::io::scheduled_io::Readiness as core::ops::drop::Drop>::drop (245 samples, 0.19%)<torrust_tracker_primitives::info_hash::InfoHash as core::fmt::Display>::fmt (26 samples, 0.02%)[[vdso]] (748 samples, 0.57%)[profiling] (34 samples, 0.03%)core::fmt::write (31 samples, 0.02%)__GI___clock_gettime (29 samples, 0.02%)__GI___libc_free (131 samples, 0.10%)arena_for_chunk (20 samples, 0.02%)arena_for_chunk (19 samples, 0.01%)heap_for_ptr (19 samples, 0.01%)heap_max_size (14 samples, 0.01%)__GI___libc_malloc (114 samples, 0.09%)__GI___libc_realloc (15 samples, 0.01%)__GI___lll_lock_wake_private (22 samples, 0.02%)__GI___pthread_disable_asynccancel (66 samples, 0.05%)__GI_getsockname (249 samples, 0.19%)__libc_calloc (15 samples, 0.01%)__libc_recvfrom (23 samples, 0.02%)__libc_sendto (130 samples, 0.10%)__memcmp_evex_movbe (451 samples, 0.34%)__memcpy_avx512_unaligned_erms (426 samples, 0.32%)__memset_avx512_unaligned_erms (215 samples, 0.16%)__posix_memalign (17 samples, 0.01%)_int_free (418 samples, 0.32%)tcache_put (24 samples, 0.02%)_int_malloc (385 samples, 
0.29%)_int_memalign (31 samples, 0.02%)alloc::collections::btree::map::BTreeMap<K,V,A>::entry (26 samples, 0.02%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (15 samples, 0.01%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (15 samples, 0.01%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (15 samples, 0.01%)alloc::collections::btree::map::BTreeMap<K,V,A>::insert (54 samples, 0.04%)alloc::raw_vec::RawVec<T,A>::grow_one (15 samples, 0.01%)alloc::raw_vec::RawVec<T,A>::reserve::do_reserve_and_handle (96 samples, 0.07%)alloc::raw_vec::RawVec<T,A>::grow_amortized (66 samples, 0.05%)core::num::<impl usize>::checked_add (18 samples, 0.01%)core::num::<impl usize>::overflowing_add (18 samples, 0.01%)alloc::raw_vec::finish_grow (74 samples, 0.06%)alloc::sync::Arc<T,A>::drop_slow (16 samples, 0.01%)core::mem::drop (14 samples, 0.01%)core::fmt::Formatter::pad_integral (14 samples, 0.01%)core::ptr::drop_in_place<aquatic_udp_protocol::response::Response> (93 samples, 0.07%)core::ptr::drop_in_place<tokio::net::udp::UdpSocket::send_to<&core::net::socket_addr::SocketAddr>::{{closure}}> (23 samples, 0.02%)core::ptr::drop_in_place<tokio::runtime::task::core::Stage<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}}>> (188 samples, 0.14%)core::ptr::drop_in_place<torrust_tracker::servers::udp::handlers::handle_announce::{{closure}}> (30 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker::servers::udp::handlers::handle_connect::{{closure}}> (22 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker::servers::udp::handlers::handle_packet::{{closure}}> (20 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}}> (19 samples, 
0.01%)core::ptr::drop_in_place<torrust_tracker::servers::udp::server::Udp::send_response::{{closure}}> (22 samples, 0.02%)malloc_consolidate (24 samples, 0.02%)core::core_arch::x86::avx2::_mm256_or_si256 (15 samples, 0.01%)<ppv_lite86::soft::x2<W,G> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right20 (17 samples, 0.01%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right20 (17 samples, 0.01%)rand_chacha::guts::round (66 samples, 0.05%)rand_chacha::guts::refill_wide::impl_avx2 (99 samples, 0.08%)rand_chacha::guts::refill_wide::fn_impl (98 samples, 0.07%)rand_chacha::guts::refill_wide_impl (98 samples, 0.07%)std::io::error::Error::kind (14 samples, 0.01%)[unknown] (42 samples, 0.03%)[unknown] (14 samples, 0.01%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (490 samples, 0.37%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (211 samples, 0.16%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (84 samples, 0.06%)tokio::runtime::task::core::Header::get_owner_id (18 samples, 0.01%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with (18 samples, 0.01%)tokio::runtime::task::core::Header::get_owner_id::{{closure}} (18 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::task::Schedule for alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>::release (20 samples, 0.02%)tokio::runtime::task::list::OwnedTasks<S>::remove (19 samples, 0.01%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (31 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (29 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::set_stage (108 samples, 0.08%)tokio::runtime::task::core::TaskIdGuard::enter (14 samples, 0.01%)tokio::runtime::context::set_current_task_id (14 samples, 0.01%)std::thread::local::LocalKey<T>::try_with (14 samples, 
0.01%)tokio::runtime::task::harness::Harness<T,S>::complete (21 samples, 0.02%)tokio::util::sharded_list::ShardGuard<L,<L as tokio::util::linked_list::Link>::Target>::push (32 samples, 0.02%)tokio::runtime::task::list::OwnedTasks<S>::bind_inner (54 samples, 0.04%)tokio::runtime::task::raw::drop_abort_handle (41 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Core::maintenance (17 samples, 0.01%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::scheduler::multi_thread::worker::Synced>> (22 samples, 0.02%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (22 samples, 0.02%)<T as core::slice::cmp::SliceContains>::slice_contains::{{closure}} (79 samples, 0.06%)core::cmp::impls::<impl core::cmp::PartialEq for usize>::eq (79 samples, 0.06%)core::slice::<impl [T]>::contains (178 samples, 0.14%)<T as core::slice::cmp::SliceContains>::slice_contains (178 samples, 0.14%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::any (178 samples, 0.14%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (40 samples, 0.03%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (40 samples, 0.03%)tokio::runtime::scheduler::multi_thread::idle::Idle::is_parked (216 samples, 0.16%)tokio::loom::std::mutex::Mutex<T>::lock (16 samples, 0.01%)std::sync::mutex::Mutex<T>::lock (16 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_parked (219 samples, 0.17%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::scheduler::multi_thread::worker::Synced>> (29 samples, 0.02%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (29 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::unlock (22 samples, 0.02%)tokio::runtime::scheduler::multi_thread::idle::Idle::transition_worker_to_parked (54 samples, 0.04%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (18 samples, 0.01%)<core::ptr::non_null::NonNull<T> as 
core::cmp::PartialEq>::eq (18 samples, 0.01%)core::sync::atomic::AtomicU32::load (17 samples, 0.01%)core::sync::atomic::atomic_load (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_if_work_pending (113 samples, 0.09%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::is_empty (51 samples, 0.04%)tokio::runtime::scheduler::multi_thread::queue::Inner<T>::is_empty (41 samples, 0.03%)tokio::runtime::scheduler::multi_thread::queue::Inner<T>::len (31 samples, 0.02%)core::sync::atomic::AtomicU64::load (14 samples, 0.01%)core::sync::atomic::atomic_load (14 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::park (447 samples, 0.34%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_to_parked (174 samples, 0.13%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (19 samples, 0.01%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (489 samples, 0.37%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}} (489 samples, 0.37%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (484 samples, 0.37%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (484 samples, 0.37%)tokio::runtime::scheduler::multi_thread::worker::run (484 samples, 0.37%)tokio::runtime::context::runtime::enter_runtime (484 samples, 0.37%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (484 samples, 0.37%)tokio::runtime::context::set_scheduler (484 samples, 0.37%)std::thread::local::LocalKey<T>::with (484 samples, 0.37%)std::thread::local::LocalKey<T>::try_with (484 samples, 0.37%)tokio::runtime::context::set_scheduler::{{closure}} (484 samples, 0.37%)tokio::runtime::context::scoped::Scoped<T>::set (484 samples, 0.37%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (484 samples, 0.37%)tokio::runtime::scheduler::multi_thread::worker::Context::run 
(484 samples, 0.37%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (24 samples, 0.02%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (20 samples, 0.02%)tokio::runtime::task::raw::poll (515 samples, 0.39%)tokio::runtime::task::harness::Harness<T,S>::poll (493 samples, 0.38%)tokio::runtime::task::harness::Harness<T,S>::poll_inner (493 samples, 0.38%)tokio::runtime::task::harness::poll_future (493 samples, 0.38%)std::panic::catch_unwind (493 samples, 0.38%)std::panicking::try (493 samples, 0.38%)std::panicking::try::do_call (493 samples, 0.38%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (493 samples, 0.38%)tokio::runtime::task::harness::poll_future::{{closure}} (493 samples, 0.38%)tokio::runtime::task::core::Core<T,S>::poll (493 samples, 0.38%)tokio::runtime::time::wheel::Wheel::next_expiration (16 samples, 0.01%)torrust_tracker::core::Tracker::authorize::{{closure}} (27 samples, 0.02%)torrust_tracker::core::Tracker::get_torrent_peers_for_peer (15 samples, 0.01%)torrust_tracker::core::Tracker::send_stats_event::{{closure}} (44 samples, 0.03%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::c_rounds (15 samples, 0.01%)<std::hash::random::DefaultHasher as core::hash::Hasher>::finish (47 samples, 0.04%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::finish (47 samples, 0.04%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::finish (47 samples, 0.04%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::d_rounds (29 samples, 0.02%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::build (74 samples, 0.06%)torrust_tracker::servers::udp::peer_builder::from_request (17 samples, 0.01%)torrust_tracker::servers::udp::request::AnnounceWrapper::new (51 samples, 0.04%)torrust_tracker::servers::udp::server::Udp::process_request::{{closure}} (54 samples, 0.04%)torrust_tracker::core::Tracker::update_torrent_with_peer_and_get_stats::{{closure}} (58 samples, 
0.04%)torrust_tracker::core::Tracker::announce::{{closure}} (70 samples, 0.05%)torrust_tracker::servers::udp::handlers::handle_announce::{{closure}} (113 samples, 0.09%)torrust_tracker::servers::udp::handlers::handle_request::{{closure}} (175 samples, 0.13%)<T as alloc::string::ToString>::to_string (38 samples, 0.03%)core::option::Option<T>::expect (56 samples, 0.04%)torrust_tracker_primitives::info_hash::InfoHash::to_hex_string (18 samples, 0.01%)<T as alloc::string::ToString>::to_string (18 samples, 0.01%)torrust_tracker::servers::udp::logging::log_request (180 samples, 0.14%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (468 samples, 0.36%)torrust_tracker::servers::udp::logging::log_response (38 samples, 0.03%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (669 samples, 0.51%)torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (152 samples, 0.12%)torrust_tracker::servers::udp::server::Udp::send_packet::{{closure}} (147 samples, 0.11%)tokio::net::udp::UdpSocket::send_to::{{closure}} (138 samples, 0.11%)tokio::net::udp::UdpSocket::send_to_addr::{{closure}} (119 samples, 0.09%)tokio::runtime::io::registration::Registration::async_io::{{closure}} (75 samples, 0.06%)tokio::net::udp::UdpSocket::send_to_addr::{{closure}}::{{closure}} (39 samples, 0.03%)mio::net::udp::UdpSocket::send_to (39 samples, 0.03%)mio::io_source::IoSource<T>::do_io (39 samples, 0.03%)mio::sys::unix::stateless_io_source::IoSourceState::do_io (39 samples, 0.03%)mio::net::udp::UdpSocket::send_to::{{closure}} (39 samples, 0.03%)std::net::udp::UdpSocket::send_to (39 samples, 0.03%)std::sys_common::net::UdpSocket::send_to (39 samples, 0.03%)std::sys::pal::unix::cvt (39 samples, 0.03%)<isize as std::sys::pal::unix::IsMinusOne>::is_minus_one (39 samples, 0.03%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for 
alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::get_stats (15 samples, 0.01%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_stats (14 samples, 0.01%)<core::iter::adapters::filter::Filter<I,P> as core::iter::traits::iterator::Iterator>::count::to_usize::{{closure}} (33 samples, 0.03%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_stats::{{closure}} (33 samples, 0.03%)torrust_tracker_primitives::peer::Peer::is_seeder (33 samples, 0.03%)<core::iter::adapters::filter::Filter<I,P> as core::iter::traits::iterator::Iterator>::count (75 samples, 0.06%)core::iter::traits::iterator::Iterator::sum (75 samples, 0.06%)<usize as core::iter::traits::accum::Sum>::sum (75 samples, 0.06%)<core::iter::adapters::map::Map<I,F> as core::iter::traits::iterator::Iterator>::fold (75 samples, 0.06%)core::iter::traits::iterator::Iterator::fold (75 samples, 0.06%)core::iter::adapters::map::map_fold::{{closure}} (34 samples, 0.03%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_stats (104 samples, 0.08%)alloc::collections::btree::map::BTreeMap<K,V,A>::values (24 samples, 0.02%)core::mem::drop (15 samples, 0.01%)core::ptr::drop_in_place<core::option::Option<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>>> (15 samples, 0.01%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>> (15 samples, 0.01%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (15 samples, 0.01%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for 
alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::insert_or_update_peer_and_get_stats (215 samples, 0.16%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer_and_get_stats (198 samples, 0.15%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer (89 samples, 0.07%)core::option::Option<T>::is_some_and (32 samples, 0.02%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer::{{closure}} (31 samples, 0.02%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>> (30 samples, 0.02%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (30 samples, 0.02%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (26 samples, 0.02%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (34 samples, 0.03%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (34 samples, 0.03%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (58 samples, 0.04%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (58 samples, 0.04%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (58 samples, 0.04%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (58 samples, 0.04%)<u8 as core::slice::cmp::SliceOrd>::compare (58 samples, 0.04%)core::cmp::impls::<impl core::cmp::Ord for isize>::cmp (20 samples, 0.02%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (238 samples, 0.18%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree 
(236 samples, 0.18%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (208 samples, 0.16%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (208 samples, 0.16%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get (282 samples, 0.21%)alloc::collections::btree::map::BTreeMap<K,V,A>::entry (67 samples, 0.05%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (61 samples, 0.05%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (53 samples, 0.04%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (53 samples, 0.04%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (22 samples, 0.02%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (22 samples, 0.02%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (22 samples, 0.02%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (22 samples, 0.02%)<u8 as core::slice::cmp::SliceOrd>::compare (22 samples, 0.02%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (18 samples, 0.01%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (23 samples, 0.02%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (23 samples, 0.02%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (43 samples, 
0.03%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (43 samples, 0.03%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (43 samples, 0.03%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (43 samples, 0.03%)<u8 as core::slice::cmp::SliceOrd>::compare (43 samples, 0.03%)core::cmp::impls::<impl core::cmp::Ord for isize>::cmp (17 samples, 0.01%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (151 samples, 0.12%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (145 samples, 0.11%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (137 samples, 0.10%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (137 samples, 0.10%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::update_torrent_with_peer_and_get_stats (266 samples, 0.20%)core::sync::atomic::AtomicU32::load (27 samples, 0.02%)core::sync::atomic::atomic_load (27 samples, 0.02%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents (38 samples, 0.03%)std::sync::rwlock::RwLock<T>::read (37 samples, 0.03%)std::sys::sync::rwlock::futex::RwLock::read (36 samples, 0.03%)tracing::span::Span::log (16 samples, 0.01%)tracing::span::Span::record_all (70 samples, 0.05%)unlink_chunk (139 samples, 0.11%)rand::rng::Rng::gen (30 samples, 
0.02%)rand::distributions::other::<impl rand::distributions::distribution::Distribution<[T: _]> for rand::distributions::Standard>::sample (30 samples, 0.02%)rand::rng::Rng::gen (30 samples, 0.02%)rand::distributions::integer::<impl rand::distributions::distribution::Distribution<u8> for rand::distributions::Standard>::sample (30 samples, 0.02%)<rand::rngs::thread::ThreadRng as rand_core::RngCore>::next_u32 (30 samples, 0.02%)<rand::rngs::adapter::reseeding::ReseedingRng<R,Rsdr> as rand_core::RngCore>::next_u32 (30 samples, 0.02%)<rand_core::block::BlockRng<R> as rand_core::RngCore>::next_u32 (30 samples, 0.02%)rand_core::block::BlockRng<R>::generate_and_set (28 samples, 0.02%)[anon] (8,759 samples, 6.67%)[anon]uuid::v4::<impl uuid::Uuid>::new_v4 (32 samples, 0.02%)uuid::rng::bytes (32 samples, 0.02%)rand::random (32 samples, 0.02%)<tokio::future::poll_fn::PollFn<F> as core::future::future::Future>::poll (15 samples, 0.01%)_int_free (338 samples, 0.26%)tcache_put (18 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::task::core::Stage<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}}>> (22 samples, 0.02%)hashbrown::raw::h2 (14 samples, 0.01%)hashbrown::raw::RawTable<T,A>::find_or_find_insert_slot (23 samples, 0.02%)hashbrown::raw::RawTableInner::find_or_find_insert_slot_inner (17 samples, 0.01%)hashbrown::map::HashMap<K,V,S,A>::insert (25 samples, 0.02%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (18 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (15 samples, 0.01%)[profiling] (545 samples, 0.42%)<alloc::collections::btree::map::Values<K,V> as core::iter::traits::iterator::Iterator>::next (32 samples, 0.02%)<alloc::collections::btree::map::Iter<K,V> as core::iter::traits::iterator::Iterator>::next (22 samples, 0.02%)alloc::collections::btree::navigate::LazyLeafRange<alloc::collections::btree::node::marker::Immut,K,V>::next_unchecked (16 samples, 
0.01%)alloc::vec::Vec<T,A>::reserve (30 samples, 0.02%)alloc::raw_vec::RawVec<T,A>::reserve (28 samples, 0.02%)<alloc::string::String as core::fmt::Write>::write_str (83 samples, 0.06%)alloc::string::String::push_str (57 samples, 0.04%)alloc::vec::Vec<T,A>::extend_from_slice (57 samples, 0.04%)<alloc::vec::Vec<T,A> as alloc::vec::spec_extend::SpecExtend<&T,core::slice::iter::Iter<T>>>::spec_extend (57 samples, 0.04%)alloc::vec::Vec<T,A>::append_elements (57 samples, 0.04%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter::SpecFromIter<T,I>>::from_iter (20 samples, 0.02%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::c_rounds (41 samples, 0.03%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (151 samples, 0.12%)core::hash::sip::u8to64_le (50 samples, 0.04%)<core::net::socket_addr::SocketAddr as core::hash::Hash>::hash (33 samples, 0.03%)tokio::runtime::context::CONTEXT::__getit (35 samples, 0.03%)core::cell::Cell<T>::get (33 samples, 0.03%)[unknown] (20 samples, 0.02%)<tokio::future::poll_fn::PollFn<F> as core::future::future::Future>::poll (75 samples, 0.06%)core::ops::function::FnMut::call_mut (66 samples, 0.05%)tokio::runtime::coop::poll_proceed (66 samples, 0.05%)tokio::runtime::context::budget (66 samples, 0.05%)std::thread::local::LocalKey<T>::try_with (66 samples, 0.05%)tokio::runtime::context::budget::{{closure}} (27 samples, 0.02%)tokio::runtime::coop::poll_proceed::{{closure}} (27 samples, 0.02%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (110 samples, 0.08%)[unknown] (15 samples, 0.01%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::io::scheduled_io::Waiters>> (27 samples, 0.02%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (27 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::unlock (14 samples, 0.01%)tokio::loom::std::mutex::Mutex<T>::lock (84 samples, 0.06%)std::sync::mutex::Mutex<T>::lock (70 samples, 0.05%)std::sys::sync::mutex::futex::Mutex::lock 
(59 samples, 0.04%)core::sync::atomic::AtomicU32::compare_exchange (55 samples, 0.04%)core::sync::atomic::atomic_compare_exchange (55 samples, 0.04%)[unknown] (33 samples, 0.03%)<tokio::runtime::io::scheduled_io::Readiness as core::ops::drop::Drop>::drop (214 samples, 0.16%)__memcpy_avx512_unaligned_erms (168 samples, 0.13%)[profiling] (171 samples, 0.13%)binascii::bin2hex (77 samples, 0.06%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (21 samples, 0.02%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (21 samples, 0.02%)<torrust_tracker_primitives::info_hash::InfoHash as core::fmt::Display>::fmt (280 samples, 0.21%)[unknown] (317 samples, 0.24%)[[vdso]] (2,648 samples, 2.02%)[..[unknown] (669 samples, 0.51%)[unknown] (396 samples, 0.30%)[unknown] (251 samples, 0.19%)[unknown] (65 samples, 0.05%)[unknown] (30 samples, 0.02%)[unknown] (21 samples, 0.02%)__GI___clock_gettime (56 samples, 0.04%)arena_for_chunk (72 samples, 0.05%)arena_for_chunk (62 samples, 0.05%)heap_for_ptr (49 samples, 0.04%)heap_max_size (28 samples, 0.02%)__GI___libc_free (194 samples, 0.15%)arena_for_chunk (19 samples, 0.01%)checked_request2size (24 samples, 0.02%)__GI___libc_malloc (220 samples, 0.17%)tcache_get (44 samples, 0.03%)__GI___libc_write (25 samples, 0.02%)__GI___libc_write (14 samples, 0.01%)__GI___pthread_disable_asynccancel (97 samples, 0.07%)core::num::<impl u128>::leading_zeros (15 samples, 0.01%)compiler_builtins::float::conv::int_to_float::u128_to_f64_bits (72 samples, 0.05%)__floattidf (90 samples, 0.07%)compiler_builtins::float::conv::__floattidf (86 samples, 0.07%)exp_inline (40 samples, 0.03%)log_inline (64 samples, 0.05%)__ieee754_pow_fma (114 samples, 0.09%)__libc_calloc (106 samples, 0.08%)__libc_recvfrom (252 samples, 0.19%)__libc_sendto (133 samples, 0.10%)__memcmp_evex_movbe (137 samples, 0.10%)__memcpy_avx512_unaligned_erms (1,399 samples, 1.07%)__posix_memalign (172 samples, 
0.13%)__posix_memalign (80 samples, 0.06%)_mid_memalign (71 samples, 0.05%)arena_for_chunk (14 samples, 0.01%)__pow (18 samples, 0.01%)__vdso_clock_gettime (40 samples, 0.03%)[unknown] (24 samples, 0.02%)_int_free (462 samples, 0.35%)tcache_put (54 samples, 0.04%)[unknown] (14 samples, 0.01%)_int_malloc (508 samples, 0.39%)_int_memalign (68 samples, 0.05%)alloc::collections::btree::map::BTreeMap<K,V,A>::insert (54 samples, 0.04%)<core::result::Result<T,E> as core::ops::try_trait::Try>::branch (14 samples, 0.01%)alloc::raw_vec::RawVec<T,A>::reserve::do_reserve_and_handle (78 samples, 0.06%)alloc::raw_vec::RawVec<T,A>::grow_amortized (73 samples, 0.06%)alloc::raw_vec::finish_grow (91 samples, 0.07%)core::result::Result<T,E>::map_err (31 samples, 0.02%)core::ptr::drop_in_place<alloc::sync::Weak<ring::ec::curve25519::ed25519::signing::Ed25519KeyPair,&alloc::alloc::Global>> (16 samples, 0.01%)<alloc::sync::Weak<T,A> as core::ops::drop::Drop>::drop (16 samples, 0.01%)core::mem::drop (18 samples, 0.01%)alloc::sync::Arc<T,A>::drop_slow (21 samples, 0.02%)alloc_new_heap (49 samples, 0.04%)tokio::runtime::io::scheduled_io::ScheduledIo::wake (49 samples, 0.04%)core::fmt::Formatter::pad_integral (40 samples, 0.03%)core::fmt::Formatter::pad_integral::write_prefix (19 samples, 0.01%)core::fmt::write (20 samples, 0.02%)core::ptr::drop_in_place<[core::option::Option<core::task::wake::Waker>: 32]> (155 samples, 0.12%)core::ptr::drop_in_place<core::option::Option<core::task::wake::Waker>> (71 samples, 0.05%)core::ptr::drop_in_place<tokio::runtime::task::core::Stage<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}}>> (245 samples, 0.19%)core::ptr::drop_in_place<torrust_tracker::servers::udp::handlers::handle_announce::{{closure}}> (33 samples, 0.03%)core::ptr::drop_in_place<torrust_tracker::servers::udp::server::Udp::receive_request::{{closure}}> (37 samples, 0.03%)core::str::converts::from_utf8 (33 samples, 0.03%)core::str::validations::run_utf8_validation (20 
samples, 0.02%)epoll_wait (31 samples, 0.02%)hashbrown::map::HashMap<K,V,S,A>::insert (17 samples, 0.01%)rand_chacha::guts::refill_wide (19 samples, 0.01%)std_detect::detect::arch::x86::__is_feature_detected::avx2 (17 samples, 0.01%)std_detect::detect::check_for (17 samples, 0.01%)std_detect::detect::cache::test (17 samples, 0.01%)std_detect::detect::cache::Cache::test (17 samples, 0.01%)core::sync::atomic::AtomicUsize::load (17 samples, 0.01%)core::sync::atomic::atomic_load (17 samples, 0.01%)std::sys::pal::unix::time::Timespec::new (29 samples, 0.02%)std::sys::pal::unix::time::Timespec::now (132 samples, 0.10%)core::cmp::impls::<impl core::cmp::PartialOrd<&B> for &A>::ge (22 samples, 0.02%)core::cmp::PartialOrd::ge (22 samples, 0.02%)std::sys::pal::unix::time::Timespec::sub_timespec (67 samples, 0.05%)std::sys::sync::mutex::futex::Mutex::lock_contended (18 samples, 0.01%)std::sys_common::net::TcpListener::socket_addr (29 samples, 0.02%)std::sys_common::net::sockname (28 samples, 0.02%)syscall (552 samples, 0.42%)core::ptr::drop_in_place<core::cell::RefMut<core::option::Option<alloc::boxed::Box<tokio::runtime::scheduler::multi_thread::worker::Core>>>> (74 samples, 0.06%)core::ptr::drop_in_place<core::cell::BorrowRefMut> (74 samples, 0.06%)<core::cell::BorrowRefMut as core::ops::drop::Drop>::drop (74 samples, 0.06%)core::cell::Cell<T>::set (74 samples, 0.06%)core::cell::Cell<T>::replace (74 samples, 0.06%)core::mem::replace (74 samples, 0.06%)core::ptr::write (74 samples, 0.06%)tokio::runtime::scheduler::multi_thread::queue::Local<T>::push_back_or_overflow (14 samples, 0.01%)tokio::runtime::context::with_scheduler (176 samples, 0.13%)std::thread::local::LocalKey<T>::try_with (152 samples, 0.12%)tokio::runtime::context::with_scheduler::{{closure}} (151 samples, 0.12%)tokio::runtime::context::scoped::Scoped<T>::with (150 samples, 0.11%)tokio::runtime::scheduler::multi_thread::worker::with_current::{{closure}} (150 samples, 
0.11%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task::{{closure}} (150 samples, 0.11%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_local (71 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_parked_local (16 samples, 0.01%)core::option::Option<T>::map (19 samples, 0.01%)<mio::event::events::Iter as core::iter::traits::iterator::Iterator>::next (24 samples, 0.02%)mio::poll::Poll::poll (53 samples, 0.04%)mio::sys::unix::selector::epoll::Selector::select (53 samples, 0.04%)core::result::Result<T,E>::map (28 samples, 0.02%)mio::sys::unix::selector::epoll::Selector::select::{{closure}} (28 samples, 0.02%)tokio::io::ready::Ready::from_mio (14 samples, 0.01%)tokio::runtime::io::driver::Driver::turn (126 samples, 0.10%)tokio::runtime::io::scheduled_io::ScheduledIo::set_readiness (18 samples, 0.01%)[unknown] (51 samples, 0.04%)[unknown] (100 samples, 0.08%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (326 samples, 0.25%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (205 samples, 0.16%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (77 samples, 0.06%)[unknown] (26 samples, 0.02%)<tokio::util::linked_list::DrainFilter<T,F> as core::iter::traits::iterator::Iterator>::next (16 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::wake (396 samples, 0.30%)tokio::loom::std::mutex::Mutex<T>::lock (18 samples, 0.01%)std::sync::mutex::Mutex<T>::lock (18 samples, 0.01%)tokio::runtime::scheduler::multi_thread::idle::Idle::notify_should_wakeup (573 samples, 0.44%)core::sync::atomic::AtomicUsize::fetch_add (566 samples, 0.43%)core::sync::atomic::atomic_add (566 samples, 0.43%)tokio::runtime::scheduler::multi_thread::idle::Idle::worker_to_notify (635 samples, 
0.48%)tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (25 samples, 0.02%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (15 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::next_remote_task (44 samples, 0.03%)tokio::runtime::scheduler::inject::shared::Shared<T>::is_empty (21 samples, 0.02%)tokio::runtime::scheduler::inject::shared::Shared<T>::len (21 samples, 0.02%)core::sync::atomic::AtomicUsize::load (21 samples, 0.02%)core::sync::atomic::atomic_load (21 samples, 0.02%)tokio::runtime::task::core::Header::get_owner_id (32 samples, 0.02%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with (32 samples, 0.02%)tokio::runtime::task::core::Header::get_owner_id::{{closure}} (32 samples, 0.02%)std::sync::poison::Flag::done (32 samples, 0.02%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::util::linked_list::LinkedList<tokio::runtime::task::Task<alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>,tokio::runtime::task::core::Header>>> (43 samples, 0.03%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (43 samples, 0.03%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::remove (14 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::task::Schedule for alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>::release (123 samples, 0.09%)tokio::runtime::task::list::OwnedTasks<S>::remove (117 samples, 0.09%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::remove (80 samples, 0.06%)tokio::runtime::scheduler::defer::Defer::wake (17 samples, 0.01%)std::sys::pal::unix::futex::futex_wait (46 samples, 0.04%)tokio::runtime::scheduler::multi_thread::park::Inner::park_condvar (71 samples, 0.05%)std::sync::condvar::Condvar::wait (56 samples, 0.04%)std::sys::sync::condvar::futex::Condvar::wait (56 samples, 
0.04%)std::sys::sync::condvar::futex::Condvar::wait_optional_timeout (56 samples, 0.04%)core::sync::atomic::AtomicUsize::compare_exchange (37 samples, 0.03%)core::sync::atomic::atomic_compare_exchange (37 samples, 0.03%)tokio::runtime::scheduler::multi_thread::park::Inner::park_driver (138 samples, 0.11%)tokio::runtime::driver::Driver::park (77 samples, 0.06%)tokio::runtime::driver::TimeDriver::park (77 samples, 0.06%)tokio::runtime::time::Driver::park (75 samples, 0.06%)tokio::runtime::scheduler::multi_thread::park::Parker::park (266 samples, 0.20%)tokio::runtime::scheduler::multi_thread::park::Inner::park (266 samples, 0.20%)tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (432 samples, 0.33%)tokio::runtime::scheduler::multi_thread::worker::Core::should_notify_others (26 samples, 0.02%)core::cell::RefCell<T>::borrow_mut (94 samples, 0.07%)core::cell::RefCell<T>::try_borrow_mut (94 samples, 0.07%)core::cell::BorrowRefMut::new (94 samples, 0.07%)tokio::runtime::coop::budget (142 samples, 0.11%)tokio::runtime::coop::with_budget (142 samples, 0.11%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}} (121 samples, 0.09%)tokio::runtime::scheduler::multi_thread::stats::Stats::start_poll (44 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (208 samples, 0.16%)tokio::runtime::signal::Driver::process (30 samples, 0.02%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (46 samples, 0.04%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (46 samples, 0.04%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (35 samples, 0.03%)tokio::runtime::task::core::Core<T,S>::set_stage (75 samples, 0.06%)core::sync::atomic::AtomicUsize::fetch_xor (76 samples, 0.06%)core::sync::atomic::atomic_xor (76 samples, 0.06%)tokio::runtime::task::state::State::transition_to_complete (79 samples, 0.06%)tokio::runtime::task::harness::Harness<T,S>::complete (113 samples, 
0.09%)tokio::runtime::task::state::State::transition_to_terminal (18 samples, 0.01%)tokio::runtime::task::harness::Harness<T,S>::dealloc (28 samples, 0.02%)core::mem::drop (18 samples, 0.01%)core::ptr::drop_in_place<alloc::boxed::Box<tokio::runtime::task::core::Cell<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}},alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>>> (18 samples, 0.01%)core::ptr::drop_in_place<tokio::util::sharded_list::ShardGuard<tokio::runtime::task::Task<alloc::sync::Arc<tokio::runtime::scheduler::current_thread::Handle>>,tokio::runtime::task::core::Header>> (16 samples, 0.01%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::util::linked_list::LinkedList<tokio::runtime::task::Task<alloc::sync::Arc<tokio::runtime::scheduler::current_thread::Handle>>,tokio::runtime::task::core::Header>>> (16 samples, 0.01%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (16 samples, 0.01%)tokio::util::sharded_list::ShardGuard<L,<L as tokio::util::linked_list::Link>::Target>::push (53 samples, 0.04%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::push_front (21 samples, 0.02%)tokio::runtime::task::list::OwnedTasks<S>::bind_inner (113 samples, 0.09%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::lock_shard (15 samples, 0.01%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::shard_inner (15 samples, 0.01%)tokio::loom::std::mutex::Mutex<T>::lock (15 samples, 0.01%)std::sync::mutex::Mutex<T>::lock (15 samples, 0.01%)std::sys::sync::mutex::futex::Mutex::lock (14 samples, 0.01%)tokio::runtime::task::raw::drop_abort_handle (82 samples, 0.06%)tokio::runtime::task::harness::Harness<T,S>::drop_reference (23 samples, 0.02%)tokio::runtime::task::state::State::ref_dec (23 samples, 0.02%)core::sync::atomic::AtomicUsize::compare_exchange (15 samples, 
0.01%)core::sync::atomic::atomic_compare_exchange (15 samples, 0.01%)tokio::runtime::task::raw::drop_join_handle_slow (34 samples, 0.03%)tokio::runtime::task::harness::Harness<T,S>::drop_join_handle_slow (32 samples, 0.02%)tokio::runtime::task::state::State::unset_join_interested (23 samples, 0.02%)tokio::runtime::task::state::State::fetch_update (23 samples, 0.02%)tokio::runtime::scheduler::multi_thread::idle::Idle::is_parked (14 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_parked (15 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::park (43 samples, 0.03%)core::num::<impl u32>::wrapping_add (23 samples, 0.02%)core::option::Option<T>::or_else (37 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Core::next_local_task::{{closure}} (36 samples, 0.03%)tokio::runtime::scheduler::multi_thread::queue::Local<T>::pop (36 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Core::next_local_task (38 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Core::next_task (59 samples, 0.04%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (45 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (132 samples, 0.10%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (63 samples, 0.05%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (290 samples, 0.22%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (290 samples, 0.22%)tokio::runtime::scheduler::multi_thread::worker::run (290 samples, 0.22%)tokio::runtime::context::runtime::enter_runtime (290 samples, 0.22%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (290 samples, 0.22%)tokio::runtime::context::set_scheduler (290 samples, 0.22%)std::thread::local::LocalKey<T>::with (290 samples, 0.22%)std::thread::local::LocalKey<T>::try_with (290 samples, 
0.22%)tokio::runtime::context::set_scheduler::{{closure}} (290 samples, 0.22%)tokio::runtime::context::scoped::Scoped<T>::set (290 samples, 0.22%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (290 samples, 0.22%)tokio::runtime::scheduler::multi_thread::worker::Context::run (290 samples, 0.22%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (327 samples, 0.25%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}} (322 samples, 0.25%)tokio::runtime::task::core::Core<T,S>::poll (333 samples, 0.25%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (342 samples, 0.26%)tokio::runtime::task::harness::poll_future::{{closure}} (342 samples, 0.26%)tokio::runtime::task::harness::poll_future (348 samples, 0.27%)std::panic::catch_unwind (347 samples, 0.26%)std::panicking::try (347 samples, 0.26%)std::panicking::try::do_call (347 samples, 0.26%)core::sync::atomic::AtomicUsize::compare_exchange (18 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (18 samples, 0.01%)tokio::runtime::task::state::State::transition_to_running (47 samples, 0.04%)tokio::runtime::task::state::State::fetch_update_action (47 samples, 0.04%)tokio::runtime::task::state::State::transition_to_running::{{closure}} (19 samples, 0.01%)tokio::runtime::task::raw::poll (427 samples, 0.33%)tokio::runtime::task::harness::Harness<T,S>::poll (408 samples, 0.31%)tokio::runtime::task::harness::Harness<T,S>::poll_inner (407 samples, 0.31%)tokio::runtime::task::state::State::transition_to_idle (17 samples, 0.01%)core::array::<impl core::default::Default for [T: 32]>::default (21 samples, 0.02%)tokio::runtime::time::wheel::Wheel::poll (14 samples, 0.01%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process_at_time (72 samples, 0.05%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process (23 samples, 0.02%)tokio::runtime::time::source::TimeSource::now (15 samples, 
0.01%)tokio::runtime::time::source::TimeSource::now (14 samples, 0.01%)tokio::runtime::time::Driver::park_internal (155 samples, 0.12%)tokio::runtime::time::wheel::level::Level::next_occupied_slot (96 samples, 0.07%)tokio::runtime::time::wheel::level::slot_range (35 samples, 0.03%)core::num::<impl usize>::pow (35 samples, 0.03%)tokio::runtime::time::wheel::level::level_range (39 samples, 0.03%)tokio::runtime::time::wheel::level::slot_range (33 samples, 0.03%)core::num::<impl usize>::pow (33 samples, 0.03%)tokio::runtime::time::wheel::level::Level::next_expiration (208 samples, 0.16%)tokio::runtime::time::wheel::level::slot_range (48 samples, 0.04%)core::num::<impl usize>::pow (48 samples, 0.04%)tokio::runtime::time::wheel::Wheel::next_expiration (277 samples, 0.21%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::is_empty (18 samples, 0.01%)core::option::Option<T>::is_some (18 samples, 0.01%)torrust_tracker::core::Tracker::authorize::{{closure}} (50 samples, 0.04%)torrust_tracker::core::Tracker::get_torrent_peers_for_peer (37 samples, 0.03%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::get_peers_for_client (27 samples, 0.02%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_peers_for_client (19 samples, 0.01%)core::iter::traits::iterator::Iterator::collect (17 samples, 0.01%)<alloc::vec::Vec<T> as core::iter::traits::collect::FromIterator<T>>::from_iter (17 samples, 0.01%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter::SpecFromIter<T,I>>::from_iter (17 samples, 0.01%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter_nested::SpecFromIterNested<T,I>>::from_iter (17 samples, 0.01%)<std::hash::random::DefaultHasher as core::hash::Hasher>::finish (20 
samples, 0.02%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::finish (20 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::finish (20 samples, 0.02%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::build (62 samples, 0.05%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::get_last_time_extent (40 samples, 0.03%)torrust_tracker_clock::time_extent::Make::now (27 samples, 0.02%)torrust_tracker_clock::clock::working::<impl torrust_tracker_clock::clock::Time for torrust_tracker_clock::clock::Clock<torrust_tracker_clock::clock::working::WorkingClock>>::now (17 samples, 0.01%)torrust_tracker::servers::udp::peer_builder::from_request (24 samples, 0.02%)<alloc::sync::Arc<T,A> as core::clone::Clone>::clone (19 samples, 0.01%)torrust_tracker::servers::udp::server::Udp::process_request::{{closure}} (355 samples, 0.27%)<F as core::future::into_future::IntoFuture>::into_future (24 samples, 0.02%)<alloc::sync::Arc<T,A> as core::clone::Clone>::clone (37 samples, 0.03%)core::sync::atomic::AtomicUsize::fetch_add (25 samples, 0.02%)core::sync::atomic::atomic_add (25 samples, 0.02%)torrust_tracker::servers::udp::handlers::handle_packet (14 samples, 0.01%)core::ptr::drop_in_place<torrust_tracker::servers::udp::UdpRequest> (20 samples, 0.02%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (20 samples, 0.02%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (20 samples, 0.02%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (20 samples, 0.02%)core::result::Result<T,E>::map_err (16 samples, 0.01%)torrust_tracker::core::Tracker::update_torrent_with_peer_and_get_stats::{{closure}} (136 samples, 0.10%)torrust_tracker::core::Tracker::announce::{{closure}} (173 samples, 0.13%)torrust_tracker::servers::udp::handlers::handle_announce::{{closure}} (267 samples, 0.20%)torrust_tracker::servers::udp::handlers::handle_connect::{{closure}} (30 samples, 0.02%)torrust_tracker::servers::udp::handlers::handle_request::{{closure}} (423 
samples, 0.32%)core::fmt::Formatter::new (26 samples, 0.02%)core::fmt::num::imp::<impl core::fmt::Display for i32>::fmt (80 samples, 0.06%)core::fmt::num::imp::fmt_u64 (58 samples, 0.04%)core::intrinsics::copy_nonoverlapping (15 samples, 0.01%)core::fmt::num::imp::<impl core::fmt::Display for i64>::fmt (74 samples, 0.06%)core::fmt::num::imp::fmt_u64 (70 samples, 0.05%)<T as alloc::string::ToString>::to_string (207 samples, 0.16%)core::option::Option<T>::expect (19 samples, 0.01%)core::ptr::drop_in_place<alloc::string::String> (18 samples, 0.01%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (18 samples, 0.01%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (18 samples, 0.01%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (18 samples, 0.01%)torrust_tracker::servers::udp::logging::map_action_name (25 samples, 0.02%)alloc::str::<impl alloc::borrow::ToOwned for str>::to_owned (14 samples, 0.01%)torrust_tracker::servers::udp::logging::log_request (345 samples, 0.26%)core::fmt::num::imp::<impl core::fmt::Display for i32>::fmt (18 samples, 0.01%)core::fmt::num::imp::fmt_u64 (14 samples, 0.01%)<T as alloc::string::ToString>::to_string (35 samples, 0.03%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (1,067 samples, 0.81%)torrust_tracker::servers::udp::logging::log_response (72 samples, 0.05%)alloc::vec::from_elem (68 samples, 0.05%)<u8 as alloc::vec::spec_from_elem::SpecFromElem>::from_elem (68 samples, 0.05%)alloc::raw_vec::RawVec<T,A>::with_capacity_zeroed_in (68 samples, 0.05%)alloc::raw_vec::RawVec<T,A>::try_allocate_in (68 samples, 0.05%)<alloc::alloc::Global as core::alloc::Allocator>::allocate_zeroed (68 samples, 0.05%)alloc::alloc::Global::alloc_impl (68 samples, 0.05%)alloc::alloc::alloc_zeroed (68 samples, 0.05%)__rdl_alloc_zeroed (68 samples, 0.05%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc_zeroed (68 samples, 0.05%)[unknown] (48 samples, 0.04%)[unknown] (16 samples, 
0.01%)[unknown] (28 samples, 0.02%)std::sys::pal::unix::cvt (134 samples, 0.10%)<isize as std::sys::pal::unix::IsMinusOne>::is_minus_one (134 samples, 0.10%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (1,908 samples, 1.45%)torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (504 samples, 0.38%)torrust_tracker::servers::udp::server::Udp::send_packet::{{closure}} (382 samples, 0.29%)tokio::net::udp::UdpSocket::send_to::{{closure}} (344 samples, 0.26%)tokio::net::udp::UdpSocket::send_to_addr::{{closure}} (332 samples, 0.25%)tokio::runtime::io::registration::Registration::async_io::{{closure}} (304 samples, 0.23%)tokio::net::udp::UdpSocket::send_to_addr::{{closure}}::{{closure}} (215 samples, 0.16%)mio::net::udp::UdpSocket::send_to (185 samples, 0.14%)mio::io_source::IoSource<T>::do_io (185 samples, 0.14%)mio::sys::unix::stateless_io_source::IoSourceState::do_io (185 samples, 0.14%)mio::net::udp::UdpSocket::send_to::{{closure}} (185 samples, 0.14%)std::net::udp::UdpSocket::send_to (185 samples, 0.14%)std::sys_common::net::UdpSocket::send_to (169 samples, 0.13%)alloc::vec::Vec<T>::with_capacity (17 samples, 0.01%)alloc::vec::Vec<T,A>::with_capacity_in (17 samples, 0.01%)tokio::net::udp::UdpSocket::readable::{{closure}} (104 samples, 0.08%)tokio::net::udp::UdpSocket::ready::{{closure}} (85 samples, 0.06%)torrust_tracker::servers::udp::server::Udp::receive_request::{{closure}} (190 samples, 0.14%)tokio::net::udp::UdpSocket::recv_buf_from::{{closure}} (49 samples, 0.04%)tokio::runtime::io::registration::Registration::async_io::{{closure}} (28 samples, 0.02%)torrust_tracker::servers::udp::server::Udp::run_with_graceful_shutdown::{{closure}}::{{closure}} (330 samples, 0.25%)torrust_tracker::servers::udp::server::Udp::run_udp_server::{{closure}} (327 samples, 0.25%)torrust_tracker::servers::udp::server::Udp::spawn_request_processor (92 samples, 0.07%)tokio::task::spawn::spawn (92 samples, 
0.07%)tokio::task::spawn::spawn_inner (92 samples, 0.07%)tokio::runtime::context::current::with_current (92 samples, 0.07%)std::thread::local::LocalKey<T>::try_with (92 samples, 0.07%)tokio::runtime::context::current::with_current::{{closure}} (92 samples, 0.07%)core::option::Option<T>::map (92 samples, 0.07%)tokio::task::spawn::spawn_inner::{{closure}} (92 samples, 0.07%)tokio::runtime::scheduler::Handle::spawn (92 samples, 0.07%)tokio::runtime::scheduler::multi_thread::handle::Handle::spawn (92 samples, 0.07%)tokio::runtime::scheduler::multi_thread::handle::Handle::bind_new_task (92 samples, 0.07%)tokio::runtime::task::list::OwnedTasks<S>::bind (90 samples, 0.07%)tokio::runtime::task::new_task (89 samples, 0.07%)tokio::runtime::task::raw::RawTask::new (89 samples, 0.07%)tokio::runtime::task::core::Cell<T,S>::new (89 samples, 0.07%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_stats (34 samples, 0.03%)alloc::collections::btree::map::BTreeMap<K,V,A>::values (27 samples, 0.02%)alloc::sync::Arc<T>::new (21 samples, 0.02%)alloc::boxed::Box<T>::new (21 samples, 0.02%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::insert_or_update_peer_and_get_stats (152 samples, 0.12%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer_and_get_stats (125 samples, 0.10%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer (88 samples, 0.07%)core::option::Option<T>::is_some_and (18 samples, 
0.01%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer::{{closure}} (17 samples, 0.01%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>> (17 samples, 0.01%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (17 samples, 0.01%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get (22 samples, 0.02%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::update_torrent_with_peer_and_get_stats (22 samples, 0.02%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents (17 samples, 0.01%)std::sync::rwlock::RwLock<T>::read (16 samples, 0.01%)std::sys::sync::rwlock::futex::RwLock::read (16 samples, 0.01%)tracing::span::Span::log (26 samples, 0.02%)core::fmt::Arguments::new_v1 (15 samples, 0.01%)tracing_core::span::Record::is_empty (34 samples, 0.03%)tracing_core::field::ValueSet::is_empty (34 samples, 0.03%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::all (22 samples, 0.02%)tracing_core::field::ValueSet::is_empty::{{closure}} (18 
samples, 0.01%)core::option::Option<T>::is_none (16 samples, 0.01%)core::option::Option<T>::is_some (16 samples, 0.01%)tracing::span::Span::record_all (143 samples, 0.11%)unlink_chunk (185 samples, 0.14%)uuid::builder::Builder::with_variant (48 samples, 0.04%)[unknown] (40 samples, 0.03%)uuid::builder::Builder::from_random_bytes (77 samples, 0.06%)uuid::builder::Builder::with_version (29 samples, 0.02%)[unknown] (24 samples, 0.02%)<rand::rngs::adapter::reseeding::ReseedingRng<R,Rsdr> as rand_core::RngCore>::next_u32 (161 samples, 0.12%)<rand_core::block::BlockRng<R> as rand_core::RngCore>::next_u32 (161 samples, 0.12%)[unknown] (92 samples, 0.07%)rand::rng::Rng::gen (162 samples, 0.12%)rand::distributions::other::<impl rand::distributions::distribution::Distribution<[T: _]> for rand::distributions::Standard>::sample (162 samples, 0.12%)rand::rng::Rng::gen (162 samples, 0.12%)rand::distributions::integer::<impl rand::distributions::distribution::Distribution<u8> for rand::distributions::Standard>::sample (162 samples, 0.12%)<rand::rngs::thread::ThreadRng as rand_core::RngCore>::next_u32 (162 samples, 0.12%)[unknown] (18,233 samples, 13.89%)[unknown]uuid::v4::<impl uuid::Uuid>::new_v4 (270 samples, 0.21%)uuid::rng::bytes (190 samples, 0.14%)rand::random (190 samples, 0.14%)__memcpy_avx512_unaligned_erms (69 samples, 0.05%)_int_free (23 samples, 0.02%)_int_malloc (23 samples, 0.02%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)advise_stack_range (31 samples, 0.02%)__GI_madvise (31 samples, 0.02%)[unknown] (31 samples, 0.02%)[unknown] (31 samples, 0.02%)[unknown] (31 samples, 0.02%)[unknown] (31 samples, 0.02%)[unknown] (31 samples, 
0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (29 samples, 0.02%)[unknown] (28 samples, 0.02%)[unknown] (28 samples, 0.02%)[unknown] (21 samples, 0.02%)[unknown] (17 samples, 0.01%)std::sys::pal::unix::futex::futex_wait (31 samples, 0.02%)syscall (31 samples, 0.02%)[unknown] (31 samples, 0.02%)[unknown] (31 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (30 samples, 0.02%)[unknown] (29 samples, 0.02%)[unknown] (21 samples, 0.02%)[unknown] (17 samples, 0.01%)std::sync::condvar::Condvar::wait_timeout (35 samples, 0.03%)std::sys::sync::condvar::futex::Condvar::wait_timeout (35 samples, 0.03%)std::sys::sync::condvar::futex::Condvar::wait_optional_timeout (35 samples, 0.03%)tokio::loom::std::mutex::Mutex<T>::lock (56 samples, 0.04%)std::sync::mutex::Mutex<T>::lock (56 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock (56 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock_contended (56 samples, 0.04%)std::sys::pal::unix::futex::futex_wait (56 samples, 0.04%)syscall (56 samples, 0.04%)[unknown] (56 samples, 0.04%)[unknown] (55 samples, 0.04%)[unknown] (55 samples, 0.04%)[unknown] (55 samples, 0.04%)[unknown] (55 samples, 0.04%)[unknown] (54 samples, 0.04%)[unknown] (54 samples, 0.04%)[unknown] (54 samples, 0.04%)[unknown] (53 samples, 0.04%)[unknown] (52 samples, 0.04%)[unknown] (46 samples, 0.04%)[unknown] (39 samples, 0.03%)[unknown] (38 samples, 0.03%)[unknown] (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (15 samples, 0.01%)[[vdso]] (26 samples, 0.02%)[[vdso]] (263 samples, 0.20%)__ieee754_pow_fma (26 samples, 0.02%)__pow (314 samples, 0.24%)std::f64::<impl f64>::powf (345 samples, 0.26%)__GI___clock_gettime (14 samples, 
0.01%)tokio::runtime::scheduler::multi_thread::stats::Stats::end_processing_scheduled_tasks (416 samples, 0.32%)std::time::Instant::now (20 samples, 0.02%)std::sys::pal::unix::time::Instant::now (20 samples, 0.02%)std::sys::pal::unix::time::Timespec::now (15 samples, 0.01%)tokio::runtime::scheduler::multi_thread::stats::Stats::start_processing_scheduled_tasks (24 samples, 0.02%)std::time::Instant::now (18 samples, 0.01%)std::sys::pal::unix::time::Instant::now (18 samples, 0.01%)mio::poll::Poll::poll (102 samples, 0.08%)mio::sys::unix::selector::epoll::Selector::select (102 samples, 0.08%)epoll_wait (99 samples, 0.08%)[unknown] (92 samples, 0.07%)[unknown] (91 samples, 0.07%)[unknown] (91 samples, 0.07%)[unknown] (88 samples, 0.07%)[unknown] (85 samples, 0.06%)[unknown] (84 samples, 0.06%)[unknown] (43 samples, 0.03%)[unknown] (29 samples, 0.02%)[unknown] (14 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (125 samples, 0.10%)tokio::runtime::scheduler::multi_thread::park::Parker::park_timeout (125 samples, 0.10%)tokio::runtime::driver::Driver::park_timeout (125 samples, 0.10%)tokio::runtime::driver::TimeDriver::park_timeout (125 samples, 0.10%)tokio::runtime::time::Driver::park_timeout (125 samples, 0.10%)tokio::runtime::time::Driver::park_internal (116 samples, 0.09%)tokio::runtime::io::driver::Driver::turn (116 samples, 0.09%)tokio::runtime::scheduler::multi_thread::worker::Context::maintenance (148 samples, 0.11%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (111 samples, 0.08%)alloc::sync::Arc<T,A>::inner (111 samples, 0.08%)core::ptr::non_null::NonNull<T>::as_ref (111 samples, 0.08%)core::sync::atomic::AtomicUsize::compare_exchange (16 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (16 samples, 0.01%)core::bool::<impl bool>::then (88 samples, 0.07%)std::sys::pal::unix::futex::futex_wait (13,339 samples, 10.16%)std::sys::pal::..syscall (13,003 samples, 9.90%)syscall[unknown] (12,895 samples, 
9.82%)[unknown][unknown] (12,759 samples, 9.72%)[unknown][unknown] (12,313 samples, 9.38%)[unknown][unknown] (12,032 samples, 9.16%)[unknown][unknown] (11,734 samples, 8.94%)[unknown][unknown] (11,209 samples, 8.54%)[unknown][unknown] (10,265 samples, 7.82%)[unknown][unknown] (9,345 samples, 7.12%)[unknown][unknown] (8,623 samples, 6.57%)[unknown][unknown] (7,744 samples, 5.90%)[unknow..[unknown] (5,922 samples, 4.51%)[unkn..[unknown] (4,459 samples, 3.40%)[un..[unknown] (2,808 samples, 2.14%)[..[unknown] (1,275 samples, 0.97%)[unknown] (1,022 samples, 0.78%)[unknown] (738 samples, 0.56%)[unknown] (607 samples, 0.46%)[unknown] (155 samples, 0.12%)core::result::Result<T,E>::is_err (77 samples, 0.06%)core::result::Result<T,E>::is_ok (77 samples, 0.06%)std::sync::condvar::Condvar::wait (13,429 samples, 10.23%)std::sync::cond..std::sys::sync::condvar::futex::Condvar::wait (13,428 samples, 10.23%)std::sys::sync:..std::sys::sync::condvar::futex::Condvar::wait_optional_timeout (13,428 samples, 10.23%)std::sys::sync:..std::sys::sync::mutex::futex::Mutex::lock (89 samples, 0.07%)tokio::runtime::scheduler::multi_thread::park::Inner::park_condvar (13,508 samples, 10.29%)tokio::runtime:..tokio::loom::std::mutex::Mutex<T>::lock (64 samples, 0.05%)std::sync::mutex::Mutex<T>::lock (32 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock (31 samples, 0.02%)core::sync::atomic::AtomicU32::compare_exchange (30 samples, 0.02%)core::sync::atomic::atomic_compare_exchange (30 samples, 0.02%)core::sync::atomic::AtomicUsize::compare_exchange (15 samples, 0.01%)core::sync::atomic::atomic_compare_exchange (15 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (38 samples, 0.03%)tokio::runtime::scheduler::multi_thread::park::Parker::park (34 samples, 0.03%)tokio::runtime::scheduler::multi_thread::park::Inner::park (34 samples, 0.03%)core::array::<impl core::default::Default for [T: 32]>::default (17 samples, 
0.01%)core::ptr::drop_in_place<[core::option::Option<core::task::wake::Waker>: 32]> (19 samples, 0.01%)tokio::runtime::time::wheel::level::Level::next_occupied_slot (33 samples, 0.03%)tokio::runtime::time::wheel::level::slot_range (15 samples, 0.01%)core::num::<impl usize>::pow (15 samples, 0.01%)tokio::runtime::time::wheel::level::level_range (17 samples, 0.01%)tokio::runtime::time::wheel::level::slot_range (15 samples, 0.01%)core::num::<impl usize>::pow (15 samples, 0.01%)tokio::runtime::time::wheel::level::Level::next_expiration (95 samples, 0.07%)tokio::runtime::time::wheel::level::slot_range (41 samples, 0.03%)core::num::<impl usize>::pow (41 samples, 0.03%)tokio::runtime::time::wheel::Wheel::next_expiration (129 samples, 0.10%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process_at_time (202 samples, 0.15%)tokio::runtime::time::wheel::Wheel::poll_at (17 samples, 0.01%)tokio::runtime::time::wheel::Wheel::next_expiration (15 samples, 0.01%)<mio::event::events::Iter as core::iter::traits::iterator::Iterator>::next (38 samples, 0.03%)core::option::Option<T>::map (38 samples, 0.03%)core::result::Result<T,E>::map (31 samples, 0.02%)mio::sys::unix::selector::epoll::Selector::select::{{closure}} (31 samples, 0.02%)alloc::vec::Vec<T,A>::set_len (17 samples, 0.01%)[[vdso]] (28 samples, 0.02%)[unknown] (11,031 samples, 8.40%)[unknown][unknown] (10,941 samples, 8.33%)[unknown][unknown] (10,850 samples, 8.26%)[unknown][unknown] (10,691 samples, 8.14%)[unknown][unknown] (10,070 samples, 7.67%)[unknown][unknown] (9,737 samples, 7.42%)[unknown][unknown] (7,659 samples, 5.83%)[unknow..[unknown] (6,530 samples, 4.97%)[unkno..[unknown] (5,633 samples, 4.29%)[unkn..[unknown] (5,055 samples, 3.85%)[unk..[unknown] (4,046 samples, 3.08%)[un..[unknown] (2,911 samples, 2.22%)[..[unknown] (2,115 samples, 1.61%)[unknown] (1,226 samples, 0.93%)[unknown] (455 samples, 0.35%)[unknown] (408 samples, 0.31%)[unknown] (249 samples, 0.19%)[unknown] (202 samples, 
0.15%)[unknown] (100 samples, 0.08%)mio::poll::Poll::poll (11,328 samples, 8.63%)mio::poll::P..mio::sys::unix::selector::epoll::Selector::select (11,328 samples, 8.63%)mio::sys::un..epoll_wait (11,229 samples, 8.55%)epoll_wait__GI___pthread_disable_asynccancel (50 samples, 0.04%)tokio::runtime::io::scheduled_io::ScheduledIo::set_readiness (47 samples, 0.04%)tokio::util::bit::Pack::pack (38 samples, 0.03%)tokio::loom::std::mutex::Mutex<T>::lock (25 samples, 0.02%)std::sync::mutex::Mutex<T>::lock (23 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock (19 samples, 0.01%)tokio::runtime::io::driver::Driver::turn (11,595 samples, 8.83%)tokio::runti..tokio::runtime::io::scheduled_io::ScheduledIo::wake (175 samples, 0.13%)__GI___clock_gettime (15 samples, 0.01%)std::sys::pal::unix::time::Timespec::now (18 samples, 0.01%)tokio::runtime::time::<impl tokio::runtime::time::handle::Handle>::process (26 samples, 0.02%)tokio::runtime::time::source::TimeSource::now (26 samples, 0.02%)tokio::time::clock::Clock::now (20 samples, 0.02%)tokio::time::clock::now (20 samples, 0.02%)std::time::Instant::now (20 samples, 0.02%)std::sys::pal::unix::time::Instant::now (20 samples, 0.02%)tokio::runtime::time::source::TimeSource::now (17 samples, 0.01%)tokio::runtime::time::Driver::park_internal (11,686 samples, 8.90%)tokio::runtim..tokio::runtime::scheduler::multi_thread::park::Inner::park_driver (11,957 samples, 9.11%)tokio::runtim..tokio::runtime::driver::Driver::park (11,950 samples, 9.10%)tokio::runtim..tokio::runtime::driver::TimeDriver::park (11,950 samples, 9.10%)tokio::runtim..tokio::runtime::time::Driver::park (11,950 samples, 9.10%)tokio::runtim..tokio::runtime::scheduler::multi_thread::park::Parker::park (25,502 samples, 19.42%)tokio::runtime::scheduler::mul..tokio::runtime::scheduler::multi_thread::park::Inner::park (25,502 samples, 19.42%)tokio::runtime::scheduler::mul..tokio::runtime::scheduler::multi_thread::worker::Context::park_timeout (25,547 samples, 
19.46%)tokio::runtime::scheduler::mul..core::result::Result<T,E>::is_err (14 samples, 0.01%)core::result::Result<T,E>::is_ok (14 samples, 0.01%)core::sync::atomic::AtomicU32::compare_exchange (45 samples, 0.03%)core::sync::atomic::atomic_compare_exchange (45 samples, 0.03%)tokio::loom::std::mutex::Mutex<T>::lock (84 samples, 0.06%)std::sync::mutex::Mutex<T>::lock (81 samples, 0.06%)std::sys::sync::mutex::futex::Mutex::lock (73 samples, 0.06%)tokio::runtime::scheduler::multi_thread::worker::Core::maintenance (122 samples, 0.09%)<T as core::slice::cmp::SliceContains>::slice_contains::{{closure}} (90 samples, 0.07%)core::cmp::impls::<impl core::cmp::PartialEq for usize>::eq (90 samples, 0.07%)core::slice::<impl [T]>::contains (241 samples, 0.18%)<T as core::slice::cmp::SliceContains>::slice_contains (241 samples, 0.18%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::any (241 samples, 0.18%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (75 samples, 0.06%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (75 samples, 0.06%)core::sync::atomic::AtomicU32::compare_exchange (20 samples, 0.02%)core::sync::atomic::atomic_compare_exchange (20 samples, 0.02%)tokio::runtime::scheduler::multi_thread::idle::Idle::is_parked (283 samples, 0.22%)tokio::loom::std::mutex::Mutex<T>::lock (32 samples, 0.02%)std::sync::mutex::Mutex<T>::lock (32 samples, 0.02%)std::sys::sync::mutex::futex::Mutex::lock (24 samples, 0.02%)core::iter::range::<impl core::iter::traits::iterator::Iterator for core::ops::range::Range<A>>::next (33 samples, 0.03%)<core::ops::range::Range<T> as core::iter::range::RangeIteratorImpl>::spec_next (33 samples, 0.03%)core::cmp::impls::<impl core::cmp::PartialOrd for usize>::lt (33 samples, 0.03%)tokio::runtime::scheduler::multi_thread::idle::Idle::unpark_worker_by_id (98 samples, 0.07%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_parked (401 samples, 
0.31%)alloc::vec::Vec<T,A>::push (14 samples, 0.01%)core::ptr::drop_in_place<std::sync::mutex::MutexGuard<tokio::runtime::scheduler::multi_thread::worker::Synced>> (15 samples, 0.01%)<std::sync::mutex::MutexGuard<T> as core::ops::drop::Drop>::drop (15 samples, 0.01%)std::sys::sync::mutex::futex::Mutex::unlock (14 samples, 0.01%)core::result::Result<T,E>::is_err (15 samples, 0.01%)core::result::Result<T,E>::is_ok (15 samples, 0.01%)core::sync::atomic::AtomicU32::compare_exchange (22 samples, 0.02%)core::sync::atomic::atomic_compare_exchange (22 samples, 0.02%)tokio::loom::std::mutex::Mutex<T>::lock (63 samples, 0.05%)std::sync::mutex::Mutex<T>::lock (62 samples, 0.05%)std::sys::sync::mutex::futex::Mutex::lock (59 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock_contended (21 samples, 0.02%)tokio::runtime::scheduler::multi_thread::idle::Idle::transition_worker_to_parked (106 samples, 0.08%)tokio::runtime::scheduler::multi_thread::idle::State::dec_num_unparked (14 samples, 0.01%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (21 samples, 0.02%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (21 samples, 0.02%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (17 samples, 0.01%)alloc::sync::Arc<T,A>::inner (17 samples, 0.01%)core::ptr::non_null::NonNull<T>::as_ref (17 samples, 0.01%)core::sync::atomic::AtomicU32::load (17 samples, 0.01%)core::sync::atomic::atomic_load (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::is_empty (68 samples, 0.05%)tokio::runtime::scheduler::multi_thread::queue::Inner<T>::is_empty (51 samples, 0.04%)tokio::runtime::scheduler::multi_thread::queue::Inner<T>::len (33 samples, 0.03%)core::sync::atomic::AtomicU64::load (16 samples, 0.01%)core::sync::atomic::atomic_load (16 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_if_work_pending (106 samples, 
0.08%)tokio::runtime::scheduler::multi_thread::worker::Context::park (26,672 samples, 20.31%)tokio::runtime::scheduler::multi..tokio::runtime::scheduler::multi_thread::worker::Core::transition_to_parked (272 samples, 0.21%)tokio::runtime::scheduler::multi_thread::worker::Core::has_tasks (33 samples, 0.03%)tokio::runtime::scheduler::multi_thread::queue::Local<T>::has_tasks (24 samples, 0.02%)tokio::runtime::context::budget (18 samples, 0.01%)std::thread::local::LocalKey<T>::try_with (18 samples, 0.01%)syscall (61 samples, 0.05%)__memcpy_avx512_unaligned_erms (172 samples, 0.13%)__memcpy_avx512_unaligned_erms (224 samples, 0.17%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (228 samples, 0.17%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (228 samples, 0.17%)std::panic::catch_unwind (415 samples, 0.32%)std::panicking::try (415 samples, 0.32%)std::panicking::try::do_call (415 samples, 0.32%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (415 samples, 0.32%)core::ops::function::FnOnce::call_once (415 samples, 0.32%)tokio::runtime::task::harness::Harness<T,S>::complete::{{closure}} (415 samples, 0.32%)tokio::runtime::task::core::Core<T,S>::drop_future_or_output (415 samples, 0.32%)tokio::runtime::task::core::Core<T,S>::set_stage (410 samples, 0.31%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::remove (27 samples, 0.02%)core::result::Result<T,E>::is_err (43 samples, 0.03%)core::result::Result<T,E>::is_ok (43 samples, 0.03%)tokio::runtime::task::harness::Harness<T,S>::complete (570 samples, 0.43%)tokio::runtime::task::harness::Harness<T,S>::release (155 samples, 0.12%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::task::Schedule for alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>::release (152 samples, 0.12%)tokio::runtime::task::list::OwnedTasks<S>::remove (152 samples, 
0.12%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::remove (103 samples, 0.08%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::shard_inner (65 samples, 0.05%)tokio::loom::std::mutex::Mutex<T>::lock (58 samples, 0.04%)std::sync::mutex::Mutex<T>::lock (58 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock (54 samples, 0.04%)std::io::stdio::stderr::INSTANCE (17 samples, 0.01%)tokio::runtime::coop::budget (26 samples, 0.02%)tokio::runtime::coop::with_budget (26 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}} (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::stats::Stats::start_poll (35 samples, 0.03%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (70 samples, 0.05%)__memcpy_avx512_unaligned_erms (42 samples, 0.03%)core::cmp::Ord::min (22 samples, 0.02%)core::cmp::min_by (22 samples, 0.02%)std::io::cursor::Cursor<T>::remaining_slice (27 samples, 0.02%)<std::io::cursor::Cursor<T> as std::io::Read>::read_exact (30 samples, 0.02%)std::io::cursor::Cursor<T>::remaining_slice (24 samples, 0.02%)core::slice::index::<impl core::ops::index::Index<I> for [T]>::index (19 samples, 0.01%)<core::ops::range::RangeFrom<usize> as core::slice::index::SliceIndex<[T]>>::index (19 samples, 0.01%)<core::ops::range::RangeFrom<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked (19 samples, 0.01%)<core::ops::range::Range<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked (19 samples, 0.01%)<std::io::cursor::Cursor<T> as std::io::Read>::read_exact (44 samples, 0.03%)std::io::impls::<impl std::io::Read for &[u8]>::read_exact (20 samples, 0.02%)byteorder::io::ReadBytesExt::read_i32 (46 samples, 0.04%)core::cmp::Ord::min (14 samples, 0.01%)core::cmp::min_by (14 samples, 0.01%)std::io::cursor::Cursor<T>::remaining_slice (19 samples, 0.01%)byteorder::io::ReadBytesExt::read_i64 (24 samples, 
0.02%)<std::io::cursor::Cursor<T> as std::io::Read>::read_exact (24 samples, 0.02%)aquatic_udp_protocol::request::Request::from_bytes (349 samples, 0.27%)__GI___lll_lock_wake_private (148 samples, 0.11%)[unknown] (139 samples, 0.11%)[unknown] (137 samples, 0.10%)[unknown] (123 samples, 0.09%)[unknown] (111 samples, 0.08%)[unknown] (98 samples, 0.07%)[unknown] (42 samples, 0.03%)[unknown] (30 samples, 0.02%)__GI___lll_lock_wait_private (553 samples, 0.42%)futex_wait (541 samples, 0.41%)[unknown] (536 samples, 0.41%)[unknown] (531 samples, 0.40%)[unknown] (524 samples, 0.40%)[unknown] (515 samples, 0.39%)[unknown] (498 samples, 0.38%)[unknown] (470 samples, 0.36%)[unknown] (435 samples, 0.33%)[unknown] (350 samples, 0.27%)[unknown] (327 samples, 0.25%)[unknown] (290 samples, 0.22%)[unknown] (222 samples, 0.17%)[unknown] (160 samples, 0.12%)[unknown] (104 samples, 0.08%)[unknown] (33 samples, 0.03%)[unknown] (25 samples, 0.02%)[unknown] (18 samples, 0.01%)[unknown] (14 samples, 0.01%)_int_free (703 samples, 0.54%)__GI___libc_free (866 samples, 0.66%)tracing::span::Span::record_all (30 samples, 0.02%)unlink_chunk (26 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker::servers::udp::UdpRequest> (899 samples, 0.68%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (899 samples, 0.68%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (899 samples, 0.68%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (899 samples, 0.68%)<alloc::alloc::Global as core::alloc::Allocator>::deallocate (899 samples, 0.68%)alloc::alloc::dealloc (899 samples, 0.68%)__rdl_dealloc (899 samples, 0.68%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::dealloc (899 samples, 0.68%)core::result::Result<T,E>::expect (91 samples, 0.07%)core::result::Result<T,E>::map_err (28 samples, 0.02%)[[vdso]] (28 samples, 0.02%)__GI___clock_gettime (47 samples, 0.04%)std::time::Instant::elapsed (67 samples, 0.05%)std::time::Instant::now (54 samples, 
0.04%)std::sys::pal::unix::time::Instant::now (54 samples, 0.04%)std::sys::pal::unix::time::Timespec::now (53 samples, 0.04%)std::sys::pal::unix::cvt (23 samples, 0.02%)__GI_getsockname (3,792 samples, 2.89%)__..[unknown] (3,714 samples, 2.83%)[u..[unknown] (3,661 samples, 2.79%)[u..[unknown] (3,557 samples, 2.71%)[u..[unknown] (3,416 samples, 2.60%)[u..[unknown] (2,695 samples, 2.05%)[..[unknown] (2,063 samples, 1.57%)[unknown] (891 samples, 0.68%)[unknown] (270 samples, 0.21%)[unknown] (99 samples, 0.08%)[unknown] (94 samples, 0.07%)[unknown] (84 samples, 0.06%)[unknown] (77 samples, 0.06%)[unknown] (25 samples, 0.02%)[unknown] (16 samples, 0.01%)std::sys_common::net::TcpListener::socket_addr::{{closure}} (3,800 samples, 2.89%)st..tokio::net::udp::UdpSocket::local_addr (3,838 samples, 2.92%)to..mio::net::udp::UdpSocket::local_addr (3,838 samples, 2.92%)mi..std::net::tcp::TcpListener::local_addr (3,838 samples, 2.92%)st..std::sys_common::net::TcpListener::socket_addr (3,838 samples, 2.92%)st..std::sys_common::net::sockname (3,835 samples, 2.92%)st..[[vdso]] (60 samples, 0.05%)rand_chacha::guts::ChaCha::pos64 (168 samples, 0.13%)<ppv_lite86::soft::x2<W,G> as core::ops::arith::AddAssign>::add_assign (26 samples, 0.02%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as core::ops::arith::AddAssign>::add_assign (26 samples, 0.02%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as core::ops::arith::Add>::add (26 samples, 0.02%)core::core_arch::x86::avx2::_mm256_add_epi32 (26 samples, 0.02%)<ppv_lite86::soft::x2<W,G> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right16 (26 samples, 0.02%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right16 (26 samples, 0.02%)core::core_arch::x86::avx2::_mm256_shuffle_epi8 (26 samples, 0.02%)core::core_arch::x86::avx2::_mm256_or_si256 (29 samples, 0.02%)<ppv_lite86::soft::x2<W,G> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right20 (31 samples, 
0.02%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right20 (31 samples, 0.02%)<ppv_lite86::soft::x2<W,G> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right24 (18 samples, 0.01%)<ppv_lite86::x86_64::sse2::avx2::u32x4x2_avx2<NI> as ppv_lite86::types::RotateEachWord32>::rotate_each_word_right24 (18 samples, 0.01%)core::core_arch::x86::avx2::_mm256_shuffle_epi8 (18 samples, 0.01%)rand_chacha::guts::round (118 samples, 0.09%)rand_chacha::guts::refill_wide::impl_avx2 (312 samples, 0.24%)rand_chacha::guts::refill_wide::fn_impl (312 samples, 0.24%)rand_chacha::guts::refill_wide_impl (312 samples, 0.24%)<rand_chacha::chacha::ChaCha12Core as rand_core::block::BlockRngCore>::generate (384 samples, 0.29%)rand_chacha::guts::ChaCha::refill4 (384 samples, 0.29%)rand::rng::Rng::gen (432 samples, 0.33%)rand::distributions::other::<impl rand::distributions::distribution::Distribution<[T: _]> for rand::distributions::Standard>::sample (432 samples, 0.33%)rand::rng::Rng::gen (432 samples, 0.33%)rand::distributions::integer::<impl rand::distributions::distribution::Distribution<u8> for rand::distributions::Standard>::sample (432 samples, 0.33%)<rand::rngs::thread::ThreadRng as rand_core::RngCore>::next_u32 (432 samples, 0.33%)<rand::rngs::adapter::reseeding::ReseedingRng<R,Rsdr> as rand_core::RngCore>::next_u32 (432 samples, 0.33%)<rand_core::block::BlockRng<R> as rand_core::RngCore>::next_u32 (432 samples, 0.33%)rand_core::block::BlockRng<R>::generate_and_set (392 samples, 0.30%)<rand::rngs::adapter::reseeding::ReseedingCore<R,Rsdr> as rand_core::block::BlockRngCore>::generate (392 samples, 0.30%)torrust_tracker::servers::udp::handlers::RequestId::make (440 samples, 0.34%)uuid::v4::<impl uuid::Uuid>::new_v4 (436 samples, 0.33%)uuid::rng::bytes (435 samples, 0.33%)rand::random (435 samples, 0.33%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for 
alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::get_peers_for_client (34 samples, 0.03%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_peers_for_client (22 samples, 0.02%)core::iter::traits::iterator::Iterator::collect (16 samples, 0.01%)<alloc::vec::Vec<T> as core::iter::traits::collect::FromIterator<T>>::from_iter (16 samples, 0.01%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter::SpecFromIter<T,I>>::from_iter (16 samples, 0.01%)<alloc::vec::Vec<T> as alloc::vec::spec_from_iter_nested::SpecFromIterNested<T,I>>::from_iter (16 samples, 0.01%)<core::iter::adapters::cloned::Cloned<I> as core::iter::traits::iterator::Iterator>::next (16 samples, 0.01%)<core::iter::adapters::take::Take<I> as core::iter::traits::iterator::Iterator>::next (16 samples, 0.01%)<core::iter::adapters::filter::Filter<I,P> as core::iter::traits::iterator::Iterator>::next (15 samples, 0.01%)core::iter::traits::iterator::Iterator::find (15 samples, 0.01%)core::iter::traits::iterator::Iterator::try_fold (15 samples, 0.01%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (31 samples, 0.02%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (45 samples, 0.03%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (45 samples, 0.03%)core::slice::iter::Iter<T>::post_inc_start (14 samples, 0.01%)core::ptr::non_null::NonNull<T>::add (14 samples, 0.01%)__memcmp_evex_movbe (79 samples, 0.06%)core::cmp::impls::<impl core::cmp::Ord for isize>::cmp (26 samples, 0.02%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (165 samples, 0.13%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (165 samples, 0.13%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (165 samples, 0.13%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (165 
samples, 0.13%)<u8 as core::slice::cmp::SliceOrd>::compare (165 samples, 0.13%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (339 samples, 0.26%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (308 samples, 0.23%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (308 samples, 0.23%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (342 samples, 0.26%)std::sys::sync::rwlock::futex::RwLock::spin_read (25 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::spin_until (25 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::read_contended (28 samples, 0.02%)torrust_tracker::core::Tracker::get_torrent_peers_for_peer (436 samples, 0.33%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get (397 samples, 0.30%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents (29 samples, 0.02%)std::sync::rwlock::RwLock<T>::read (29 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::read (29 samples, 0.02%)__memcmp_evex_movbe (31 samples, 0.02%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (52 samples, 0.04%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (52 samples, 0.04%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (52 samples, 0.04%)core::slice::cmp::<impl 
core::cmp::Ord for [T]>::cmp (52 samples, 0.04%)<u8 as core::slice::cmp::SliceOrd>::compare (52 samples, 0.04%)alloc::collections::btree::map::BTreeMap<K,V,A>::entry (103 samples, 0.08%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (102 samples, 0.08%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (96 samples, 0.07%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (96 samples, 0.07%)<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq (72 samples, 0.05%)<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next (104 samples, 0.08%)<core::slice::iter::Iter<T> as core::iter::traits::iterator::Iterator>::next (104 samples, 0.08%)core::slice::iter::Iter<T>::post_inc_start (32 samples, 0.02%)core::ptr::non_null::NonNull<T>::add (32 samples, 0.02%)__memcmp_evex_movbe (79 samples, 0.06%)core::cmp::impls::<impl core::cmp::Ord for isize>::cmp (81 samples, 0.06%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (271 samples, 0.21%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (271 samples, 0.21%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (271 samples, 0.21%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (271 samples, 0.21%)<u8 as core::slice::cmp::SliceOrd>::compare (271 samples, 0.21%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (610 samples, 0.46%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (566 samples, 0.43%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (566 
samples, 0.43%)alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Immut,K,V,Type>::keys (18 samples, 0.01%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (616 samples, 0.47%)alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Mut,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::KV>::split (15 samples, 0.01%)alloc::collections::btree::map::entry::Entry<K,V,A>::or_insert (46 samples, 0.04%)alloc::collections::btree::map::entry::VacantEntry<K,V,A>::insert (45 samples, 0.03%)alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Mut,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::Edge>::insert_recursing (40 samples, 0.03%)alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Mut,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::Edge>::insert (27 samples, 0.02%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::get_stats (29 samples, 0.02%)alloc::collections::btree::map::BTreeMap<K,V,A>::values (20 samples, 0.02%)alloc::collections::btree::map::BTreeMap<K,V,A>::insert (120 samples, 0.09%)alloc::collections::btree::map::entry::VacantEntry<K,V,A>::insert (118 samples, 0.09%)alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Owned,K,V,alloc::collections::btree::node::marker::Leaf>::new_leaf (118 samples, 0.09%)alloc::collections::btree::node::LeafNode<K,V>::new (118 samples, 0.09%)alloc::boxed::Box<T,A>::new_uninit_in (118 samples, 0.09%)alloc::boxed::Box<T,A>::try_new_uninit_in (118 samples, 0.09%)<alloc::alloc::Global as core::alloc::Allocator>::allocate (118 
samples, 0.09%)alloc::alloc::Global::alloc_impl (118 samples, 0.09%)alloc::alloc::alloc (118 samples, 0.09%)__rdl_alloc (118 samples, 0.09%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (118 samples, 0.09%)__GI___libc_malloc (118 samples, 0.09%)_int_malloc (107 samples, 0.08%)_int_malloc (28 samples, 0.02%)__GI___libc_malloc (32 samples, 0.02%)__rdl_alloc (36 samples, 0.03%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (36 samples, 0.03%)alloc::sync::Arc<T>::new (42 samples, 0.03%)alloc::boxed::Box<T>::new (42 samples, 0.03%)alloc::alloc::exchange_malloc (39 samples, 0.03%)<alloc::alloc::Global as core::alloc::Allocator>::allocate (39 samples, 0.03%)alloc::alloc::Global::alloc_impl (39 samples, 0.03%)alloc::alloc::alloc (39 samples, 0.03%)core::mem::drop (15 samples, 0.01%)core::ptr::drop_in_place<core::option::Option<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>>> (15 samples, 0.01%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>> (15 samples, 0.01%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (15 samples, 0.01%)__GI___libc_free (39 samples, 0.03%)_int_free (37 samples, 0.03%)get_max_fast (16 samples, 0.01%)core::option::Option<T>::is_some_and (50 samples, 0.04%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer::{{closure}} (50 samples, 0.04%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>> (50 samples, 0.04%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (50 samples, 0.04%)torrust_tracker_torrent_repository::entry::mutex_std::<impl torrust_tracker_torrent_repository::entry::EntrySync for alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>::insert_or_update_peer_and_get_stats (290 samples, 
0.22%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer_and_get_stats (284 samples, 0.22%)torrust_tracker_torrent_repository::entry::single::<impl torrust_tracker_torrent_repository::entry::Entry for torrust_tracker_torrent_repository::entry::Torrent>::insert_or_update_peer (255 samples, 0.19%)std::sys::sync::rwlock::futex::RwLock::spin_read (16 samples, 0.01%)std::sys::sync::rwlock::futex::RwLock::spin_until (16 samples, 0.01%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents (21 samples, 0.02%)std::sync::rwlock::RwLock<T>::read (21 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::read (21 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::read_contended (21 samples, 0.02%)torrust_tracker::core::Tracker::update_torrent_with_peer_and_get_stats::{{closure}} (1,147 samples, 0.87%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::update_torrent_with_peer_and_get_stats (1,144 samples, 0.87%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get_torrents_mut (32 samples, 0.02%)std::sync::rwlock::RwLock<T>::write (32 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::write (32 samples, 
0.02%)std::sys::sync::rwlock::futex::RwLock::write_contended (32 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::spin_write (28 samples, 0.02%)std::sys::sync::rwlock::futex::RwLock::spin_until (28 samples, 0.02%)torrust_tracker::core::Tracker::announce::{{closure}} (1,597 samples, 1.22%)<core::net::socket_addr::SocketAddrV4 as core::hash::Hash>::hash (14 samples, 0.01%)<core::net::ip_addr::Ipv4Addr as core::hash::Hash>::hash (14 samples, 0.01%)<core::net::socket_addr::SocketAddr as core::hash::Hash>::hash (29 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (24 samples, 0.02%)<core::time::Nanoseconds as core::hash::Hash>::hash (25 samples, 0.02%)core::hash::impls::<impl core::hash::Hash for u32>::hash (25 samples, 0.02%)core::hash::Hasher::write_u32 (25 samples, 0.02%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (25 samples, 0.02%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (25 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (36 samples, 0.03%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (37 samples, 0.03%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (37 samples, 0.03%)<core::time::Duration as core::hash::Hash>::hash (64 samples, 0.05%)core::hash::impls::<impl core::hash::Hash for u64>::hash (39 samples, 0.03%)core::hash::Hasher::write_u64 (39 samples, 0.03%)<torrust_tracker_clock::time_extent::TimeExtent as core::hash::Hash>::hash (122 samples, 0.09%)core::hash::impls::<impl core::hash::Hash for u64>::hash (58 samples, 0.04%)core::hash::Hasher::write_u64 (58 samples, 0.04%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (58 samples, 0.04%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (58 samples, 0.04%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (57 samples, 0.04%)core::hash::sip::u8to64_le (23 samples, 0.02%)core::hash::Hasher::write_length_prefix (27 samples, 0.02%)core::hash::Hasher::write_usize 
(27 samples, 0.02%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (27 samples, 0.02%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (27 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (27 samples, 0.02%)<core::hash::sip::Sip13Rounds as core::hash::sip::Sip>::c_rounds (16 samples, 0.01%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::build (246 samples, 0.19%)core::array::<impl core::hash::Hash for [T: N]>::hash (93 samples, 0.07%)core::hash::impls::<impl core::hash::Hash for [T]>::hash (93 samples, 0.07%)core::hash::impls::<impl core::hash::Hash for u8>::hash_slice (66 samples, 0.05%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (66 samples, 0.05%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (66 samples, 0.05%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (62 samples, 0.05%)core::hash::sip::u8to64_le (17 samples, 0.01%)torrust_tracker::servers::udp::connection_cookie::check (285 samples, 0.22%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::get_last_time_extent (36 samples, 0.03%)torrust_tracker_clock::time_extent::Make::now (36 samples, 0.03%)torrust_tracker_clock::clock::working::<impl torrust_tracker_clock::clock::Time for torrust_tracker_clock::clock::Clock<torrust_tracker_clock::clock::working::WorkingClock>>::now (24 samples, 0.02%)std::time::SystemTime::now (19 samples, 0.01%)std::sys::pal::unix::time::SystemTime::now (19 samples, 0.01%)torrust_tracker::servers::udp::handlers::handle_announce::{{closure}} (1,954 samples, 1.49%)<core::net::socket_addr::SocketAddr as core::hash::Hash>::hash (24 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (18 samples, 0.01%)<core::time::Nanoseconds as core::hash::Hash>::hash (20 samples, 0.02%)core::hash::impls::<impl core::hash::Hash for u32>::hash (20 samples, 0.02%)core::hash::Hasher::write_u32 (20 samples, 0.02%)<std::hash::random::DefaultHasher as 
core::hash::Hasher>::write (20 samples, 0.02%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (20 samples, 0.02%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (44 samples, 0.03%)<core::time::Duration as core::hash::Hash>::hash (65 samples, 0.05%)core::hash::impls::<impl core::hash::Hash for u64>::hash (45 samples, 0.03%)core::hash::Hasher::write_u64 (45 samples, 0.03%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (45 samples, 0.03%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (45 samples, 0.03%)<torrust_tracker_clock::time_extent::TimeExtent as core::hash::Hash>::hash (105 samples, 0.08%)core::hash::impls::<impl core::hash::Hash for u64>::hash (40 samples, 0.03%)core::hash::Hasher::write_u64 (40 samples, 0.03%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (40 samples, 0.03%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (40 samples, 0.03%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (39 samples, 0.03%)core::hash::Hasher::write_length_prefix (34 samples, 0.03%)core::hash::Hasher::write_usize (34 samples, 0.03%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (34 samples, 0.03%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (34 samples, 0.03%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (33 samples, 0.03%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::build (231 samples, 0.18%)core::array::<impl core::hash::Hash for [T: N]>::hash (100 samples, 0.08%)core::hash::impls::<impl core::hash::Hash for [T]>::hash (100 samples, 0.08%)core::hash::impls::<impl core::hash::Hash for u8>::hash_slice (66 samples, 0.05%)<std::hash::random::DefaultHasher as core::hash::Hasher>::write (66 samples, 0.05%)<core::hash::sip::SipHasher13 as core::hash::Hasher>::write (66 samples, 0.05%)<core::hash::sip::Hasher<S> as core::hash::Hasher>::write (61 samples, 0.05%)core::hash::sip::u8to64_le (16 samples, 0.01%)_int_free (16 samples, 
0.01%)torrust_tracker::servers::udp::handlers::handle_connect::{{closure}} (270 samples, 0.21%)torrust_tracker::servers::udp::connection_cookie::make (268 samples, 0.20%)torrust_tracker::servers::udp::connection_cookie::cookie_builder::get_last_time_extent (36 samples, 0.03%)torrust_tracker_clock::time_extent::Make::now (35 samples, 0.03%)torrust_tracker_clock::clock::working::<impl torrust_tracker_clock::clock::Time for torrust_tracker_clock::clock::Clock<torrust_tracker_clock::clock::working::WorkingClock>>::now (31 samples, 0.02%)std::time::SystemTime::now (26 samples, 0.02%)std::sys::pal::unix::time::SystemTime::now (26 samples, 0.02%)torrust_tracker::core::ScrapeData::add_file (19 samples, 0.01%)std::collections::hash::map::HashMap<K,V,S>::insert (19 samples, 0.01%)hashbrown::map::HashMap<K,V,S,A>::insert (19 samples, 0.01%)hashbrown::raw::RawTable<T,A>::find_or_find_insert_slot (16 samples, 0.01%)hashbrown::raw::RawTable<T,A>::reserve (16 samples, 0.01%)<torrust_tracker_primitives::info_hash::InfoHash as core::cmp::Ord>::cmp (17 samples, 0.01%)core::array::<impl core::cmp::Ord for [T: N]>::cmp (17 samples, 0.01%)core::cmp::impls::<impl core::cmp::Ord for &A>::cmp (17 samples, 0.01%)core::slice::cmp::<impl core::cmp::Ord for [T]>::cmp (17 samples, 0.01%)<u8 as core::slice::cmp::SliceOrd>::compare (17 samples, 0.01%)alloc::collections::btree::map::BTreeMap<K,V,A>::get (61 samples, 0.05%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,alloc::collections::btree::node::marker::LeafOrInternal>>::search_tree (61 samples, 0.05%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::search_node (53 samples, 0.04%)alloc::collections::btree::search::<impl alloc::collections::btree::node::NodeRef<BorrowType,K,V,Type>>::find_key_index (53 samples, 0.04%)torrust_tracker::servers::udp::handlers::handle_request::{{closure}} (2,336 samples, 
1.78%)t..torrust_tracker::servers::udp::handlers::handle_scrape::{{closure}} (101 samples, 0.08%)torrust_tracker::core::Tracker::scrape::{{closure}} (90 samples, 0.07%)torrust_tracker::core::Tracker::get_swarm_metadata (68 samples, 0.05%)torrust_tracker_torrent_repository::repository::rw_lock_std_mutex_std::<impl torrust_tracker_torrent_repository::repository::Repository<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> for torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>::get (64 samples, 0.05%)alloc::raw_vec::finish_grow (19 samples, 0.01%)alloc::vec::Vec<T,A>::reserve (21 samples, 0.02%)alloc::raw_vec::RawVec<T,A>::reserve (21 samples, 0.02%)alloc::raw_vec::RawVec<T,A>::reserve::do_reserve_and_handle (21 samples, 0.02%)alloc::raw_vec::RawVec<T,A>::grow_amortized (21 samples, 0.02%)<alloc::string::String as core::fmt::Write>::write_str (23 samples, 0.02%)alloc::string::String::push_str (23 samples, 0.02%)alloc::vec::Vec<T,A>::extend_from_slice (23 samples, 0.02%)<alloc::vec::Vec<T,A> as alloc::vec::spec_extend::SpecExtend<&T,core::slice::iter::Iter<T>>>::spec_extend (23 samples, 0.02%)alloc::vec::Vec<T,A>::append_elements (23 samples, 0.02%)core::fmt::num::imp::<impl core::fmt::Display for i32>::fmt (85 samples, 0.06%)core::fmt::num::imp::fmt_u64 (78 samples, 0.06%)<alloc::string::String as core::fmt::Write>::write_str (15 samples, 0.01%)alloc::string::String::push_str (15 samples, 0.01%)alloc::vec::Vec<T,A>::extend_from_slice (15 samples, 0.01%)<alloc::vec::Vec<T,A> as alloc::vec::spec_extend::SpecExtend<&T,core::slice::iter::Iter<T>>>::spec_extend (15 samples, 0.01%)alloc::vec::Vec<T,A>::append_elements (15 samples, 0.01%)core::fmt::num::imp::<impl core::fmt::Display for i64>::fmt (37 samples, 0.03%)core::fmt::num::imp::fmt_u64 (36 samples, 0.03%)<T as alloc::string::ToString>::to_string (141 samples, 
0.11%)core::option::Option<T>::expect (34 samples, 0.03%)<alloc::alloc::Global as core::alloc::Allocator>::deallocate (28 samples, 0.02%)alloc::alloc::dealloc (28 samples, 0.02%)__rdl_dealloc (28 samples, 0.02%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::dealloc (28 samples, 0.02%)core::ptr::drop_in_place<alloc::string::String> (55 samples, 0.04%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (55 samples, 0.04%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (55 samples, 0.04%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (55 samples, 0.04%)alloc::raw_vec::RawVec<T,A>::current_memory (20 samples, 0.02%)torrust_tracker::servers::udp::logging::map_action_name (16 samples, 0.01%)binascii::bin2hex (51 samples, 0.04%)<torrust_tracker_primitives::info_hash::InfoHash as core::fmt::Display>::fmt (16 samples, 0.01%)core::fmt::write (25 samples, 0.02%)core::fmt::rt::Argument::fmt (15 samples, 0.01%)core::fmt::Formatter::write_fmt (87 samples, 0.07%)core::str::converts::from_utf8 (43 samples, 0.03%)core::str::validations::run_utf8_validation (37 samples, 0.03%)torrust_tracker_primitives::info_hash::InfoHash::to_hex_string (161 samples, 0.12%)<T as alloc::string::ToString>::to_string (161 samples, 0.12%)<torrust_tracker_primitives::info_hash::InfoHash as core::fmt::Display>::fmt (156 samples, 0.12%)torrust_tracker::servers::udp::logging::log_request (479 samples, 0.36%)[[vdso]] (51 samples, 0.04%)alloc::raw_vec::finish_grow (56 samples, 0.04%)alloc::vec::Vec<T,A>::reserve (64 samples, 0.05%)alloc::raw_vec::RawVec<T,A>::reserve (64 samples, 0.05%)alloc::raw_vec::RawVec<T,A>::reserve::do_reserve_and_handle (64 samples, 0.05%)alloc::raw_vec::RawVec<T,A>::grow_amortized (64 samples, 0.05%)<alloc::string::String as core::fmt::Write>::write_str (65 samples, 0.05%)alloc::string::String::push_str (65 samples, 0.05%)alloc::vec::Vec<T,A>::extend_from_slice (65 samples, 0.05%)<alloc::vec::Vec<T,A> as 
alloc::vec::spec_extend::SpecExtend<&T,core::slice::iter::Iter<T>>>::spec_extend (65 samples, 0.05%)alloc::vec::Vec<T,A>::append_elements (65 samples, 0.05%)core::fmt::num::imp::<impl core::fmt::Display for i32>::fmt (114 samples, 0.09%)core::fmt::num::imp::fmt_u64 (110 samples, 0.08%)<T as alloc::string::ToString>::to_string (132 samples, 0.10%)core::option::Option<T>::expect (20 samples, 0.02%)core::ptr::drop_in_place<alloc::string::String> (22 samples, 0.02%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (22 samples, 0.02%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (22 samples, 0.02%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (22 samples, 0.02%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (8,883 samples, 6.77%)torrust_t..torrust_tracker::servers::udp::logging::log_response (238 samples, 0.18%)__GI___lll_lock_wait_private (14 samples, 0.01%)futex_wait (14 samples, 0.01%)__GI___lll_lock_wake_private (20 samples, 0.02%)[unknown] (20 samples, 0.02%)[unknown] (20 samples, 0.02%)[unknown] (20 samples, 0.02%)[unknown] (20 samples, 0.02%)[unknown] (17 samples, 0.01%)_int_malloc (191 samples, 0.15%)__libc_calloc (238 samples, 0.18%)__memcpy_avx512_unaligned_erms (34 samples, 0.03%)alloc::vec::from_elem (316 samples, 0.24%)<u8 as alloc::vec::spec_from_elem::SpecFromElem>::from_elem (316 samples, 0.24%)alloc::raw_vec::RawVec<T,A>::with_capacity_zeroed_in (316 samples, 0.24%)alloc::raw_vec::RawVec<T,A>::try_allocate_in (316 samples, 0.24%)<alloc::alloc::Global as core::alloc::Allocator>::allocate_zeroed (312 samples, 0.24%)alloc::alloc::Global::alloc_impl (312 samples, 0.24%)alloc::alloc::alloc_zeroed (312 samples, 0.24%)__rdl_alloc_zeroed (312 samples, 0.24%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc_zeroed (312 samples, 0.24%)byteorder::ByteOrder::write_i32 (18 samples, 0.01%)<byteorder::BigEndian as byteorder::ByteOrder>::write_u32 (18 samples, 0.01%)core::num::<impl 
u32>::to_be_bytes (18 samples, 0.01%)core::num::<impl u32>::to_be (18 samples, 0.01%)core::num::<impl u32>::swap_bytes (18 samples, 0.01%)byteorder::io::WriteBytesExt::write_i32 (89 samples, 0.07%)std::io::Write::write_all (71 samples, 0.05%)<std::io::cursor::Cursor<alloc::vec::Vec<u8,A>> as std::io::Write>::write (71 samples, 0.05%)std::io::cursor::vec_write (71 samples, 0.05%)std::io::cursor::vec_write_unchecked (51 samples, 0.04%)core::ptr::mut_ptr::<impl *mut T>::copy_from (51 samples, 0.04%)core::intrinsics::copy (51 samples, 0.04%)aquatic_udp_protocol::response::Response::write (227 samples, 0.17%)byteorder::io::WriteBytesExt::write_i64 (28 samples, 0.02%)std::io::Write::write_all (21 samples, 0.02%)<std::io::cursor::Cursor<alloc::vec::Vec<u8,A>> as std::io::Write>::write (21 samples, 0.02%)std::io::cursor::vec_write (21 samples, 0.02%)std::io::cursor::vec_write_unchecked (21 samples, 0.02%)core::ptr::mut_ptr::<impl *mut T>::copy_from (21 samples, 0.02%)core::intrinsics::copy (21 samples, 0.02%)__GI___lll_lock_wake_private (17 samples, 0.01%)[unknown] (15 samples, 0.01%)[unknown] (14 samples, 0.01%)__GI___lll_lock_wait_private (16 samples, 0.01%)futex_wait (15 samples, 0.01%)[unknown] (14 samples, 0.01%)[unknown] (14 samples, 0.01%)[unknown] (14 samples, 0.01%)[unknown] (14 samples, 0.01%)_int_free (136 samples, 0.10%)__GI___libc_free (206 samples, 0.16%)<alloc::alloc::Global as core::alloc::Allocator>::deallocate (211 samples, 0.16%)alloc::alloc::dealloc (211 samples, 0.16%)__rdl_dealloc (211 samples, 0.16%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::dealloc (211 samples, 0.16%)core::ptr::drop_in_place<std::io::cursor::Cursor<alloc::vec::Vec<u8>>> (224 samples, 0.17%)core::ptr::drop_in_place<alloc::vec::Vec<u8>> (224 samples, 0.17%)core::ptr::drop_in_place<alloc::raw_vec::RawVec<u8>> (224 samples, 0.17%)<alloc::raw_vec::RawVec<T,A> as core::ops::drop::Drop>::drop (224 samples, 
0.17%)std::io::cursor::Cursor<T>::new (56 samples, 0.04%)tokio::io::ready::Ready::intersection (23 samples, 0.02%)tokio::io::ready::Ready::from_interest (23 samples, 0.02%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (83 samples, 0.06%)[unknown] (32,674 samples, 24.88%)[unknown][unknown] (32,402 samples, 24.68%)[unknown][unknown] (32,272 samples, 24.58%)[unknown][unknown] (32,215 samples, 24.54%)[unknown][unknown] (31,174 samples, 23.74%)[unknown][unknown] (30,794 samples, 23.45%)[unknown][unknown] (30,036 samples, 22.88%)[unknown][unknown] (28,639 samples, 21.81%)[unknown][unknown] (27,908 samples, 21.25%)[unknown][unknown] (26,013 samples, 19.81%)[unknown][unknown] (23,181 samples, 17.65%)[unknown][unknown] (19,559 samples, 14.90%)[unknown][unknown] (18,052 samples, 13.75%)[unknown][unknown] (15,794 samples, 12.03%)[unknown][unknown] (14,740 samples, 11.23%)[unknown][unknown] (12,486 samples, 9.51%)[unknown][unknown] (11,317 samples, 8.62%)[unknown][unknown] (10,725 samples, 8.17%)[unknown][unknown] (10,017 samples, 7.63%)[unknown][unknown] (9,713 samples, 7.40%)[unknown][unknown] (8,432 samples, 6.42%)[unknown][unknown] (8,062 samples, 6.14%)[unknown][unknown] (6,973 samples, 5.31%)[unknow..[unknown] (5,328 samples, 4.06%)[unk..[unknown] (4,352 samples, 3.31%)[un..[unknown] (3,786 samples, 2.88%)[u..[unknown] (3,659 samples, 2.79%)[u..[unknown] (3,276 samples, 2.50%)[u..[unknown] (2,417 samples, 1.84%)[..[unknown] (2,115 samples, 1.61%)[unknown] (1,610 samples, 1.23%)[unknown] (422 samples, 0.32%)[unknown] (84 samples, 0.06%)[unknown] (69 samples, 0.05%)__GI___pthread_disable_asynccancel (67 samples, 0.05%)__libc_sendto (32,896 samples, 25.05%)__libc_sendtotokio::net::udp::UdpSocket::send_to_addr::{{closure}}::{{closure}} (32,981 samples, 25.12%)tokio::net::udp::UdpSocket::send_to_addr..mio::net::udp::UdpSocket::send_to (32,981 samples, 25.12%)mio::net::udp::UdpSocket::send_tomio::io_source::IoSource<T>::do_io (32,981 
samples, 25.12%)mio::io_source::IoSource<T>::do_iomio::sys::unix::stateless_io_source::IoSourceState::do_io (32,981 samples, 25.12%)mio::sys::unix::stateless_io_source::IoS..mio::net::udp::UdpSocket::send_to::{{closure}} (32,981 samples, 25.12%)mio::net::udp::UdpSocket::send_to::{{clo..std::net::udp::UdpSocket::send_to (32,981 samples, 25.12%)std::net::udp::UdpSocket::send_tostd::sys_common::net::UdpSocket::send_to (32,981 samples, 25.12%)std::sys_common::net::UdpSocket::send_tostd::sys::pal::unix::cvt (85 samples, 0.06%)torrust_tracker::servers::udp::server::Udp::process_request::{{closure}} (44,349 samples, 33.78%)torrust_tracker::servers::udp::server::Udp::process_req..torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (43,412 samples, 33.06%)torrust_tracker::servers::udp::server::Udp::process_va..torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (34,320 samples, 26.14%)torrust_tracker::servers::udp::server::Udp..torrust_tracker::servers::udp::server::Udp::send_packet::{{closure}} (33,360 samples, 25.41%)torrust_tracker::servers::udp::server::Ud..tokio::net::udp::UdpSocket::send_to::{{closure}} (33,227 samples, 25.31%)tokio::net::udp::UdpSocket::send_to::{{c..tokio::net::udp::UdpSocket::send_to_addr::{{closure}} (33,142 samples, 25.24%)tokio::net::udp::UdpSocket::send_to_addr..tokio::runtime::io::registration::Registration::async_io::{{closure}} (33,115 samples, 25.22%)tokio::runtime::io::registration::Regist..tokio::runtime::io::registration::Registration::readiness::{{closure}} (28 samples, 0.02%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (18 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (15 samples, 0.01%)<tokio::runtime::io::scheduled_io::Readiness as core::ops::drop::Drop>::drop (14 samples, 0.01%)<alloc::sync::Arc<T,A> as core::clone::Clone>::clone (15 samples, 0.01%)core::sync::atomic::AtomicUsize::fetch_add (15 samples, 
0.01%)core::sync::atomic::atomic_add (15 samples, 0.01%)__GI___lll_lock_wait_private (16 samples, 0.01%)futex_wait (16 samples, 0.01%)[unknown] (16 samples, 0.01%)[unknown] (15 samples, 0.01%)[unknown] (15 samples, 0.01%)[unknown] (14 samples, 0.01%)[unknown] (14 samples, 0.01%)_int_free (135 samples, 0.10%)__GI___libc_free (147 samples, 0.11%)syscall (22 samples, 0.02%)core::ptr::drop_in_place<tokio::runtime::task::core::Core<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}},alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>> (15 samples, 0.01%)tokio::runtime::task::harness::Harness<T,S>::dealloc (24 samples, 0.02%)core::mem::drop (24 samples, 0.02%)core::ptr::drop_in_place<alloc::boxed::Box<tokio::runtime::task::core::Cell<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}},alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>>> (24 samples, 0.02%)core::ptr::drop_in_place<tokio::runtime::task::core::Cell<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}},alloc::sync::Arc<tokio::runtime::scheduler::multi_thread::handle::Handle>>> (24 samples, 0.02%)core::ptr::drop_in_place<tokio::runtime::task::abort::AbortHandle> (262 samples, 0.20%)<tokio::runtime::task::abort::AbortHandle as core::ops::drop::Drop>::drop (262 samples, 0.20%)tokio::runtime::task::raw::RawTask::drop_abort_handle (256 samples, 0.19%)tokio::runtime::task::raw::drop_abort_handle (59 samples, 0.04%)tokio::runtime::task::harness::Harness<T,S>::drop_reference (50 samples, 0.04%)tokio::runtime::task::state::State::ref_dec (50 samples, 0.04%)tokio::runtime::task::raw::RawTask::drop_join_handle_slow (16 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::task::join::JoinHandle<()>> (47 samples, 0.04%)<tokio::runtime::task::join::JoinHandle<T> as core::ops::drop::Drop>::drop (47 samples, 0.04%)tokio::runtime::task::state::State::drop_join_handle_fast (19 samples, 
0.01%)core::sync::atomic::AtomicUsize::compare_exchange_weak (19 samples, 0.01%)core::sync::atomic::atomic_compare_exchange_weak (19 samples, 0.01%)ringbuf::ring_buffer::base::RbBase::is_full (14 samples, 0.01%)<ringbuf::ring_buffer::shared::SharedRb<T,C> as ringbuf::ring_buffer::base::RbBase<T>>::head (14 samples, 0.01%)core::sync::atomic::AtomicUsize::load (14 samples, 0.01%)core::sync::atomic::atomic_load (14 samples, 0.01%)ringbuf::consumer::Consumer<T,R>::advance (29 samples, 0.02%)ringbuf::ring_buffer::base::RbRead::advance_head (29 samples, 0.02%)ringbuf::ring_buffer::rb::Rb::pop (50 samples, 0.04%)ringbuf::consumer::Consumer<T,R>::pop (50 samples, 0.04%)ringbuf::producer::Producer<T,R>::advance (23 samples, 0.02%)ringbuf::ring_buffer::base::RbWrite::advance_tail (23 samples, 0.02%)core::num::nonzero::<impl core::ops::arith::Rem<core::num::nonzero::NonZero<usize>> for usize>::rem (19 samples, 0.01%)ringbuf::ring_buffer::rb::Rb::push_overwrite (107 samples, 0.08%)ringbuf::ring_buffer::rb::Rb::push (43 samples, 0.03%)ringbuf::producer::Producer<T,R>::push (43 samples, 0.03%)tokio::runtime::task::abort::AbortHandle::is_finished (84 samples, 0.06%)tokio::runtime::task::state::Snapshot::is_complete (84 samples, 0.06%)tokio::runtime::task::join::JoinHandle<T>::abort_handle (17 samples, 0.01%)tokio::runtime::task::raw::RawTask::ref_inc (17 samples, 0.01%)tokio::runtime::task::state::State::ref_inc (17 samples, 0.01%)core::sync::atomic::AtomicUsize::fetch_add (14 samples, 0.01%)core::sync::atomic::atomic_add (14 samples, 0.01%)__GI___lll_lock_wake_private (22 samples, 0.02%)[unknown] (20 samples, 0.02%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)malloc_consolidate (95 samples, 0.07%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (76 samples, 0.06%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (31 samples, 
0.02%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (26 samples, 0.02%)_int_malloc (282 samples, 0.21%)__GI___libc_malloc (323 samples, 0.25%)alloc::vec::Vec<T>::with_capacity (326 samples, 0.25%)alloc::vec::Vec<T,A>::with_capacity_in (326 samples, 0.25%)alloc::raw_vec::RawVec<T,A>::with_capacity_in (324 samples, 0.25%)alloc::raw_vec::RawVec<T,A>::try_allocate_in (324 samples, 0.25%)<alloc::alloc::Global as core::alloc::Allocator>::allocate (324 samples, 0.25%)alloc::alloc::Global::alloc_impl (324 samples, 0.25%)alloc::alloc::alloc (324 samples, 0.25%)__rdl_alloc (324 samples, 0.25%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (324 samples, 0.25%)tokio::io::ready::Ready::intersection (24 samples, 0.02%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (199 samples, 0.15%)tokio::util::bit::Pack::unpack (16 samples, 0.01%)tokio::util::bit::unpack (16 samples, 0.01%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (19 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (17 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (16 samples, 0.01%)tokio::net::udp::UdpSocket::readable::{{closure}} (222 samples, 0.17%)tokio::net::udp::UdpSocket::ready::{{closure}} (222 samples, 0.17%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (50 samples, 0.04%)std::io::error::repr_bitpacked::Repr::data (14 samples, 0.01%)std::io::error::repr_bitpacked::decode_repr (14 samples, 0.01%)std::io::error::Error::kind (16 samples, 0.01%)<core::result::Result<T,E> as core::ops::try_trait::Try>::branch (14 samples, 0.01%)[unknown] (8,756 samples, 6.67%)[unknown][unknown] (8,685 samples, 6.61%)[unknown][unknown] (8,574 samples, 6.53%)[unknown][unknown] (8,415 samples, 6.41%)[unknown][unknown] (7,686 samples, 5.85%)[unknow..[unknown] (7,239 samples, 
5.51%)[unknow..[unknown] (6,566 samples, 5.00%)[unkno..[unknown] (5,304 samples, 4.04%)[unk..[unknown] (4,008 samples, 3.05%)[un..[unknown] (3,571 samples, 2.72%)[u..[unknown] (2,375 samples, 1.81%)[..[unknown] (1,844 samples, 1.40%)[unknown] (1,030 samples, 0.78%)[unknown] (344 samples, 0.26%)[unknown] (113 samples, 0.09%)__libc_recvfrom (8,903 samples, 6.78%)__libc_re..__GI___pthread_disable_asynccancel (22 samples, 0.02%)std::sys::pal::unix::cvt (20 samples, 0.02%)tokio::net::udp::UdpSocket::recv_buf_from::{{closure}}::{{closure}} (9,005 samples, 6.86%)tokio::ne..mio::net::udp::UdpSocket::recv_from (8,964 samples, 6.83%)mio::net:..mio::io_source::IoSource<T>::do_io (8,964 samples, 6.83%)mio::io_s..mio::sys::unix::stateless_io_source::IoSourceState::do_io (8,964 samples, 6.83%)mio::sys:..mio::net::udp::UdpSocket::recv_from::{{closure}} (8,964 samples, 6.83%)mio::net:..std::net::udp::UdpSocket::recv_from (8,964 samples, 6.83%)std::net:..std::sys_common::net::UdpSocket::recv_from (8,964 samples, 6.83%)std::sys_..std::sys::pal::unix::net::Socket::recv_from (8,964 samples, 6.83%)std::sys:..std::sys::pal::unix::net::Socket::recv_from_with_flags (8,964 samples, 6.83%)std::sys:..std::sys_common::net::sockaddr_to_addr (23 samples, 0.02%)tokio::runtime::io::registration::Registration::clear_readiness (18 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::clear_readiness (18 samples, 0.01%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (32 samples, 0.02%)torrust_tracker::servers::udp::server::Udp::receive_request::{{closure}} (9,967 samples, 7.59%)torrust_tr..tokio::net::udp::UdpSocket::recv_buf_from::{{closure}} (9,291 samples, 7.08%)tokio::ne..tokio::runtime::io::registration::Registration::async_io::{{closure}} (9,287 samples, 7.07%)tokio::ru..tokio::runtime::io::registration::Registration::readiness::{{closure}} (45 samples, 0.03%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (41 samples, 
0.03%)__memcpy_avx512_unaligned_erms (424 samples, 0.32%)__memcpy_avx512_unaligned_erms (493 samples, 0.38%)__memcpy_avx512_unaligned_erms (298 samples, 0.23%)syscall (1,105 samples, 0.84%)[unknown] (1,095 samples, 0.83%)[unknown] (1,091 samples, 0.83%)[unknown] (1,049 samples, 0.80%)[unknown] (998 samples, 0.76%)[unknown] (907 samples, 0.69%)[unknown] (710 samples, 0.54%)[unknown] (635 samples, 0.48%)[unknown] (538 samples, 0.41%)[unknown] (358 samples, 0.27%)[unknown] (256 samples, 0.19%)[unknown] (153 samples, 0.12%)[unknown] (96 samples, 0.07%)[unknown] (81 samples, 0.06%)tokio::runtime::context::with_scheduler (36 samples, 0.03%)std::thread::local::LocalKey<T>::try_with (31 samples, 0.02%)tokio::runtime::context::with_scheduler::{{closure}} (27 samples, 0.02%)tokio::runtime::context::scoped::Scoped<T>::with (27 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::with_current::{{closure}} (25 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task::{{closure}} (22 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_local (15 samples, 0.01%)core::sync::atomic::AtomicUsize::fetch_add (340 samples, 0.26%)core::sync::atomic::atomic_add (340 samples, 0.26%)tokio::runtime::scheduler::multi_thread::idle::Idle::notify_should_wakeup (354 samples, 0.27%)tokio::runtime::scheduler::multi_thread::idle::Idle::worker_to_notify (367 samples, 0.28%)[unknown] (95 samples, 0.07%)[unknown] (93 samples, 0.07%)[unknown] (92 samples, 0.07%)[unknown] (90 samples, 0.07%)[unknown] (82 samples, 0.06%)[unknown] (73 samples, 0.06%)[unknown] (63 samples, 0.05%)[unknown] (44 samples, 0.03%)[unknown] (40 samples, 0.03%)[unknown] (35 samples, 0.03%)[unknown] (30 samples, 0.02%)[unknown] (22 samples, 0.02%)[unknown] (21 samples, 0.02%)[unknown] (20 samples, 0.02%)[unknown] (17 samples, 
0.01%)tokio::runtime::driver::Handle::unpark (99 samples, 0.08%)tokio::runtime::driver::IoHandle::unpark (99 samples, 0.08%)tokio::runtime::io::driver::Handle::unpark (99 samples, 0.08%)mio::waker::Waker::wake (99 samples, 0.08%)mio::sys::unix::waker::fdbased::Waker::wake (99 samples, 0.08%)mio::sys::unix::waker::eventfd::WakerInternal::wake (99 samples, 0.08%)<&std::fs::File as std::io::Write>::write (99 samples, 0.08%)std::sys::pal::unix::fs::File::write (99 samples, 0.08%)std::sys::pal::unix::fd::FileDesc::write (99 samples, 0.08%)__GI___libc_write (99 samples, 0.08%)__GI___libc_write (99 samples, 0.08%)tokio::runtime::context::with_scheduler (1,615 samples, 1.23%)std::thread::local::LocalKey<T>::try_with (1,613 samples, 1.23%)tokio::runtime::context::with_scheduler::{{closure}} (1,612 samples, 1.23%)tokio::runtime::context::scoped::Scoped<T>::with (1,611 samples, 1.23%)tokio::runtime::scheduler::multi_thread::worker::with_current::{{closure}} (1,611 samples, 1.23%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task::{{closure}} (1,611 samples, 1.23%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_local (1,609 samples, 1.23%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_parked_local (1,609 samples, 1.23%)tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (101 samples, 0.08%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (101 samples, 0.08%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_option_task_without_yield (1,647 samples, 1.25%)tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::schedule_task (1,646 samples, 1.25%)tokio::runtime::scheduler::multi_thread::worker::with_current 
(1,646 samples, 1.25%)tokio::util::sharded_list::ShardGuard<L,<L as tokio::util::linked_list::Link>::Target>::push (23 samples, 0.02%)tokio::util::linked_list::LinkedList<L,<L as tokio::util::linked_list::Link>::Target>::push_front (18 samples, 0.01%)tokio::runtime::task::list::OwnedTasks<S>::bind_inner (104 samples, 0.08%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::lock_shard (60 samples, 0.05%)tokio::util::sharded_list::ShardedList<L,<L as tokio::util::linked_list::Link>::Target>::shard_inner (57 samples, 0.04%)tokio::loom::std::mutex::Mutex<T>::lock (51 samples, 0.04%)std::sync::mutex::Mutex<T>::lock (51 samples, 0.04%)std::sys::sync::mutex::futex::Mutex::lock (49 samples, 0.04%)core::sync::atomic::AtomicU32::compare_exchange (38 samples, 0.03%)core::sync::atomic::atomic_compare_exchange (38 samples, 0.03%)__memcpy_avx512_unaligned_erms (162 samples, 0.12%)__memcpy_avx512_unaligned_erms (34 samples, 0.03%)__GI___lll_lock_wake_private (127 samples, 0.10%)[unknown] (125 samples, 0.10%)[unknown] (124 samples, 0.09%)[unknown] (119 samples, 0.09%)[unknown] (110 samples, 0.08%)[unknown] (106 samples, 0.08%)[unknown] (87 samples, 0.07%)[unknown] (82 samples, 0.06%)[unknown] (51 samples, 0.04%)[unknown] (27 samples, 0.02%)[unknown] (19 samples, 0.01%)[unknown] (14 samples, 0.01%)_int_free (77 samples, 0.06%)[unknown] (1,207 samples, 0.92%)[unknown] (1,146 samples, 0.87%)[unknown] (1,126 samples, 0.86%)[unknown] (1,091 samples, 0.83%)[unknown] (1,046 samples, 0.80%)[unknown] (962 samples, 0.73%)[unknown] (914 samples, 0.70%)[unknown] (848 samples, 0.65%)[unknown] (774 samples, 0.59%)[unknown] (580 samples, 0.44%)[unknown] (456 samples, 0.35%)[unknown] (305 samples, 0.23%)[unknown] (85 samples, 0.06%)__GI_mprotect (2,474 samples, 1.88%)_..[unknown] (2,457 samples, 1.87%)[..[unknown] (2,440 samples, 1.86%)[..[unknown] (2,436 samples, 1.86%)[..[unknown] (2,435 samples, 1.85%)[..[unknown] (2,360 samples, 1.80%)[..[unknown] (2,203 
samples, 1.68%)[unknown] (1,995 samples, 1.52%)[unknown] (1,709 samples, 1.30%)[unknown] (1,524 samples, 1.16%)[unknown] (1,193 samples, 0.91%)[unknown] (865 samples, 0.66%)[unknown] (539 samples, 0.41%)[unknown] (259 samples, 0.20%)[unknown] (80 samples, 0.06%)[unknown] (29 samples, 0.02%)sysmalloc (3,786 samples, 2.88%)sy..grow_heap (2,509 samples, 1.91%)g.._int_malloc (4,038 samples, 3.08%)_in..unlink_chunk (31 samples, 0.02%)alloc::alloc::exchange_malloc (4,335 samples, 3.30%)all..<alloc::alloc::Global as core::alloc::Allocator>::allocate (4,329 samples, 3.30%)<al..alloc::alloc::Global::alloc_impl (4,329 samples, 3.30%)all..alloc::alloc::alloc (4,329 samples, 3.30%)all..__rdl_alloc (4,329 samples, 3.30%)__r..std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (4,329 samples, 3.30%)std..std::sys::pal::unix::alloc::aligned_malloc (4,329 samples, 3.30%)std..__posix_memalign (4,297 samples, 3.27%)__p..__posix_memalign (4,297 samples, 3.27%)__p.._mid_memalign (4,297 samples, 3.27%)_mi.._int_memalign (4,149 samples, 3.16%)_in..sysmalloc (18 samples, 0.01%)core::option::Option<T>::map (6,666 samples, 5.08%)core::..tokio::task::spawn::spawn_inner::{{closure}} (6,665 samples, 5.08%)tokio:..tokio::runtime::scheduler::Handle::spawn (6,665 samples, 5.08%)tokio:..tokio::runtime::scheduler::multi_thread::handle::Handle::spawn (6,664 samples, 5.08%)tokio:..tokio::runtime::scheduler::multi_thread::handle::Handle::bind_new_task (6,661 samples, 5.07%)tokio:..tokio::runtime::task::list::OwnedTasks<S>::bind (4,692 samples, 3.57%)toki..tokio::runtime::task::new_task (4,579 samples, 3.49%)tok..tokio::runtime::task::raw::RawTask::new (4,579 samples, 3.49%)tok..tokio::runtime::task::core::Cell<T,S>::new (4,579 samples, 3.49%)tok..alloc::boxed::Box<T>::new (4,389 samples, 3.34%)all..tokio::runtime::context::current::with_current (7,636 samples, 5.82%)tokio::..std::thread::local::LocalKey<T>::try_with (7,635 samples, 
5.81%)std::th..tokio::runtime::context::current::with_current::{{closure}} (7,188 samples, 5.47%)tokio::..tokio::task::spawn::spawn (7,670 samples, 5.84%)tokio::..tokio::task::spawn::spawn_inner (7,670 samples, 5.84%)tokio::..tokio::runtime::task::id::Id::next (24 samples, 0.02%)core::sync::atomic::AtomicU64::fetch_add (24 samples, 0.02%)core::sync::atomic::atomic_add (24 samples, 0.02%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (62,691 samples, 47.75%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_muttokio::runtime::task::core::Core<T,S>::poll::{{closure}} (62,691 samples, 47.75%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}}torrust_tracker::servers::udp::server::Udp::run_with_graceful_shutdown::{{closure}}::{{closure}} (18,228 samples, 13.88%)torrust_tracker::serv..torrust_tracker::servers::udp::server::Udp::run_udp_server::{{closure}} (18,226 samples, 13.88%)torrust_tracker::serv..torrust_tracker::servers::udp::server::Udp::spawn_request_processor (7,679 samples, 5.85%)torrust..__memcpy_avx512_unaligned_erms (38 samples, 0.03%)__memcpy_avx512_unaligned_erms (407 samples, 0.31%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (411 samples, 0.31%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (411 samples, 0.31%)tokio::runtime::task::core::Core<T,S>::poll (63,150 samples, 48.10%)tokio::runtime::task::core::Core<T,S>::polltokio::runtime::task::core::Core<T,S>::drop_future_or_output (459 samples, 0.35%)tokio::runtime::task::core::Core<T,S>::set_stage (459 samples, 0.35%)__memcpy_avx512_unaligned_erms (16 samples, 0.01%)__memcpy_avx512_unaligned_erms (398 samples, 0.30%)__memcpy_avx512_unaligned_erms (325 samples, 0.25%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (330 samples, 0.25%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (330 samples, 0.25%)tokio::runtime::task::core::Core<T,S>::set_stage (731 samples, 0.56%)tokio::runtime::task::harness::poll_future (63,908 samples, 
48.67%)tokio::runtime::task::harness::poll_futurestd::panic::catch_unwind (63,908 samples, 48.67%)std::panic::catch_unwindstd::panicking::try (63,908 samples, 48.67%)std::panicking::trystd::panicking::try::do_call (63,908 samples, 48.67%)std::panicking::try::do_call<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (63,908 samples, 48.67%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()..tokio::runtime::task::harness::poll_future::{{closure}} (63,908 samples, 48.67%)tokio::runtime::task::harness::poll_future::{{closure}}tokio::runtime::task::core::Core<T,S>::store_output (758 samples, 0.58%)tokio::runtime::coop::budget (65,027 samples, 49.53%)tokio::runtime::coop::budgettokio::runtime::coop::with_budget (65,027 samples, 49.53%)tokio::runtime::coop::with_budgettokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}} (65,009 samples, 49.51%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}}tokio::runtime::task::LocalNotified<S>::run (65,003 samples, 49.51%)tokio::runtime::task::LocalNotified<S>::runtokio::runtime::task::raw::RawTask::poll (65,003 samples, 49.51%)tokio::runtime::task::raw::RawTask::polltokio::runtime::task::raw::poll (64,538 samples, 49.15%)tokio::runtime::task::raw::polltokio::runtime::task::harness::Harness<T,S>::poll (64,493 samples, 49.12%)tokio::runtime::task::harness::Harness<T,S>::polltokio::runtime::task::harness::Harness<T,S>::poll_inner (63,919 samples, 48.68%)tokio::runtime::task::harness::Harness<T,S>::poll_innertokio::runtime::scheduler::multi_thread::stats::Stats::start_poll (93 samples, 0.07%)syscall (2,486 samples, 1.89%)s..[unknown] (2,424 samples, 1.85%)[..[unknown] (2,416 samples, 1.84%)[..[unknown] (2,130 samples, 1.62%)[unknown] (2,013 samples, 1.53%)[unknown] (1,951 samples, 1.49%)[unknown] (1,589 samples, 1.21%)[unknown] (1,415 samples, 1.08%)[unknown] (1,217 samples, 0.93%)[unknown] (820 samples, 
0.62%)[unknown] (564 samples, 0.43%)[unknown] (360 samples, 0.27%)[unknown] (244 samples, 0.19%)[unknown] (194 samples, 0.15%)tokio::runtime::scheduler::multi_thread::idle::Idle::notify_should_wakeup (339 samples, 0.26%)core::sync::atomic::AtomicUsize::fetch_add (337 samples, 0.26%)core::sync::atomic::atomic_add (337 samples, 0.26%)tokio::runtime::scheduler::multi_thread::idle::Idle::worker_to_notify (364 samples, 0.28%)[unknown] (154 samples, 0.12%)[unknown] (152 samples, 0.12%)[unknown] (143 samples, 0.11%)[unknown] (139 samples, 0.11%)[unknown] (131 samples, 0.10%)[unknown] (123 samples, 0.09%)[unknown] (110 samples, 0.08%)[unknown] (80 samples, 0.06%)[unknown] (74 samples, 0.06%)[unknown] (65 samples, 0.05%)[unknown] (64 samples, 0.05%)[unknown] (47 samples, 0.04%)[unknown] (44 samples, 0.03%)[unknown] (43 samples, 0.03%)[unknown] (40 samples, 0.03%)[unknown] (26 samples, 0.02%)[unknown] (20 samples, 0.02%)__GI___libc_write (158 samples, 0.12%)__GI___libc_write (158 samples, 0.12%)mio::sys::unix::waker::eventfd::WakerInternal::wake (159 samples, 0.12%)<&std::fs::File as std::io::Write>::write (159 samples, 0.12%)std::sys::pal::unix::fs::File::write (159 samples, 0.12%)std::sys::pal::unix::fd::FileDesc::write (159 samples, 0.12%)tokio::runtime::driver::Handle::unpark (168 samples, 0.13%)tokio::runtime::driver::IoHandle::unpark (168 samples, 0.13%)tokio::runtime::io::driver::Handle::unpark (168 samples, 0.13%)mio::waker::Waker::wake (165 samples, 0.13%)mio::sys::unix::waker::fdbased::Waker::wake (165 samples, 0.13%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (68,159 samples, 51.91%)tokio::runtime::scheduler::multi_thread::worker::Context::run_tasktokio::runtime::scheduler::multi_thread::worker::Core::transition_from_searching (3,024 samples, 2.30%)t..tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::transition_worker_from_searching (3,023 samples, 
2.30%)t..tokio::runtime::scheduler::multi_thread::worker::<impl tokio::runtime::scheduler::multi_thread::handle::Handle>::notify_parked_local (3,022 samples, 2.30%)t..tokio::runtime::scheduler::multi_thread::park::Unparker::unpark (171 samples, 0.13%)tokio::runtime::scheduler::multi_thread::park::Inner::unpark (171 samples, 0.13%)core::option::Option<T>::or_else (15 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::next_local_task::{{closure}} (14 samples, 0.01%)tokio::runtime::scheduler::multi_thread::queue::Local<T>::pop (14 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::next_local_task (18 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Core::tune_global_queue_interval (53 samples, 0.04%)tokio::runtime::scheduler::multi_thread::stats::Stats::tuned_global_queue_interval (53 samples, 0.04%)tokio::runtime::scheduler::multi_thread::worker::Core::next_task (107 samples, 0.08%)__GI___libc_free (17 samples, 0.01%)_int_free (17 samples, 0.01%)alloc::collections::btree::navigate::LazyLeafRange<alloc::collections::btree::node::marker::Dying,K,V>::deallocating_end (18 samples, 0.01%)alloc::collections::btree::navigate::<impl alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Dying,K,V,alloc::collections::btree::node::marker::Leaf>,alloc::collections::btree::node::marker::Edge>>::deallocating_end (18 samples, 0.01%)alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Dying,K,V,alloc::collections::btree::node::marker::LeafOrInternal>::deallocate_and_ascend (18 samples, 0.01%)<alloc::alloc::Global as core::alloc::Allocator>::deallocate (18 samples, 0.01%)alloc::alloc::dealloc (18 samples, 0.01%)__rdl_dealloc (18 samples, 0.01%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::dealloc (18 samples, 0.01%)alloc::collections::btree::map::IntoIter<K,V,A>::dying_next (19 samples, 
0.01%)tokio::runtime::task::Task<S>::shutdown (26 samples, 0.02%)tokio::runtime::task::raw::RawTask::shutdown (26 samples, 0.02%)tokio::runtime::task::raw::shutdown (26 samples, 0.02%)tokio::runtime::task::harness::Harness<T,S>::shutdown (26 samples, 0.02%)tokio::runtime::task::harness::cancel_task (26 samples, 0.02%)std::panic::catch_unwind (26 samples, 0.02%)std::panicking::try (26 samples, 0.02%)std::panicking::try::do_call (26 samples, 0.02%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (26 samples, 0.02%)core::ops::function::FnOnce::call_once (26 samples, 0.02%)tokio::runtime::task::harness::cancel_task::{{closure}} (26 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::drop_future_or_output (26 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::set_stage (26 samples, 0.02%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (26 samples, 0.02%)tokio::runtime::task::core::Core<T,S>::set_stage::{{closure}} (26 samples, 0.02%)alloc::sync::Arc<T,A>::drop_slow (26 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker::core::Tracker> (26 samples, 0.02%)core::ptr::drop_in_place<alloc::sync::Arc<torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>> (26 samples, 0.02%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (26 samples, 0.02%)alloc::sync::Arc<T,A>::drop_slow (26 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker_torrent_repository::repository::RwLockStd<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>> (26 samples, 0.02%)core::ptr::drop_in_place<std::sync::rwlock::RwLock<alloc::collections::btree::map::BTreeMap<torrust_tracker_primitives::info_hash::InfoHash,alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>> (26 samples, 
0.02%)core::ptr::drop_in_place<core::cell::UnsafeCell<alloc::collections::btree::map::BTreeMap<torrust_tracker_primitives::info_hash::InfoHash,alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>>> (26 samples, 0.02%)core::ptr::drop_in_place<alloc::collections::btree::map::BTreeMap<torrust_tracker_primitives::info_hash::InfoHash,alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>> (26 samples, 0.02%)<alloc::collections::btree::map::BTreeMap<K,V,A> as core::ops::drop::Drop>::drop (26 samples, 0.02%)core::mem::drop (26 samples, 0.02%)core::ptr::drop_in_place<alloc::collections::btree::map::IntoIter<torrust_tracker_primitives::info_hash::InfoHash,alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>>> (26 samples, 0.02%)<alloc::collections::btree::map::IntoIter<K,V,A> as core::ops::drop::Drop>::drop (26 samples, 0.02%)alloc::collections::btree::node::Handle<alloc::collections::btree::node::NodeRef<alloc::collections::btree::node::marker::Dying,K,V,NodeType>,alloc::collections::btree::node::marker::KV>::drop_key_val (24 samples, 0.02%)core::mem::maybe_uninit::MaybeUninit<T>::assume_init_drop (24 samples, 0.02%)core::ptr::drop_in_place<alloc::sync::Arc<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>>> (24 samples, 0.02%)<alloc::sync::Arc<T,A> as core::ops::drop::Drop>::drop (24 samples, 0.02%)alloc::sync::Arc<T,A>::drop_slow (21 samples, 0.02%)core::ptr::drop_in_place<std::sync::mutex::Mutex<torrust_tracker_torrent_repository::entry::Torrent>> (20 samples, 0.02%)core::ptr::drop_in_place<core::cell::UnsafeCell<torrust_tracker_torrent_repository::entry::Torrent>> (20 samples, 0.02%)core::ptr::drop_in_place<torrust_tracker_torrent_repository::entry::Torrent> (20 samples, 
0.02%)core::ptr::drop_in_place<alloc::collections::btree::map::BTreeMap<torrust_tracker_primitives::peer::Id,alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>>> (20 samples, 0.02%)<alloc::collections::btree::map::BTreeMap<K,V,A> as core::ops::drop::Drop>::drop (20 samples, 0.02%)core::mem::drop (20 samples, 0.02%)core::ptr::drop_in_place<alloc::collections::btree::map::IntoIter<torrust_tracker_primitives::peer::Id,alloc::sync::Arc<torrust_tracker_primitives::peer::Peer>>> (20 samples, 0.02%)<alloc::collections::btree::map::IntoIter<K,V,A> as core::ops::drop::Drop>::drop (20 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::pre_shutdown (33 samples, 0.03%)tokio::runtime::task::list::OwnedTasks<S>::close_and_shutdown_all (33 samples, 0.03%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (114 samples, 0.09%)alloc::sync::Arc<T,A>::inner (114 samples, 0.09%)core::ptr::non_null::NonNull<T>::as_ref (114 samples, 0.09%)core::iter::range::<impl core::iter::traits::iterator::Iterator for core::ops::range::Range<A>>::next (108 samples, 0.08%)<core::ops::range::Range<T> as core::iter::range::RangeIteratorImpl>::spec_next (108 samples, 0.08%)core::cmp::impls::<impl core::cmp::PartialOrd for usize>::lt (106 samples, 0.08%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (49 samples, 0.04%)alloc::sync::Arc<T,A>::inner (49 samples, 0.04%)core::ptr::non_null::NonNull<T>::as_ref (49 samples, 0.04%)core::num::<impl u32>::wrapping_sub (132 samples, 0.10%)core::sync::atomic::AtomicU64::load (40 samples, 0.03%)core::sync::atomic::atomic_load (40 samples, 0.03%)tokio::loom::std::atomic_u32::AtomicU32::unsync_load (48 samples, 0.04%)core::sync::atomic::AtomicU32::load (48 samples, 0.04%)core::sync::atomic::atomic_load (48 samples, 0.04%)<alloc::sync::Arc<T,A> as core::ops::deref::Deref>::deref (65 samples, 0.05%)alloc::sync::Arc<T,A>::inner (65 samples, 0.05%)core::ptr::non_null::NonNull<T>::as_ref (65 samples, 0.05%)core::num::<impl 
u32>::wrapping_sub (50 samples, 0.04%)core::sync::atomic::AtomicU32::load (55 samples, 0.04%)core::sync::atomic::atomic_load (55 samples, 0.04%)core::sync::atomic::AtomicU64::load (80 samples, 0.06%)core::sync::atomic::atomic_load (80 samples, 0.06%)tokio::runtime::scheduler::multi_thread::queue::pack (26 samples, 0.02%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (666 samples, 0.51%)tokio::runtime::scheduler::multi_thread::queue::unpack (147 samples, 0.11%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (1,036 samples, 0.79%)tokio::runtime::scheduler::multi_thread::queue::unpack (46 samples, 0.04%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_to_searching (49 samples, 0.04%)tokio::runtime::scheduler::multi_thread::idle::Idle::transition_worker_to_searching (21 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (2,414 samples, 1.84%)t..tokio::util::rand::FastRand::fastrand_n (24 samples, 0.02%)tokio::util::rand::FastRand::fastrand (24 samples, 0.02%)std::sys_common::backtrace::__rust_begin_short_backtrace (98,136 samples, 74.74%)std::sys_common::backtrace::__rust_begin_short_backtracetokio::runtime::blocking::pool::Spawner::spawn_thread::{{closure}} (98,136 samples, 74.74%)tokio::runtime::blocking::pool::Spawner::spawn_thread::{{closure}}tokio::runtime::blocking::pool::Inner::run (98,136 samples, 74.74%)tokio::runtime::blocking::pool::Inner::runtokio::runtime::blocking::pool::Task::run (98,042 samples, 74.67%)tokio::runtime::blocking::pool::Task::runtokio::runtime::task::UnownedTask<S>::run (98,042 samples, 74.67%)tokio::runtime::task::UnownedTask<S>::runtokio::runtime::task::raw::RawTask::poll (98,042 samples, 74.67%)tokio::runtime::task::raw::RawTask::polltokio::runtime::task::raw::poll (98,042 samples, 74.67%)tokio::runtime::task::raw::polltokio::runtime::task::harness::Harness<T,S>::poll (98,042 samples, 
74.67%)tokio::runtime::task::harness::Harness<T,S>::polltokio::runtime::task::harness::Harness<T,S>::poll_inner (98,042 samples, 74.67%)tokio::runtime::task::harness::Harness<T,S>::poll_innertokio::runtime::task::harness::poll_future (98,042 samples, 74.67%)tokio::runtime::task::harness::poll_futurestd::panic::catch_unwind (98,042 samples, 74.67%)std::panic::catch_unwindstd::panicking::try (98,042 samples, 74.67%)std::panicking::trystd::panicking::try::do_call (98,042 samples, 74.67%)std::panicking::try::do_call<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (98,042 samples, 74.67%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_oncetokio::runtime::task::harness::poll_future::{{closure}} (98,042 samples, 74.67%)tokio::runtime::task::harness::poll_future::{{closure}}tokio::runtime::task::core::Core<T,S>::poll (98,042 samples, 74.67%)tokio::runtime::task::core::Core<T,S>::polltokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (98,042 samples, 74.67%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_muttokio::runtime::task::core::Core<T,S>::poll::{{closure}} (98,042 samples, 74.67%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}}<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (98,042 samples, 74.67%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::polltokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (98,042 samples, 74.67%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}}tokio::runtime::scheduler::multi_thread::worker::run (98,042 samples, 74.67%)tokio::runtime::scheduler::multi_thread::worker::runtokio::runtime::context::runtime::enter_runtime (98,042 samples, 74.67%)tokio::runtime::context::runtime::enter_runtimetokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (98,042 samples, 
74.67%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}tokio::runtime::context::set_scheduler (98,042 samples, 74.67%)tokio::runtime::context::set_schedulerstd::thread::local::LocalKey<T>::with (98,042 samples, 74.67%)std::thread::local::LocalKey<T>::withstd::thread::local::LocalKey<T>::try_with (98,042 samples, 74.67%)std::thread::local::LocalKey<T>::try_withtokio::runtime::context::set_scheduler::{{closure}} (98,042 samples, 74.67%)tokio::runtime::context::set_scheduler::{{closure}}tokio::runtime::context::scoped::Scoped<T>::set (98,042 samples, 74.67%)tokio::runtime::context::scoped::Scoped<T>::settokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (98,042 samples, 74.67%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}}tokio::runtime::scheduler::multi_thread::worker::Context::run (98,042 samples, 74.67%)tokio::runtime::scheduler::multi_thread::worker::Context::runstd::panic::catch_unwind (98,137 samples, 74.74%)std::panic::catch_unwindstd::panicking::try (98,137 samples, 74.74%)std::panicking::trystd::panicking::try::do_call (98,137 samples, 74.74%)std::panicking::try::do_call<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (98,137 samples, 74.74%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_oncestd::thread::Builder::spawn_unchecked_::{{closure}}::{{closure}} (98,137 samples, 74.74%)std::thread::Builder::spawn_unchecked_::{{closure}}::{{closure}}<alloc::boxed::Box<F,A> as core::ops::function::FnOnce<Args>>::call_once (98,139 samples, 74.74%)<alloc::boxed::Box<F,A> as core::ops::function::FnOnce<Args>>::call_once<alloc::boxed::Box<F,A> as core::ops::function::FnOnce<Args>>::call_once (98,139 samples, 74.74%)<alloc::boxed::Box<F,A> as core::ops::function::FnOnce<Args>>::call_oncecore::ops::function::FnOnce::call_once{{vtable.shim}} (98,139 samples, 
74.74%)core::ops::function::FnOnce::call_once{{vtable.shim}}std::thread::Builder::spawn_unchecked_::{{closure}} (98,139 samples, 74.74%)std::thread::Builder::spawn_unchecked_::{{closure}}clone3 (98,205 samples, 74.79%)clone3start_thread (98,205 samples, 74.79%)start_threadstd::sys::pal::unix::thread::Thread::new::thread_start (98,158 samples, 74.76%)std::sys::pal::unix::thread::Thread::new::thread_startcore::ptr::drop_in_place<std::sys::pal::unix::stack_overflow::Handler> (19 samples, 0.01%)<std::sys::pal::unix::stack_overflow::Handler as core::ops::drop::Drop>::drop (19 samples, 0.01%)std::sys::pal::unix::stack_overflow::imp::drop_handler (19 samples, 0.01%)__GI_munmap (19 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (18 samples, 0.01%)[unknown] (17 samples, 0.01%)[unknown] (16 samples, 0.01%)core::fmt::Formatter::pad_integral (112 samples, 0.09%)core::fmt::Formatter::pad_integral::write_prefix (59 samples, 0.04%)core::fmt::Formatter::pad_integral (16 samples, 0.01%)core::fmt::write (20 samples, 0.02%)core::ptr::drop_in_place<aquatic_udp_protocol::response::Response> (19 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::task::core::Stage<torrust_tracker::servers::udp::server::Udp::process_request::{{closure}}>> (51 samples, 0.04%)rand_chacha::guts::round (18 samples, 0.01%)rand_chacha::guts::refill_wide::impl_avx2 (26 samples, 0.02%)rand_chacha::guts::refill_wide::fn_impl (26 samples, 0.02%)rand_chacha::guts::refill_wide_impl (26 samples, 0.02%)rand_chacha::guts::refill_wide (14 samples, 0.01%)std_detect::detect::arch::x86::__is_feature_detected::avx2 (14 samples, 0.01%)std_detect::detect::check_for (14 samples, 0.01%)std_detect::detect::cache::test (14 samples, 0.01%)std_detect::detect::cache::Cache::test (14 samples, 0.01%)core::sync::atomic::AtomicUsize::load (14 samples, 0.01%)core::sync::atomic::atomic_load (14 samples, 
0.01%)core::cell::RefCell<T>::borrow_mut (81 samples, 0.06%)core::cell::RefCell<T>::try_borrow_mut (81 samples, 0.06%)core::cell::BorrowRefMut::new (81 samples, 0.06%)std::sys::pal::unix::time::Timespec::now (164 samples, 0.12%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task (106 samples, 0.08%)tokio::runtime::coop::budget (105 samples, 0.08%)tokio::runtime::coop::with_budget (105 samples, 0.08%)tokio::runtime::scheduler::multi_thread::worker::Context::run_task::{{closure}} (96 samples, 0.07%)std::sys::pal::unix::time::Timespec::sub_timespec (35 samples, 0.03%)std::sys::sync::mutex::futex::Mutex::lock_contended (15 samples, 0.01%)syscall (90 samples, 0.07%)tokio::runtime::io::scheduled_io::ScheduledIo::wake (15 samples, 0.01%)tokio::runtime::scheduler::multi_thread::worker::Context::park (22 samples, 0.02%)tokio::runtime::scheduler::multi_thread::worker::Core::transition_from_parked (21 samples, 0.02%)<tokio::runtime::blocking::task::BlockingTask<T> as core::future::future::Future>::poll (61 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::Launch::launch::{{closure}} (61 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::run (61 samples, 0.05%)tokio::runtime::context::runtime::enter_runtime (61 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}} (61 samples, 0.05%)tokio::runtime::context::set_scheduler (61 samples, 0.05%)std::thread::local::LocalKey<T>::with (61 samples, 0.05%)std::thread::local::LocalKey<T>::try_with (61 samples, 0.05%)tokio::runtime::context::set_scheduler::{{closure}} (61 samples, 0.05%)tokio::runtime::context::scoped::Scoped<T>::set (61 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::run::{{closure}}::{{closure}} (61 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::Context::run (61 samples, 0.05%)tokio::runtime::scheduler::multi_thread::worker::Core::steal_work (19 samples, 
0.01%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into (17 samples, 0.01%)tokio::runtime::scheduler::multi_thread::queue::Steal<T>::steal_into2 (17 samples, 0.01%)tokio::runtime::context::CONTEXT::__getit (14 samples, 0.01%)core::cell::Cell<T>::get (14 samples, 0.01%)core::ptr::drop_in_place<tokio::runtime::task::core::TaskIdGuard> (22 samples, 0.02%)<tokio::runtime::task::core::TaskIdGuard as core::ops::drop::Drop>::drop (22 samples, 0.02%)tokio::runtime::context::set_current_task_id (22 samples, 0.02%)std::thread::local::LocalKey<T>::try_with (22 samples, 0.02%)tokio::loom::std::unsafe_cell::UnsafeCell<T>::with_mut (112 samples, 0.09%)tokio::runtime::task::core::Core<T,S>::poll::{{closure}} (111 samples, 0.08%)tokio::runtime::task::harness::poll_future (125 samples, 0.10%)std::panic::catch_unwind (125 samples, 0.10%)std::panicking::try (125 samples, 0.10%)std::panicking::try::do_call (125 samples, 0.10%)<core::panic::unwind_safe::AssertUnwindSafe<F> as core::ops::function::FnOnce<()>>::call_once (125 samples, 0.10%)tokio::runtime::task::harness::poll_future::{{closure}} (125 samples, 0.10%)tokio::runtime::task::core::Core<T,S>::poll (125 samples, 0.10%)tokio::runtime::task::raw::poll (157 samples, 0.12%)tokio::runtime::task::harness::Harness<T,S>::poll (135 samples, 0.10%)tokio::runtime::task::harness::Harness<T,S>::poll_inner (135 samples, 0.10%)tokio::runtime::time::Driver::park_internal (15 samples, 0.01%)torrust_tracker::bootstrap::logging::INIT (17 samples, 0.01%)__memcpy_avx512_unaligned_erms (397 samples, 0.30%)_int_free (24 samples, 0.02%)_int_malloc (132 samples, 0.10%)torrust_tracker::servers::udp::logging::log_request::__CALLSITE::META (570 samples, 0.43%)__GI___lll_lock_wait_private (22 samples, 0.02%)futex_wait (14 samples, 0.01%)__memcpy_avx512_unaligned_erms (299 samples, 0.23%)_int_free (16 samples, 0.01%)torrust_tracker::servers::udp::logging::log_request::__CALLSITE (361 samples, 
0.27%)torrust_tracker::servers::udp::server::Udp::process_request::{{closure}} (41 samples, 0.03%)torrust_tracker::servers::udp::handlers::handle_packet::{{closure}} (23 samples, 0.02%)torrust_tracker::servers::udp::server::Udp::process_valid_request::{{closure}} (53 samples, 0.04%)torrust_tracker::servers::udp::server::Udp::send_response::{{closure}} (14 samples, 0.01%)<tokio::runtime::io::scheduled_io::Readiness as core::future::future::Future>::poll (63 samples, 0.05%)<tokio::runtime::io::scheduled_io::Readiness as core::ops::drop::Drop>::drop (21 samples, 0.02%)__GI___libc_malloc (18 samples, 0.01%)alloc::vec::Vec<T>::with_capacity (116 samples, 0.09%)alloc::vec::Vec<T,A>::with_capacity_in (116 samples, 0.09%)alloc::raw_vec::RawVec<T,A>::with_capacity_in (116 samples, 0.09%)alloc::raw_vec::RawVec<T,A>::try_allocate_in (116 samples, 0.09%)<alloc::alloc::Global as core::alloc::Allocator>::allocate (116 samples, 0.09%)alloc::alloc::Global::alloc_impl (116 samples, 0.09%)alloc::alloc::alloc (116 samples, 0.09%)__rdl_alloc (116 samples, 0.09%)std::sys::pal::unix::alloc::<impl core::alloc::global::GlobalAlloc for std::alloc::System>::alloc (116 samples, 0.09%)tokio::runtime::io::registration::Registration::readiness::{{closure}} (53 samples, 0.04%)tokio::runtime::io::scheduled_io::ScheduledIo::readiness::{{closure}} (53 samples, 0.04%)core::ptr::drop_in_place<tokio::runtime::io::scheduled_io::Readiness> (53 samples, 0.04%)_int_malloc (21 samples, 0.02%)[unknown] (36 samples, 0.03%)[unknown] (16 samples, 0.01%)core::mem::zeroed (27 samples, 0.02%)core::mem::maybe_uninit::MaybeUninit<T>::zeroed (27 samples, 0.02%)core::ptr::mut_ptr::<impl *mut T>::write_bytes (27 samples, 0.02%)core::intrinsics::write_bytes (27 samples, 0.02%)[unknown] (27 samples, 0.02%)tokio::net::udp::UdpSocket::recv_buf_from::{{closure}}::{{closure}} (64 samples, 0.05%)mio::net::udp::UdpSocket::recv_from (49 samples, 0.04%)mio::io_source::IoSource<T>::do_io (49 samples, 
0.04%)mio::sys::unix::stateless_io_source::IoSourceState::do_io (49 samples, 0.04%)mio::net::udp::UdpSocket::recv_from::{{closure}} (49 samples, 0.04%)std::net::udp::UdpSocket::recv_from (49 samples, 0.04%)std::sys_common::net::UdpSocket::recv_from (49 samples, 0.04%)std::sys::pal::unix::net::Socket::recv_from (49 samples, 0.04%)std::sys::pal::unix::net::Socket::recv_from_with_flags (49 samples, 0.04%)torrust_tracker::servers::udp::server::Udp::receive_request::{{closure}} (271 samples, 0.21%)tokio::net::udp::UdpSocket::recv_buf_from::{{closure}} (143 samples, 0.11%)tokio::runtime::io::registration::Registration::async_io::{{closure}} (141 samples, 0.11%)tokio::runtime::io::registration::Registration::clear_readiness (15 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::clear_readiness (15 samples, 0.01%)tokio::runtime::io::scheduled_io::ScheduledIo::set_readiness (15 samples, 0.01%)torrust_tracker::servers::udp::server::Udp::run_with_graceful_shutdown::{{closure}}::{{closure}} (359 samples, 0.27%)torrust_tracker::servers::udp::server::Udp::run_udp_server::{{closure}} (346 samples, 0.26%)torrust_tracker::servers::udp::server::Udp::spawn_request_processor (39 samples, 0.03%)tokio::task::spawn::spawn (39 samples, 0.03%)tokio::task::spawn::spawn_inner (39 samples, 0.03%)tokio::runtime::context::current::with_current (39 samples, 0.03%)std::thread::local::LocalKey<T>::try_with (39 samples, 0.03%)tokio::runtime::context::current::with_current::{{closure}} (39 samples, 0.03%)core::option::Option<T>::map (39 samples, 0.03%)tokio::task::spawn::spawn_inner::{{closure}} (39 samples, 0.03%)tokio::runtime::scheduler::Handle::spawn (39 samples, 0.03%)tokio::runtime::scheduler::multi_thread::handle::Handle::spawn (39 samples, 0.03%)tokio::runtime::scheduler::multi_thread::handle::Handle::bind_new_task (39 samples, 0.03%)tokio::runtime::task::list::OwnedTasks<S>::bind (34 samples, 0.03%)all (131,301 samples, 100%)tokio-runtime-w (131,061 samples, 
99.82%)tokio-runtime-w \ No newline at end of file diff --git a/docs/media/kcachegrind-screenshot.png b/docs/media/kcachegrind-screenshot.png new file mode 100644 index 000000000..a10eb5ad6 Binary files /dev/null and b/docs/media/kcachegrind-screenshot.png differ diff --git a/docs/media/torrent-repository-implementations-benchmarking-report.png b/docs/media/torrent-repository-implementations-benchmarking-report.png new file mode 100644 index 000000000..ee87c6d42 Binary files /dev/null and b/docs/media/torrent-repository-implementations-benchmarking-report.png differ diff --git a/docs/profiling.md b/docs/profiling.md new file mode 100644 index 000000000..8038f9e77 --- /dev/null +++ b/docs/profiling.md @@ -0,0 +1,132 @@ +# Profiling + +## Using flamegraph + +### Requirements + +You need to install some dependencies. For Ubuntu you can run: + +```console +sudo apt-get install clang lld +``` + +You also need to uncomment these lines in the cargo [config.toml](./../.cargo/config.toml) file. + +```toml +[target.x86_64-unknown-linux-gnu] +linker = "/usr/bin/clang" +rustflags = ["-Clink-arg=-fuse-ld=lld", "-Clink-arg=-Wl,--no-rosegment"] +``` + +Follow the [flamegraph](https://github.com/flamegraph-rs/flamegraph) instructions for installation. + +Apart from running the tracker you will need to run some request if you want to profile services while they are processing requests. + +You can use the aquatic [UDP load test](https://github.com/greatest-ape/aquatic/tree/master/crates/udp_load_test) script. + +### Generate flamegraph + +To generate the graph you will need to: + +1. Build the tracker for profiling. +2. Run the aquatic UDP load test. +3. Run the tracker with flamegraph and profiling configuration. 
+ +```console +cargo build --profile=release-debug --bin=profiling +./target/release/aquatic_udp_load_test -c "load-test-config.toml" +sudo TORRUST_TRACKER_CONFIG_TOML_PATH="./share/default/config/tracker.udp.benchmarking.toml" /home/USER/.cargo/bin/flamegraph -- ./target/release-debug/profiling 60 +``` + +__NOTICE__: You need to install the `aquatic_udp_load_test` program. + +The output should be like the following: + +```output +Loading configuration file: `./share/default/config/tracker.udp.benchmarking.toml` ... +Torrust successfully shutdown. +[ perf record: Woken up 23377 times to write data ] +Warning: +Processed 533730 events and lost 3 chunks! + +Check IO/CPU overload! + +[ perf record: Captured and wrote 5899.806 MB perf.data (373239 samples) ] +writing flamegraph to "flamegraph.svg" +``` + +![flamegraph](./media/flamegraph.svg) + +__NOTICE__: You need to provide the absolute path for the installed `flamegraph` app if you use sudo. Replace `/home/USER/.cargo/bin/flamegraph` with the location of your installed `flamegraph` app. You can run it without sudo but you can get a warning message like the following: + +```output +WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted, +check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid. + +Samples in kernel functions may not be resolved if a suitable vmlinux +file is not found in the buildid cache or in the vmlinux path. + +Samples in kernel modules won't be resolved at all. + +If some relocation was applied (e.g. kexec) symbols may be misresolved +even with a suitable vmlinux or kallsyms file. + +Couldn't record kernel reference relocation symbol +Symbol resolution may be skewed if relocation was used (e.g. kexec). +Check /proc/kallsyms permission or run as root. +Loading configuration file: `./share/default/config/tracker.udp.benchmarking.toml` ... +``` + +And some bars in the graph will have the `unknown` label. 
+ +![flamegraph generated without sudo](./media/flamegraph_generated_without_sudo.svg) + +## Using valgrind and kcachegrind + +You need to: + +1. Build an run the tracker for profiling. +2. Make requests to the tracker while it's running. + +Build and the binary for profiling: + +```console +RUSTFLAGS='-g' cargo build --release --bin profiling \ + && export TORRUST_TRACKER_CONFIG_TOML_PATH="./share/default/config/tracker.udp.benchmarking.toml" \ + && valgrind \ + --tool=callgrind \ + --callgrind-out-file=callgrind.out \ + --collect-jumps=yes \ + --simulate-cache=yes \ + ./target/release/profiling 60 +``` + +> NOTICE: You should make requests to the services you want to profile. For example, using the [UDP load test](./benchmarking.md#run-udp-load-test). + +After running the tracker with ` **The `[semantic version]` is bumped according to releases, new features, and breaking changes.** @@ -7,6 +7,20 @@ ## Process: +**Note**: this guide assumes that the your git `torrust` remote is like this: + +```sh +git remote show torrust +``` + +```s +* remote torrust + Fetch URL: git@github.com:torrust/torrust-tracker.git + Push URL: git@github.com:torrust/torrust-tracker.git +... +``` + + ### 1. The `develop` branch is ready for a release. The `develop` branch should have the version `[semantic version]-develop` that is ready to be released. @@ -22,6 +36,7 @@ git push --force torrust develop:staging/main ```sh git stash git switch staging/main +git reset --hard torrust/staging/main # change `[semantic version]-develop` to `[semantic version]`. git add -A git commit -m "release: version [semantic version]" @@ -65,7 +80,8 @@ git push --force torrust main:staging/develop ```sh git stash -git switch staging/main +git switch staging/develop +git reset --hard torrust/staging/develop # change `[semantic version]` to `(next)[semantic version]-develop`. 
git add -A git commit -m "develop: bump to version (next)[semantic version]-develop" diff --git a/migrations/README.md b/migrations/README.md new file mode 100644 index 000000000..090c46ccb --- /dev/null +++ b/migrations/README.md @@ -0,0 +1,5 @@ +# Database Migrations + +We don't support automatic migrations yet. The tracker creates all the needed tables when it starts. The SQL sentences are hardcoded in each database driver. + +The migrations in this folder were introduced to add some new changes (permanent keys) and to allow users to migrate to the new version. In the future, we will remove the hardcoded SQL and start using a Rust crate for database migrations. For the time being, if you are using the initial schema described in the migration `20240730183000_torrust_tracker_create_all_tables.sql` you will need to run all the subsequent migrations manually. diff --git a/migrations/mysql/20240730183000_torrust_tracker_create_all_tables.sql b/migrations/mysql/20240730183000_torrust_tracker_create_all_tables.sql new file mode 100644 index 000000000..407ae4dd1 --- /dev/null +++ b/migrations/mysql/20240730183000_torrust_tracker_create_all_tables.sql @@ -0,0 +1,21 @@ +CREATE TABLE + IF NOT EXISTS whitelist ( + id integer PRIMARY KEY AUTO_INCREMENT, + info_hash VARCHAR(40) NOT NULL UNIQUE + ); + +CREATE TABLE + IF NOT EXISTS torrents ( + id integer PRIMARY KEY AUTO_INCREMENT, + info_hash VARCHAR(40) NOT NULL UNIQUE, + completed INTEGER DEFAULT 0 NOT NULL + ); + +CREATE TABLE + IF NOT EXISTS `keys` ( + `id` INT NOT NULL AUTO_INCREMENT, + `key` VARCHAR(32) NOT NULL, + `valid_until` INT (10) NOT NULL, + PRIMARY KEY (`id`), + UNIQUE (`key`) + ); \ No newline at end of file diff --git a/migrations/mysql/20240730183500_torrust_tracker_keys_valid_until_nullable.sql b/migrations/mysql/20240730183500_torrust_tracker_keys_valid_until_nullable.sql new file mode 100644 index 000000000..2602797d6 --- /dev/null +++ 
b/migrations/mysql/20240730183500_torrust_tracker_keys_valid_until_nullable.sql @@ -0,0 +1 @@ +ALTER TABLE `keys` CHANGE `valid_until` `valid_until` INT (10); \ No newline at end of file diff --git a/migrations/sqlite/20240730183000_torrust_tracker_create_all_tables.sql b/migrations/sqlite/20240730183000_torrust_tracker_create_all_tables.sql new file mode 100644 index 000000000..bd451bf8b --- /dev/null +++ b/migrations/sqlite/20240730183000_torrust_tracker_create_all_tables.sql @@ -0,0 +1,19 @@ +CREATE TABLE + IF NOT EXISTS whitelist ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + info_hash TEXT NOT NULL UNIQUE + ); + +CREATE TABLE + IF NOT EXISTS torrents ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + info_hash TEXT NOT NULL UNIQUE, + completed INTEGER DEFAULT 0 NOT NULL + ); + +CREATE TABLE + IF NOT EXISTS keys ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + key TEXT NOT NULL UNIQUE, + valid_until INTEGER NOT NULL + ); \ No newline at end of file diff --git a/migrations/sqlite/20240730183500_torrust_tracker_keys_valid_until_nullable.sql b/migrations/sqlite/20240730183500_torrust_tracker_keys_valid_until_nullable.sql new file mode 100644 index 000000000..c6746e3ee --- /dev/null +++ b/migrations/sqlite/20240730183500_torrust_tracker_keys_valid_until_nullable.sql @@ -0,0 +1,12 @@ +CREATE TABLE + IF NOT EXISTS keys_new ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + key TEXT NOT NULL UNIQUE, + valid_until INTEGER + ); + +INSERT INTO keys_new SELECT * FROM `keys`; + +DROP TABLE `keys`; + +ALTER TABLE keys_new RENAME TO `keys`; \ No newline at end of file diff --git a/packages/clock/Cargo.toml b/packages/clock/Cargo.toml new file mode 100644 index 000000000..0177b2fb3 --- /dev/null +++ b/packages/clock/Cargo.toml @@ -0,0 +1,24 @@ +[package] +description = "A library to a clock for the torrust tracker." 
+keywords = ["clock", "library", "torrents"] +name = "torrust-tracker-clock" +readme = "README.md" + +authors.workspace = true +categories.workspace = true +documentation.workspace = true +edition.workspace = true +homepage.workspace = true +license.workspace = true +publish.workspace = true +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[dependencies] +chrono = { version = "0", default-features = false, features = ["clock"] } +lazy_static = "1" + +torrust-tracker-primitives = { version = "3.0.0-alpha.12", path = "../primitives" } + +[dev-dependencies] diff --git a/packages/clock/README.md b/packages/clock/README.md new file mode 100644 index 000000000..bfdd7808f --- /dev/null +++ b/packages/clock/README.md @@ -0,0 +1,11 @@ +# Torrust Tracker Clock + +A library to provide a working and mockable clock for the [Torrust Tracker](https://github.com/torrust/torrust-tracker). + +## Documentation + +[Crate documentation](https://docs.rs/torrust-tracker-torrent-clock). + +## License + +The project is licensed under the terms of the [GNU AFFERO GENERAL PUBLIC LICENSE](./LICENSE). diff --git a/packages/clock/src/clock/mod.rs b/packages/clock/src/clock/mod.rs new file mode 100644 index 000000000..50afbc9db --- /dev/null +++ b/packages/clock/src/clock/mod.rs @@ -0,0 +1,72 @@ +use std::time::Duration; + +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use self::stopped::StoppedClock; +use self::working::WorkingClock; + +pub mod stopped; +pub mod working; + +/// A generic structure that represents a clock. +/// +/// It can be either the working clock (production) or the stopped clock +/// (testing). It implements the `Time` trait, which gives you the current time. +#[derive(Debug)] +pub struct Clock { + clock: std::marker::PhantomData, +} + +/// The working clock. It returns the current time. +pub type Working = Clock; +/// The stopped clock. It returns always the same fixed time. 
+pub type Stopped = Clock; + +/// Trait for types that can be used as a timestamp clock. +pub trait Time: Sized { + fn now() -> DurationSinceUnixEpoch; + + fn dbg_clock_type() -> String; + + #[must_use] + fn now_add(add_time: &Duration) -> Option { + Self::now().checked_add(*add_time) + } + #[must_use] + fn now_sub(sub_time: &Duration) -> Option { + Self::now().checked_sub(*sub_time) + } +} + +#[cfg(test)] +mod tests { + use std::any::TypeId; + use std::time::Duration; + + use crate::clock::{self, Stopped, Time, Working}; + use crate::CurrentClock; + + #[test] + fn it_should_be_the_stopped_clock_as_default_when_testing() { + // We are testing, so we should default to the fixed time. + assert_eq!(TypeId::of::(), TypeId::of::()); + assert_eq!(Stopped::now(), CurrentClock::now()); + } + + #[test] + fn it_should_have_different_times() { + assert_ne!(TypeId::of::(), TypeId::of::()); + assert_ne!(Stopped::now(), Working::now()); + } + + #[test] + fn it_should_use_stopped_time_for_testing() { + assert_eq!(CurrentClock::dbg_clock_type(), "Stopped".to_owned()); + + let time = CurrentClock::now(); + std::thread::sleep(Duration::from_millis(50)); + let time_2 = CurrentClock::now(); + + assert_eq!(time, time_2); + } +} diff --git a/packages/clock/src/clock/stopped/mod.rs b/packages/clock/src/clock/stopped/mod.rs new file mode 100644 index 000000000..57655ab75 --- /dev/null +++ b/packages/clock/src/clock/stopped/mod.rs @@ -0,0 +1,210 @@ +/// Trait for types that can be used as a timestamp clock stopped +/// at a given time. + +#[allow(clippy::module_name_repetitions)] +pub struct StoppedClock {} + +#[allow(clippy::module_name_repetitions)] +pub trait Stopped: clock::Time { + /// It sets the clock to a given time. + fn local_set(unix_time: &DurationSinceUnixEpoch); + + /// It sets the clock to the Unix Epoch. + fn local_set_to_unix_epoch() { + Self::local_set(&DurationSinceUnixEpoch::ZERO); + } + + /// It sets the clock to the time the application started. 
+ fn local_set_to_app_start_time(); + + /// It sets the clock to the current system time. + fn local_set_to_system_time_now(); + + /// It adds a `Duration` to the clock. + /// + /// # Errors + /// + /// Will return `IntErrorKind` if `duration` would overflow the internal `Duration`. + fn local_add(duration: &Duration) -> Result<(), IntErrorKind>; + + /// It subtracts a `Duration` from the clock. + /// # Errors + /// + /// Will return `IntErrorKind` if `duration` would underflow the internal `Duration`. + fn local_sub(duration: &Duration) -> Result<(), IntErrorKind>; + + /// It resets the clock to default fixed time that is application start time (or the unix epoch when testing). + fn local_reset(); +} + +use std::num::IntErrorKind; +use std::time::Duration; + +use super::{DurationSinceUnixEpoch, Time}; +use crate::clock; + +impl Time for clock::Stopped { + fn now() -> DurationSinceUnixEpoch { + detail::FIXED_TIME.with(|time| { + return *time.borrow(); + }) + } + + fn dbg_clock_type() -> String { + "Stopped".to_owned() + } +} + +impl Stopped for clock::Stopped { + fn local_set(unix_time: &DurationSinceUnixEpoch) { + detail::FIXED_TIME.with(|time| { + *time.borrow_mut() = *unix_time; + }); + } + + fn local_set_to_app_start_time() { + Self::local_set(&detail::get_app_start_time()); + } + + fn local_set_to_system_time_now() { + Self::local_set(&detail::get_app_start_time()); + } + + fn local_add(duration: &Duration) -> Result<(), IntErrorKind> { + detail::FIXED_TIME.with(|time| { + let time_borrowed = *time.borrow(); + *time.borrow_mut() = match time_borrowed.checked_add(*duration) { + Some(time) => time, + None => { + return Err(IntErrorKind::PosOverflow); + } + }; + Ok(()) + }) + } + + fn local_sub(duration: &Duration) -> Result<(), IntErrorKind> { + detail::FIXED_TIME.with(|time| { + let time_borrowed = *time.borrow(); + *time.borrow_mut() = match time_borrowed.checked_sub(*duration) { + Some(time) => time, + None => { + return Err(IntErrorKind::NegOverflow); + } + 
}; + Ok(()) + }) + } + + fn local_reset() { + Self::local_set(&detail::get_default_fixed_time()); + } +} + +#[cfg(test)] +mod tests { + use std::thread; + use std::time::Duration; + + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::clock::stopped::Stopped as _; + use crate::clock::{Stopped, Time, Working}; + + #[test] + fn it_should_default_to_zero_when_testing() { + assert_eq!(Stopped::now(), DurationSinceUnixEpoch::ZERO); + } + + #[test] + fn it_should_possible_to_set_the_time() { + // Check we start with ZERO. + assert_eq!(Stopped::now(), Duration::ZERO); + + // Set to Current Time and Check + let timestamp = Working::now(); + Stopped::local_set(×tamp); + assert_eq!(Stopped::now(), timestamp); + + // Elapse the Current Time and Check + Stopped::local_add(×tamp).unwrap(); + assert_eq!(Stopped::now(), timestamp + timestamp); + + // Reset to ZERO and Check + Stopped::local_reset(); + assert_eq!(Stopped::now(), Duration::ZERO); + } + + #[test] + fn it_should_default_to_zero_on_thread_exit() { + assert_eq!(Stopped::now(), Duration::ZERO); + let after5 = Working::now_add(&Duration::from_secs(5)).unwrap(); + Stopped::local_set(&after5); + assert_eq!(Stopped::now(), after5); + + let t = thread::spawn(move || { + // each thread starts out with the initial value of ZERO + assert_eq!(Stopped::now(), Duration::ZERO); + + // and gets set to the current time. 
+ let timestamp = Working::now(); + Stopped::local_set(×tamp); + assert_eq!(Stopped::now(), timestamp); + }); + + // wait for the thread to complete and bail out on panic + t.join().unwrap(); + + // we retain our original value of current time + 5sec despite the child thread + assert_eq!(Stopped::now(), after5); + + // Reset to ZERO and Check + Stopped::local_reset(); + assert_eq!(Stopped::now(), Duration::ZERO); + } +} + +mod detail { + use std::cell::RefCell; + use std::time::SystemTime; + + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::static_time; + + thread_local!(pub static FIXED_TIME: RefCell = RefCell::new(get_default_fixed_time())); + + pub fn get_app_start_time() -> DurationSinceUnixEpoch { + (*static_time::TIME_AT_APP_START) + .duration_since(SystemTime::UNIX_EPOCH) + .unwrap() + } + + #[cfg(not(test))] + pub fn get_default_fixed_time() -> DurationSinceUnixEpoch { + get_app_start_time() + } + + #[cfg(test)] + pub fn get_default_fixed_time() -> DurationSinceUnixEpoch { + DurationSinceUnixEpoch::ZERO + } + + #[cfg(test)] + mod tests { + use std::time::Duration; + + use crate::clock::stopped::detail::{get_app_start_time, get_default_fixed_time}; + + #[test] + fn it_should_get_the_zero_start_time_when_testing() { + assert_eq!(get_default_fixed_time(), Duration::ZERO); + } + + #[test] + fn it_should_get_app_start_time() { + const TIME_AT_WRITING_THIS_TEST: Duration = Duration::new(1_662_983_731, 22312); + assert!(get_app_start_time() > TIME_AT_WRITING_THIS_TEST); + } + } +} diff --git a/packages/clock/src/clock/working/mod.rs b/packages/clock/src/clock/working/mod.rs new file mode 100644 index 000000000..6d0b4dcf7 --- /dev/null +++ b/packages/clock/src/clock/working/mod.rs @@ -0,0 +1,18 @@ +use std::time::SystemTime; + +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use crate::clock; + +#[allow(clippy::module_name_repetitions)] +pub struct WorkingClock; + +impl clock::Time for clock::Working { + fn now() -> 
DurationSinceUnixEpoch { + SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap() + } + + fn dbg_clock_type() -> String { + "Working".to_owned() + } +} diff --git a/packages/clock/src/conv/mod.rs b/packages/clock/src/conv/mod.rs new file mode 100644 index 000000000..f70950c38 --- /dev/null +++ b/packages/clock/src/conv/mod.rs @@ -0,0 +1,82 @@ +use std::str::FromStr; + +use chrono::{DateTime, Utc}; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +/// It converts a string in ISO 8601 format to a timestamp. +/// For example, the string `1970-01-01T00:00:00.000Z` which is the Unix Epoch +/// will be converted to a timestamp of 0: `DurationSinceUnixEpoch::ZERO`. +/// +/// # Panics +/// +/// Will panic if the input time cannot be converted to `DateTime::`, internally using the `i64` type. +/// (this will naturally happen in 292.5 billion years) +#[must_use] +pub fn convert_from_iso_8601_to_timestamp(iso_8601: &str) -> DurationSinceUnixEpoch { + convert_from_datetime_utc_to_timestamp(&DateTime::::from_str(iso_8601).unwrap()) +} + +/// It converts a `DateTime::` to a timestamp. +/// For example, the `DateTime::` of the Unix Epoch will be converted to a +/// timestamp of 0: `DurationSinceUnixEpoch::ZERO`. +/// +/// # Panics +/// +/// Will panic if the input time overflows the `u64` type. +/// (this will naturally happen in 584.9 billion years) +#[must_use] +pub fn convert_from_datetime_utc_to_timestamp(datetime_utc: &DateTime) -> DurationSinceUnixEpoch { + DurationSinceUnixEpoch::from_secs(u64::try_from(datetime_utc.timestamp()).expect("Overflow of u64 seconds, very future!")) +} + +/// It converts a timestamp to a `DateTime::`. +/// For example, the timestamp of 0: `DurationSinceUnixEpoch::ZERO` will be +/// converted to the `DateTime::` of the Unix Epoch. +/// +/// # Panics +/// +/// Will panic if the input time overflows the `u64` seconds overflows the `i64` type. 
+/// (this will naturally happen in 292.5 billion years) +#[must_use] +pub fn convert_from_timestamp_to_datetime_utc(duration: DurationSinceUnixEpoch) -> DateTime<Utc> { + DateTime::from_timestamp( + i64::try_from(duration.as_secs()).expect("Overflow of i64 seconds, very future!"), + duration.subsec_nanos(), + ) + .unwrap() +} + +#[cfg(test)] + +mod tests { + use chrono::DateTime; + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::conv::{ + convert_from_datetime_utc_to_timestamp, convert_from_iso_8601_to_timestamp, convert_from_timestamp_to_datetime_utc, + }; + + #[test] + fn should_be_converted_to_datetime_utc() { + let timestamp = DurationSinceUnixEpoch::ZERO; + assert_eq!( + convert_from_timestamp_to_datetime_utc(timestamp), + DateTime::from_timestamp(0, 0).unwrap() + ); + } + + #[test] + fn should_be_converted_from_datetime_utc() { + let datetime = DateTime::from_timestamp(0, 0).unwrap(); + assert_eq!( + convert_from_datetime_utc_to_timestamp(&datetime), + DurationSinceUnixEpoch::ZERO + ); + } + + #[test] + fn should_be_converted_from_datetime_utc_in_iso_8601() { + let iso_8601 = "1970-01-01T00:00:00.000Z".to_string(); + assert_eq!(convert_from_iso_8601_to_timestamp(&iso_8601), DurationSinceUnixEpoch::ZERO); + } +} diff --git a/packages/clock/src/lib.rs b/packages/clock/src/lib.rs new file mode 100644 index 000000000..295d22c16 --- /dev/null +++ b/packages/clock/src/lib.rs @@ -0,0 +1,53 @@ +//! Time related functions and types. +//! +//! It's usually a good idea to control where the time comes from +//! in an application so that it can be mocked for testing and it can be +//! controlled in production so we get the intended behavior without +//! relying on the specific time zone for the underlying system. +//! +//! Clocks use the type `DurationSinceUnixEpoch` which is a +//! `std::time::Duration` since the Unix Epoch (timestamp). +//! +//! ```text +//! Local time: lun 2023-03-27 16:12:00 WEST +//! Universal time: lun 2023-03-27 15:12:00 UTC +//! 
Time zone: Atlantic/Canary (WEST, +0100) +//! Timestamp: 1679929914 +//! Duration: 1679929914.10167426 +//! ``` +//! +//! > **NOTICE**: internally the `Duration` is stores it's main unit as seconds in a `u64` and it will +//! > overflow in 584.9 billion years. +//! +//! > **NOTICE**: the timestamp does not depend on the time zone. That gives you +//! > the ability to use the clock regardless of the underlying system time zone +//! > configuration. See [Unix time Wikipedia entry](https://en.wikipedia.org/wiki/Unix_time). + +pub mod clock; +pub mod conv; +pub mod static_time; +pub mod time_extent; + +#[macro_use] +extern crate lazy_static; + +/// This code needs to be copied into each crate. +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Working; + +/// Stopped version, for testing. +#[cfg(test)] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Stopped; + +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type DefaultTimeExtentMaker = time_extent::WorkingTimeExtentMaker; + +/// Stopped version, for testing. 
+#[cfg(test)] +#[allow(dead_code)] +pub(crate) type DefaultTimeExtentMaker = time_extent::StoppedTimeExtentMaker; diff --git a/src/shared/clock/static_time.rs b/packages/clock/src/static_time/mod.rs similarity index 100% rename from src/shared/clock/static_time.rs rename to packages/clock/src/static_time/mod.rs diff --git a/src/shared/clock/time_extent.rs b/packages/clock/src/time_extent/mod.rs similarity index 85% rename from src/shared/clock/time_extent.rs rename to packages/clock/src/time_extent/mod.rs index a5a359e52..c51849f21 100644 --- a/src/shared/clock/time_extent.rs +++ b/packages/clock/src/time_extent/mod.rs @@ -65,7 +65,7 @@ use std::num::{IntErrorKind, TryFromIntError}; use std::time::Duration; -use super::{Stopped, TimeNow, Type, Working}; +use crate::clock::{self, Stopped, Working}; /// This trait defines the operations that can be performed on a `TimeExtent`. pub trait Extent: Sized + Default { @@ -199,10 +199,10 @@ impl Extent for TimeExtent { /// It gives you the time in time extents. pub trait Make: Sized where - Clock: TimeNow, + Clock: clock::Time, { /// It gives you the current time extent (with a certain increment) for - /// the current time. It gets the current timestamp front he `Clock`. + /// the current time. It gets the current timestamp front the `Clock`. /// /// For example: /// @@ -223,12 +223,12 @@ where }) } - /// Same as [`now`](crate::shared::clock::time_extent::Make::now), but it + /// Same as [`now`](crate::time_extent::Make::now), but it /// will add an extra duration to the current time before calculating the /// time extent. It gives you a time extent for a time in the future. 
#[must_use] fn now_after(increment: &Base, add_time: &Duration) -> Option> { - match Clock::add(add_time) { + match Clock::now_add(add_time) { None => None, Some(time) => time .as_nanos() @@ -240,12 +240,12 @@ where } } - /// Same as [`now`](crate::shared::clock::time_extent::Make::now), but it + /// Same as [`now`](crate::time_extent::Make::now), but it /// will subtract a duration to the current time before calculating the /// time extent. It gives you a time extent for a time in the past. #[must_use] fn now_before(increment: &Base, sub_time: &Duration) -> Option> { - match Clock::sub(sub_time) { + match Clock::now_sub(sub_time) { None => None, Some(time) => time .as_nanos() @@ -262,38 +262,30 @@ where /// /// It's a clock which measures time in `TimeExtents`. #[derive(Debug)] -pub struct Maker {} +pub struct Maker { + clock: std::marker::PhantomData, +} /// A `TimeExtent` maker which makes `TimeExtents` from the `Working` clock. -pub type WorkingTimeExtentMaker = Maker<{ Type::WorkingClock as usize }>; +pub type WorkingTimeExtentMaker = Maker; /// A `TimeExtent` maker which makes `TimeExtents` from the `Stopped` clock. -pub type StoppedTimeExtentMaker = Maker<{ Type::StoppedClock as usize }>; - -impl Make for WorkingTimeExtentMaker {} -impl Make for StoppedTimeExtentMaker {} +pub type StoppedTimeExtentMaker = Maker; -/// The default `TimeExtent` maker. It is `WorkingTimeExtentMaker` in production -/// and `StoppedTimeExtentMaker` in tests. -#[cfg(not(test))] -pub type DefaultTimeExtentMaker = WorkingTimeExtentMaker; - -/// The default `TimeExtent` maker. It is `WorkingTimeExtentMaker` in production -/// and `StoppedTimeExtentMaker` in tests. 
-#[cfg(test)] -pub type DefaultTimeExtentMaker = StoppedTimeExtentMaker; +impl Make for WorkingTimeExtentMaker {} +impl Make for StoppedTimeExtentMaker {} #[cfg(test)] mod test { - use crate::shared::clock::time_extent::TimeExtent; + use crate::time_extent::TimeExtent; const TIME_EXTENT_VAL: TimeExtent = TimeExtent::from_sec(2, &239_812_388_723); mod fn_checked_duration_from_nanos { use std::time::Duration; - use crate::shared::clock::time_extent::checked_duration_from_nanos; - use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL; + use crate::time_extent::checked_duration_from_nanos; + use crate::time_extent::test::TIME_EXTENT_VAL; const NANOS_PER_SEC: u32 = 1_000_000_000; @@ -334,7 +326,7 @@ mod test { mod time_extent { mod fn_default { - use crate::shared::clock::time_extent::{TimeExtent, ZERO}; + use crate::time_extent::{TimeExtent, ZERO}; #[test] fn it_should_default_initialize_to_zero() { @@ -343,8 +335,8 @@ mod test { } mod fn_from_sec { - use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL; - use crate::shared::clock::time_extent::{Multiplier, TimeExtent, ZERO}; + use crate::time_extent::test::TIME_EXTENT_VAL; + use crate::time_extent::{Multiplier, TimeExtent, ZERO}; #[test] fn it_should_make_empty_for_zero() { @@ -360,8 +352,8 @@ mod test { } mod fn_new { - use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL; - use crate::shared::clock::time_extent::{Base, Extent, Multiplier, TimeExtent, ZERO}; + use crate::time_extent::test::TIME_EXTENT_VAL; + use crate::time_extent::{Base, Extent, Multiplier, TimeExtent, ZERO}; #[test] fn it_should_make_empty_for_zero() { @@ -383,8 +375,8 @@ mod test { mod fn_increase { use std::num::IntErrorKind; - use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL; - use crate::shared::clock::time_extent::{Extent, TimeExtent, ZERO}; + use crate::time_extent::test::TIME_EXTENT_VAL; + use crate::time_extent::{Extent, TimeExtent, ZERO}; #[test] fn it_should_not_increase_for_zero() { @@ -411,8 +403,8 @@ 
mod test { mod fn_decrease { use std::num::IntErrorKind; - use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL; - use crate::shared::clock::time_extent::{Extent, TimeExtent, ZERO}; + use crate::time_extent::test::TIME_EXTENT_VAL; + use crate::time_extent::{Extent, TimeExtent, ZERO}; #[test] fn it_should_not_decrease_for_zero() { @@ -437,8 +429,8 @@ mod test { } mod fn_total { - use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL; - use crate::shared::clock::time_extent::{Base, Extent, Product, TimeExtent, MAX, ZERO}; + use crate::time_extent::test::TIME_EXTENT_VAL; + use crate::time_extent::{Base, Extent, Product, TimeExtent, MAX, ZERO}; #[test] fn it_should_be_zero_for_zero() { @@ -485,8 +477,8 @@ mod test { } mod fn_total_next { - use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL; - use crate::shared::clock::time_extent::{Base, Extent, Product, TimeExtent, MAX, ZERO}; + use crate::time_extent::test::TIME_EXTENT_VAL; + use crate::time_extent::{Base, Extent, Product, TimeExtent, MAX, ZERO}; #[test] fn it_should_be_zero_for_zero() { @@ -542,9 +534,12 @@ mod test { mod make_time_extent { mod fn_now { - use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL; - use crate::shared::clock::time_extent::{Base, DefaultTimeExtentMaker, Make, TimeExtent}; - use crate::shared::clock::{Current, DurationSinceUnixEpoch, StoppedTime}; + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::clock::stopped::Stopped as _; + use crate::time_extent::test::TIME_EXTENT_VAL; + use crate::time_extent::{Base, Make, TimeExtent}; + use crate::{CurrentClock, DefaultTimeExtentMaker}; #[test] fn it_should_give_a_time_extent() { @@ -556,7 +551,7 @@ mod test { } ); - Current::local_set(&DurationSinceUnixEpoch::from_secs(TIME_EXTENT_VAL.amount * 2)); + CurrentClock::local_set(&DurationSinceUnixEpoch::from_secs(TIME_EXTENT_VAL.amount * 2)); assert_eq!( DefaultTimeExtentMaker::now(&TIME_EXTENT_VAL.increment).unwrap().unwrap(), @@ -571,7 +566,7 @@ 
mod test { #[test] fn it_should_fail_if_amount_exceeds_bounds() { - Current::local_set(&DurationSinceUnixEpoch::MAX); + CurrentClock::local_set(&DurationSinceUnixEpoch::MAX); assert_eq!( DefaultTimeExtentMaker::now(&Base::from_millis(1)).unwrap().unwrap_err(), u64::try_from(u128::MAX).unwrap_err() @@ -582,9 +577,12 @@ mod test { mod fn_now_after { use std::time::Duration; - use crate::shared::clock::time_extent::test::TIME_EXTENT_VAL; - use crate::shared::clock::time_extent::{Base, DefaultTimeExtentMaker, Make}; - use crate::shared::clock::{Current, DurationSinceUnixEpoch, StoppedTime}; + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::clock::stopped::Stopped as _; + use crate::time_extent::test::TIME_EXTENT_VAL; + use crate::time_extent::{Base, Make}; + use crate::{CurrentClock, DefaultTimeExtentMaker}; #[test] fn it_should_give_a_time_extent() { @@ -603,13 +601,13 @@ mod test { fn it_should_fail_for_zero() { assert_eq!(DefaultTimeExtentMaker::now_after(&Base::ZERO, &Duration::ZERO), None); - Current::local_set(&DurationSinceUnixEpoch::MAX); + CurrentClock::local_set(&DurationSinceUnixEpoch::MAX); assert_eq!(DefaultTimeExtentMaker::now_after(&Base::ZERO, &Duration::MAX), None); } #[test] fn it_should_fail_if_amount_exceeds_bounds() { - Current::local_set(&DurationSinceUnixEpoch::MAX); + CurrentClock::local_set(&DurationSinceUnixEpoch::MAX); assert_eq!( DefaultTimeExtentMaker::now_after(&Base::from_millis(1), &Duration::ZERO) .unwrap() @@ -621,12 +619,15 @@ mod test { mod fn_now_before { use std::time::Duration; - use crate::shared::clock::time_extent::{Base, DefaultTimeExtentMaker, Make, TimeExtent}; - use crate::shared::clock::{Current, DurationSinceUnixEpoch, StoppedTime}; + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::clock::stopped::Stopped as _; + use crate::time_extent::{Base, Make, TimeExtent}; + use crate::{CurrentClock, DefaultTimeExtentMaker}; #[test] fn it_should_give_a_time_extent() { - 
Current::local_set(&DurationSinceUnixEpoch::MAX); + CurrentClock::local_set(&DurationSinceUnixEpoch::MAX); assert_eq!( DefaultTimeExtentMaker::now_before( @@ -651,7 +652,7 @@ mod test { #[test] fn it_should_fail_if_amount_exceeds_bounds() { - Current::local_set(&DurationSinceUnixEpoch::MAX); + CurrentClock::local_set(&DurationSinceUnixEpoch::MAX); assert_eq!( DefaultTimeExtentMaker::now_before(&Base::from_millis(1), &Duration::ZERO) .unwrap() diff --git a/packages/clock/tests/clock/mod.rs b/packages/clock/tests/clock/mod.rs new file mode 100644 index 000000000..5d94bb83d --- /dev/null +++ b/packages/clock/tests/clock/mod.rs @@ -0,0 +1,16 @@ +use std::time::Duration; + +use torrust_tracker_clock::clock::Time; + +use crate::CurrentClock; + +#[test] +fn it_should_use_stopped_time_for_testing() { + assert_eq!(CurrentClock::dbg_clock_type(), "Stopped".to_owned()); + + let time = CurrentClock::now(); + std::thread::sleep(Duration::from_millis(50)); + let time_2 = CurrentClock::now(); + + assert_eq!(time, time_2); +} diff --git a/packages/clock/tests/integration.rs b/packages/clock/tests/integration.rs new file mode 100644 index 000000000..fa500227a --- /dev/null +++ b/packages/clock/tests/integration.rs @@ -0,0 +1,19 @@ +//! Integration tests. +//! +//! ```text +//! cargo test --test integration +//! ``` + +//mod common; +mod clock; + +/// This code needs to be copied into each crate. +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type CurrentClock = torrust_tracker_clock::clock::Working; + +/// Stopped version, for testing. 
+#[cfg(test)] +#[allow(dead_code)] +pub(crate) type CurrentClock = torrust_tracker_clock::clock::Stopped; diff --git a/packages/configuration/Cargo.toml b/packages/configuration/Cargo.toml index cc300afe0..a4c3f2006 100644 --- a/packages/configuration/Cargo.toml +++ b/packages/configuration/Cargo.toml @@ -8,21 +8,23 @@ authors.workspace = true documentation.workspace = true edition.workspace = true homepage.workspace = true -license-file.workspace = true +license.workspace = true publish.workspace = true repository.workspace = true rust-version.workspace = true version.workspace = true [dependencies] -config = "0.13" -log = { version = "0.4", features = ["release_max_level_info"] } -serde = { version = "1.0", features = ["derive"] } -serde_with = "3.2" -thiserror = "1.0" -toml = "0.8" -torrust-tracker-located-error = { version = "3.0.0-alpha.11", path = "../located-error" } -torrust-tracker-primitives = { version = "3.0.0-alpha.11", path = "../primitives" } +camino = { version = "1", features = ["serde", "serde1"] } +derive_more = "0" +figment = { version = "0", features = ["env", "test", "toml"] } +serde = { version = "1", features = ["derive"] } +serde_json = { version = "1", features = ["preserve_order"] } +serde_with = "3" +thiserror = "1" +toml = "0" +torrust-tracker-located-error = { version = "3.0.0-alpha.12", path = "../located-error" } +url = "2" [dev-dependencies] uuid = { version = "1", features = ["v4"] } diff --git a/packages/configuration/src/lib.rs b/packages/configuration/src/lib.rs index 059316a26..bdbe419ca 100644 --- a/packages/configuration/src/lib.rs +++ b/packages/configuration/src/lib.rs @@ -3,398 +3,231 @@ //! This module contains the configuration data structures for the //! Torrust Tracker, which is a `BitTorrent` tracker server. //! -//! The configuration is loaded from a [TOML](https://toml.io/en/) file -//! `tracker.toml` in the project root folder or from an environment variable -//! with the same content as the file. -//! -//! 
When you run the tracker without a configuration file, a new one will be -//! created with the default values, but the tracker immediately exits. You can -//! then edit the configuration file and run the tracker again. -//! -//! Configuration can not only be loaded from a file, but also from environment -//! variable `TORRUST_TRACKER_CONFIG`. This is useful when running the tracker -//! in a Docker container or environments where you do not have a persistent -//! storage or you cannot inject a configuration file. Refer to -//! [`Torrust Tracker documentation`](https://docs.rs/torrust-tracker) for more -//! information about how to pass configuration to the tracker. -//! -//! # Table of contents -//! -//! - [Sections](#sections) -//! - [Port binding](#port-binding) -//! - [TSL support](#tsl-support) -//! - [Generating self-signed certificates](#generating-self-signed-certificates) -//! - [Default configuration](#default-configuration) -//! -//! ## Sections -//! -//! Each section in the toml structure is mapped to a data structure. For -//! example, the `[http_api]` section (configuration for the tracker HTTP API) -//! is mapped to the [`HttpApi`](HttpApi) structure. -//! -//! > **NOTICE**: some sections are arrays of structures. For example, the -//! > `[[udp_trackers]]` section is an array of [`UdpTracker`](UdpTracker) since -//! > you can have multiple running UDP trackers bound to different ports. -//! -//! Please refer to the documentation of each structure for more information -//! about each section. -//! -//! - [`Core configuration`](crate::Configuration) -//! - [`HTTP API configuration`](crate::HttpApi) -//! - [`HTTP Tracker configuration`](crate::HttpTracker) -//! - [`UDP Tracker configuration`](crate::UdpTracker) -//! -//! ## Port binding -//! -//! For the API, HTTP and UDP trackers you can bind to a random port by using -//! port `0`. For example, if you want to bind to a random port on all -//! interfaces, use `0.0.0.0:0`. 
The OS will choose a random port but the -//! tracker will not print the port it is listening to when it starts. It just -//! says `Starting Torrust HTTP tracker server on: http://0.0.0.0:0`. It shows -//! the port used in the configuration file, and not the port the -//! tracker is actually listening to. This is a planned feature, see issue -//! [186](https://github.com/torrust/torrust-tracker/issues/186) for more -//! information. -//! -//! ## TSL support -//! -//! For the API and HTTP tracker you can enable TSL by setting `ssl_enabled` to -//! `true` and setting the paths to the certificate and key files. -//! -//! Typically, you will have a directory structure like this: -//! -//! ```text -//! storage/ -//! ├── database -//! │ └── data.db -//! └── tls -//! ├── localhost.crt -//! └── localhost.key -//! ``` -//! -//! where you can store all the persistent data. -//! -//! Alternatively, you could setup a reverse proxy like Nginx or Apache to -//! handle the SSL/TLS part and forward the requests to the tracker. If you do -//! that, you should set [`on_reverse_proxy`](crate::Configuration::on_reverse_proxy) -//! to `true` in the configuration file. It's out of scope for this -//! documentation to explain in detail how to setup a reverse proxy, but the -//! configuration file should be something like this: -//! -//! For [NGINX](https://docs.nginx.com/nginx/admin-guide/web-server/reverse-proxy/): -//! -//! ```text -//! # HTTPS only (with SSL - force redirect to HTTPS) -//! -//! server { -//! listen 80; -//! server_name tracker.torrust.com; -//! -//! return 301 https://$host$request_uri; -//! } -//! -//! server { -//! listen 443; -//! server_name tracker.torrust.com; -//! -//! ssl_certificate CERT_PATH -//! ssl_certificate_key CERT_KEY_PATH; -//! -//! location / { -//! proxy_set_header X-Forwarded-For $remote_addr; -//! proxy_pass http://127.0.0.1:6969; -//! } -//! } -//! ``` -//! -//! For [Apache](https://httpd.apache.org/docs/2.4/howto/reverse_proxy.html): -//! -//! 
```text -//! # HTTPS only (with SSL - force redirect to HTTPS) -//! -//! -//! ServerAdmin webmaster@tracker.torrust.com -//! ServerName tracker.torrust.com -//! -//! -//! RewriteEngine on -//! RewriteCond %{HTTPS} off -//! RewriteRule ^ https://%{SERVER_NAME}%{REQUEST_URI} [END,NE,R=permanent] -//! -//! -//! -//! -//! -//! ServerAdmin webmaster@tracker.torrust.com -//! ServerName tracker.torrust.com -//! -//! -//! Order allow,deny -//! Allow from all -//! -//! -//! ProxyPreserveHost On -//! ProxyRequests Off -//! AllowEncodedSlashes NoDecode -//! -//! ProxyPass / http://localhost:3000/ -//! ProxyPassReverse / http://localhost:3000/ -//! ProxyPassReverse / http://tracker.torrust.com/ -//! -//! RequestHeader set X-Forwarded-Proto "https" -//! RequestHeader set X-Forwarded-Port "443" -//! -//! ErrorLog ${APACHE_LOG_DIR}/tracker.torrust.com-error.log -//! CustomLog ${APACHE_LOG_DIR}/tracker.torrust.com-access.log combined -//! -//! SSLCertificateFile CERT_PATH -//! SSLCertificateKeyFile CERT_KEY_PATH -//! -//! -//! ``` -//! -//! ## Generating self-signed certificates -//! -//! For testing purposes, you can use self-signed certificates. -//! -//! Refer to [Let's Encrypt - Certificates for localhost](https://letsencrypt.org/docs/certificates-for-localhost/) -//! for more information. -//! -//! Running the following command will generate a certificate (`localhost.crt`) -//! and key (`localhost.key`) file in your current directory: -//! -//! ```s -//! openssl req -x509 -out localhost.crt -keyout localhost.key \ -//! -newkey rsa:2048 -nodes -sha256 \ -//! -subj '/CN=localhost' -extensions EXT -config <( \ -//! printf "[dn]\nCN=localhost\n[req]\ndistinguished_name = dn\n[EXT]\nsubjectAltName=DNS:localhost\nkeyUsage=digitalSignature\nextendedKeyUsage=serverAuth") -//! ``` -//! -//! You can then use the generated files in the configuration file: -//! -//! ```s -//! [[http_trackers]] -//! enabled = true -//! ... -//! 
ssl_cert_path = "./storage/tracker/lib/tls/localhost.crt" -//! ssl_key_path = "./storage/tracker/lib/tls/localhost.key" -//! -//! [http_api] -//! enabled = true -//! ... -//! ssl_cert_path = "./storage/tracker/lib/tls/localhost.crt" -//! ssl_key_path = "./storage/tracker/lib/tls/localhost.key" -//! ``` -//! -//! ## Default configuration -//! -//! The default configuration is: -//! -//! ```toml -//! log_level = "info" -//! mode = "public" -//! db_driver = "Sqlite3" -//! db_path = "./storage/tracker/lib/database/sqlite3.db" -//! announce_interval = 120 -//! min_announce_interval = 120 -//! max_peer_timeout = 900 -//! on_reverse_proxy = false -//! external_ip = "0.0.0.0" -//! tracker_usage_statistics = true -//! persistent_torrent_completed_stat = false -//! inactive_peer_cleanup_interval = 600 -//! remove_peerless_torrents = true -//! -//! [[udp_trackers]] -//! enabled = false -//! bind_address = "0.0.0.0:6969" -//! -//! [[http_trackers]] -//! enabled = false -//! bind_address = "0.0.0.0:7070" -//! ssl_enabled = false -//! ssl_cert_path = "" -//! ssl_key_path = "" -//! -//! [http_api] -//! enabled = true -//! bind_address = "127.0.0.1:1212" -//! ssl_enabled = false -//! ssl_cert_path = "" -//! ssl_key_path = "" -//! -//! [http_api.access_tokens] -//! admin = "MyAccessToken" -//!``` -use std::collections::{HashMap, HashSet}; -use std::net::IpAddr; -use std::str::FromStr; +//! The current version for configuration is [`v2`]. 
+pub mod v2_0_0; +pub mod validator; + +use std::collections::HashMap; +use std::env; use std::sync::Arc; -use std::{env, fs}; +use std::time::Duration; -use config::{Config, ConfigError, File, FileFormat}; +use camino::Utf8PathBuf; +use derive_more::{Constructor, Display}; use serde::{Deserialize, Serialize}; -use serde_with::{serde_as, NoneAsEmptyString}; +use serde_with::serde_as; use thiserror::Error; -use torrust_tracker_located_error::{Located, LocatedError}; -use torrust_tracker_primitives::{DatabaseDriver, TrackerMode}; +use torrust_tracker_located_error::{DynError, LocatedError}; + +/// The maximum number of returned peers for a torrent. +pub const TORRENT_PEERS_LIMIT: usize = 74; + +/// Default timeout for sending and receiving packets. And waiting for sockets +/// to be readable and writable. +pub const DEFAULT_TIMEOUT: Duration = Duration::from_secs(5); + +// Environment variables + +/// The whole `tracker.toml` file content. It has priority over the config file. +/// Even if the file is not on the default path. +const ENV_VAR_CONFIG_TOML: &str = "TORRUST_TRACKER_CONFIG_TOML"; + +/// The `tracker.toml` file location. +pub const ENV_VAR_CONFIG_TOML_PATH: &str = "TORRUST_TRACKER_CONFIG_TOML_PATH"; + +pub type Configuration = v2_0_0::Configuration; +pub type Core = v2_0_0::core::Core; +pub type HealthCheckApi = v2_0_0::health_check_api::HealthCheckApi; +pub type HttpApi = v2_0_0::tracker_api::HttpApi; +pub type HttpTracker = v2_0_0::http_tracker::HttpTracker; +pub type UdpTracker = v2_0_0::udp_tracker::UdpTracker; +pub type Database = v2_0_0::database::Database; +pub type Driver = v2_0_0::database::Driver; +pub type Threshold = v2_0_0::logging::Threshold; + +pub type AccessTokens = HashMap; + +pub const LATEST_VERSION: &str = "2.0.0"; + +/// Info about the configuration specification. 
+#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Display, Clone)] +#[display(fmt = "Metadata(app: {app}, purpose: {purpose}, schema_version: {schema_version})")] +pub struct Metadata { + /// The application this configuration is valid for. + #[serde(default = "Metadata::default_app")] + app: App, + + /// The purpose of this parsed file. + #[serde(default = "Metadata::default_purpose")] + purpose: Purpose, + + /// The schema version for the configuration. + #[serde(default = "Metadata::default_schema_version")] + #[serde(flatten)] + schema_version: Version, +} + +impl Default for Metadata { + fn default() -> Self { + Self { + app: Self::default_app(), + purpose: Self::default_purpose(), + schema_version: Self::default_schema_version(), + } + } +} + +impl Metadata { + fn default_app() -> App { + App::TorrustTracker + } + + fn default_purpose() -> Purpose { + Purpose::Configuration + } + + fn default_schema_version() -> Version { + Version::latest() + } +} + +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Display, Clone)] +#[serde(rename_all = "kebab-case")] +pub enum App { + TorrustTracker, +} + +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Display, Clone)] +#[serde(rename_all = "lowercase")] +pub enum Purpose { + Configuration, +} + +/// The configuration version. 
+#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Display, Clone)] +#[serde(rename_all = "lowercase")] +pub struct Version { + #[serde(default = "Version::default_semver")] + schema_version: String, +} + +impl Default for Version { + fn default() -> Self { + Self { + schema_version: Self::default_semver(), + } + } +} + +impl Version { + fn new(semver: &str) -> Self { + Self { + schema_version: semver.to_owned(), + } + } + + fn latest() -> Self { + Self { + schema_version: LATEST_VERSION.to_string(), + } + } + + fn default_semver() -> String { + LATEST_VERSION.to_string() + } +} + +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone, Constructor)] +pub struct TrackerPolicy { + // Cleanup job configuration + /// Maximum time in seconds that a peer can be inactive before being + /// considered an inactive peer. If a peer is inactive for more than this + /// time, it will be removed from the torrent peer list. + #[serde(default = "TrackerPolicy::default_max_peer_timeout")] + pub max_peer_timeout: u32, + + /// If enabled the tracker will persist the number of completed downloads. + /// That's how many times a torrent has been downloaded completely. + #[serde(default = "TrackerPolicy::default_persistent_torrent_completed_stat")] + pub persistent_torrent_completed_stat: bool, + + /// If enabled, the tracker will remove torrents that have no peers. + /// The clean up torrent job runs every `inactive_peer_cleanup_interval` + /// seconds and it removes inactive peers. Eventually, the peer list of a + /// torrent could be empty and the torrent will be removed if this option is + /// enabled. 
+ #[serde(default = "TrackerPolicy::default_remove_peerless_torrents")] + pub remove_peerless_torrents: bool, +} + +impl Default for TrackerPolicy { + fn default() -> Self { + Self { + max_peer_timeout: Self::default_max_peer_timeout(), + persistent_torrent_completed_stat: Self::default_persistent_torrent_completed_stat(), + remove_peerless_torrents: Self::default_remove_peerless_torrents(), + } + } +} + +impl TrackerPolicy { + fn default_max_peer_timeout() -> u32 { + 900 + } + + fn default_persistent_torrent_completed_stat() -> bool { + false + } + + fn default_remove_peerless_torrents() -> bool { + true + } +} /// Information required for loading config #[derive(Debug, Default, Clone)] pub struct Info { - tracker_toml: String, - api_admin_token: Option, + config_toml: Option, + config_toml_path: String, } impl Info { /// Build Configuration Info /// - /// # Examples - /// - /// ``` - /// use torrust_tracker_configuration::Info; - /// - /// let result = Info::new(env_var_config, env_var_path_config, default_path_config, env_var_api_admin_token); - /// assert_eq!(result, ); - /// ``` - /// /// # Errors /// /// Will return `Err` if unable to obtain a configuration. 
/// #[allow(clippy::needless_pass_by_value)] - pub fn new( - env_var_config: String, - env_var_path_config: String, - default_path_config: String, - env_var_api_admin_token: String, - ) -> Result { - let tracker_toml = if let Ok(tracker_toml) = env::var(&env_var_config) { - println!("Loading configuration from env var {env_var_config} ..."); - - tracker_toml + pub fn new(default_config_toml_path: String) -> Result { + let env_var_config_toml = ENV_VAR_CONFIG_TOML.to_string(); + let env_var_config_toml_path = ENV_VAR_CONFIG_TOML_PATH.to_string(); + + let config_toml = if let Ok(config_toml) = env::var(env_var_config_toml) { + println!("Loading extra configuration from environment variable:\n {config_toml}"); + Some(config_toml) + } else { + None + }; + + let config_toml_path = if let Ok(config_toml_path) = env::var(env_var_config_toml_path) { + println!("Loading extra configuration from file: `{config_toml_path}` ..."); + config_toml_path } else { - let config_path = if let Ok(config_path) = env::var(env_var_path_config) { - println!("Loading configuration file: `{config_path}` ..."); - - config_path - } else { - println!("Loading default configuration file: `{default_path_config}` ..."); - - default_path_config - }; - - fs::read_to_string(config_path) - .map_err(|e| Error::UnableToLoadFromConfigFile { - source: (Arc::new(e) as Arc).into(), - })? - .parse() - .map_err(|_e: std::convert::Infallible| Error::Infallible)? + println!("Loading extra configuration from default configuration file: `{default_config_toml_path}` ..."); + default_config_toml_path }; - let api_admin_token = env::var(env_var_api_admin_token).ok(); Ok(Self { - tracker_toml, - api_admin_token, + config_toml, + config_toml_path, }) } } -/// Configuration for each UDP tracker. -#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] -pub struct UdpTracker { - /// Weather the UDP tracker is enabled or not. - pub enabled: bool, - /// The address the tracker will bind to. 
- /// The format is `ip:port`, for example `0.0.0.0:6969`. If you want to - /// listen to all interfaces, use `0.0.0.0`. If you want the operating - /// system to choose a random port, use port `0`. - pub bind_address: String, -} - -/// Configuration for each HTTP tracker. -#[serde_as] -#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] -pub struct HttpTracker { - /// Weather the HTTP tracker is enabled or not. - pub enabled: bool, - /// The address the tracker will bind to. - /// The format is `ip:port`, for example `0.0.0.0:6969`. If you want to - /// listen to all interfaces, use `0.0.0.0`. If you want the operating - /// system to choose a random port, use port `0`. - pub bind_address: String, - /// Weather the HTTP tracker will use SSL or not. - pub ssl_enabled: bool, - /// Path to the SSL certificate file. Only used if `ssl_enabled` is `true`. - #[serde_as(as = "NoneAsEmptyString")] - pub ssl_cert_path: Option, - /// Path to the SSL key file. Only used if `ssl_enabled` is `true`. - #[serde_as(as = "NoneAsEmptyString")] - pub ssl_key_path: Option, -} - -/// Configuration for the HTTP API. -#[serde_as] -#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] -pub struct HttpApi { - /// Weather the HTTP API is enabled or not. - pub enabled: bool, - /// The address the tracker will bind to. - /// The format is `ip:port`, for example `0.0.0.0:6969`. If you want to - /// listen to all interfaces, use `0.0.0.0`. If you want the operating - /// system to choose a random port, use port `0`. - pub bind_address: String, - /// Weather the HTTP API will use SSL or not. - pub ssl_enabled: bool, - /// Path to the SSL certificate file. Only used if `ssl_enabled` is `true`. - #[serde_as(as = "NoneAsEmptyString")] - pub ssl_cert_path: Option, - /// Path to the SSL key file. Only used if `ssl_enabled` is `true`. - #[serde_as(as = "NoneAsEmptyString")] - pub ssl_key_path: Option, - /// Access tokens for the HTTP API. 
The key is a label identifying the - /// token and the value is the token itself. The token is used to - /// authenticate the user. All tokens are valid for all endpoints and have - /// the all permissions. - pub access_tokens: HashMap, -} - -impl HttpApi { - fn override_admin_token(&mut self, api_admin_token: &str) { - self.access_tokens.insert("admin".to_string(), api_admin_token.to_string()); - } -} - -impl HttpApi { - /// Checks if the given token is one of the token in the configuration. - #[must_use] - pub fn contains_token(&self, token: &str) -> bool { - let tokens: HashMap = self.access_tokens.clone(); - let tokens: HashSet = tokens.into_values().collect(); - tokens.contains(token) - } -} - -/// Core configuration for the tracker. -#[allow(clippy::struct_excessive_bools)] -#[derive(Serialize, Deserialize, PartialEq, Eq, Debug)] -pub struct Configuration { - /// Logging level. Possible values are: `Off`, `Error`, `Warn`, `Info`, - /// `Debug` and `Trace`. Default is `Info`. - pub log_level: Option, - /// Tracker mode. See [`TrackerMode`](torrust_tracker_primitives::TrackerMode) for more information. - pub mode: TrackerMode, - - // Database configuration - /// Database driver. Possible values are: `Sqlite3`, and `MySQL`. - pub db_driver: DatabaseDriver, - /// Database connection string. The format depends on the database driver. - /// For `Sqlite3`, the format is `path/to/database.db`, for example: - /// `./storage/tracker/lib/database/sqlite3.db`. - /// For `Mysql`, the format is `mysql://db_user:db_user_password:port/db_name`, for - /// example: `root:password@localhost:3306/torrust`. - pub db_path: String, - +/// Announce policy +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone, Copy, Constructor)] +pub struct AnnouncePolicy { /// Interval in seconds that the client should wait between sending regular /// announce requests to the tracker. /// @@ -406,7 +239,9 @@ pub struct Configuration { /// client's initial request. 
It serves as a guideline for clients to know /// how often they should contact the tracker for updates on the peer list, /// while ensuring that the tracker is not overwhelmed with requests. - pub announce_interval: u32, + #[serde(default = "AnnouncePolicy::default_interval")] + pub interval: u32, + /// Minimum announce interval. Clients must not reannounce more frequently /// than this. /// @@ -418,53 +253,27 @@ pub struct Configuration { /// value to prevent sending too many requests in a short period, which /// could lead to excessive load on the tracker or even getting banned by /// the tracker for not adhering to the rules. - pub min_announce_interval: u32, - /// Weather the tracker is behind a reverse proxy or not. - /// If the tracker is behind a reverse proxy, the `X-Forwarded-For` header - /// sent from the proxy will be used to get the client's IP address. - pub on_reverse_proxy: bool, - /// The external IP address of the tracker. If the client is using a - /// loopback IP address, this IP address will be used instead. If the peer - /// is using a loopback IP address, the tracker assumes that the peer is - /// in the same network as the tracker and will use the tracker's IP - /// address instead. - pub external_ip: Option, - /// Weather the tracker should collect statistics about tracker usage. - /// If enabled, the tracker will collect statistics like the number of - /// connections handled, the number of announce requests handled, etc. - /// Refer to the [`Tracker`](https://docs.rs/torrust-tracker) for more - /// information about the collected metrics. - pub tracker_usage_statistics: bool, - /// If enabled the tracker will persist the number of completed downloads. - /// That's how many times a torrent has been downloaded completely. 
- pub persistent_torrent_completed_stat: bool, + #[serde(default = "AnnouncePolicy::default_interval_min")] + pub interval_min: u32, +} - // Cleanup job configuration - /// Maximum time in seconds that a peer can be inactive before being - /// considered an inactive peer. If a peer is inactive for more than this - /// time, it will be removed from the torrent peer list. - pub max_peer_timeout: u32, - /// Interval in seconds that the cleanup job will run to remove inactive - /// peers from the torrent peer list. - pub inactive_peer_cleanup_interval: u64, - /// If enabled, the tracker will remove torrents that have no peers. - /// THe clean up torrent job runs every `inactive_peer_cleanup_interval` - /// seconds and it removes inactive peers. Eventually, the peer list of a - /// torrent could be empty and the torrent will be removed if this option is - /// enabled. - pub remove_peerless_torrents: bool, +impl Default for AnnouncePolicy { + fn default() -> Self { + Self { + interval: Self::default_interval(), + interval_min: Self::default_interval_min(), + } + } +} + +impl AnnouncePolicy { + fn default_interval() -> u32 { + 120 + } - // Server jobs configuration - /// The list of UDP trackers the tracker is running. Each UDP tracker - /// represents a UDP server that the tracker is running and it has its own - /// configuration. - pub udp_trackers: Vec, - /// The list of HTTP trackers the tracker is running. Each HTTP tracker - /// represents a HTTP server that the tracker is running and it has its own - /// configuration. - pub http_trackers: Vec, - /// The HTTP API configuration. - pub http_api: HttpApi, + fn default_interval_min() -> u32 { + 120 + } } /// Errors that can occur when loading the configuration. @@ -472,7 +281,7 @@ pub struct Configuration { pub enum Error { /// Unable to load the configuration from the environment variable. 
/// This error only occurs if there is no configuration file and the - /// `TORRUST_TRACKER_CONFIG` environment variable is not set. + /// `TORRUST_TRACKER_CONFIG_TOML` environment variable is not set. #[error("Unable to load from Environmental Variable: {source}")] UnableToLoadFromEnvironmentVariable { source: LocatedError<'static, dyn std::error::Error + Send + Sync>, @@ -485,283 +294,49 @@ pub enum Error { /// Unable to load the configuration from the configuration file. #[error("Failed processing the configuration: {source}")] - ConfigError { source: LocatedError<'static, ConfigError> }, + ConfigError { + source: LocatedError<'static, dyn std::error::Error + Send + Sync>, + }, #[error("The error for errors that can never happen.")] Infallible, + + #[error("Unsupported configuration version: {version}")] + UnsupportedVersion { version: Version }, + + #[error("Missing mandatory configuration option. Option path: {path}")] + MissingMandatoryOption { path: String }, } -impl From for Error { +impl From for Error { #[track_caller] - fn from(err: ConfigError) -> Self { + fn from(err: figment::Error) -> Self { Self::ConfigError { - source: Located(err).into(), + source: (Arc::new(err) as DynError).into(), } } } -impl Default for Configuration { - fn default() -> Self { - let mut configuration = Configuration { - log_level: Option::from(String::from("info")), - mode: TrackerMode::Public, - db_driver: DatabaseDriver::Sqlite3, - db_path: String::from("./storage/tracker/lib/database/sqlite3.db"), - announce_interval: 120, - min_announce_interval: 120, - max_peer_timeout: 900, - on_reverse_proxy: false, - external_ip: Some(String::from("0.0.0.0")), - tracker_usage_statistics: true, - persistent_torrent_completed_stat: false, - inactive_peer_cleanup_interval: 600, - remove_peerless_torrents: true, - udp_trackers: Vec::new(), - http_trackers: Vec::new(), - http_api: HttpApi { - enabled: true, - bind_address: String::from("127.0.0.1:1212"), - ssl_enabled: false, - 
ssl_cert_path: None, - ssl_key_path: None, - access_tokens: [(String::from("admin"), String::from("MyAccessToken"))] - .iter() - .cloned() - .collect(), - }, - }; - configuration.udp_trackers.push(UdpTracker { - enabled: false, - bind_address: String::from("0.0.0.0:6969"), - }); - configuration.http_trackers.push(HttpTracker { - enabled: false, - bind_address: String::from("0.0.0.0:7070"), - ssl_enabled: false, - ssl_cert_path: None, - ssl_key_path: None, - }); - configuration - } -} - -impl Configuration { - fn override_api_admin_token(&mut self, api_admin_token: &str) { - self.http_api.override_admin_token(api_admin_token); - } - - /// Returns the tracker public IP address id defined in the configuration, - /// and `None` otherwise. - #[must_use] - pub fn get_ext_ip(&self) -> Option { - match &self.external_ip { - None => None, - Some(external_ip) => match IpAddr::from_str(external_ip) { - Ok(external_ip) => Some(external_ip), - Err(_) => None, - }, - } - } - - /// Loads the configuration from the configuration file. - /// - /// # Errors - /// - /// Will return `Err` if `path` does not exist or has a bad configuration. - pub fn load_from_file(path: &str) -> Result { - let config_builder = Config::builder(); - - #[allow(unused_assignments)] - let mut config = Config::default(); - - config = config_builder.add_source(File::with_name(path)).build()?; - - let torrust_config: Configuration = config.try_deserialize()?; - - Ok(torrust_config) - } - - /// Saves the default configuration at the given path. - /// - /// # Errors - /// - /// Will return `Err` if `path` is not a valid path or the configuration - /// file cannot be created. - pub fn create_default_configuration_file(path: &str) -> Result { - let config = Configuration::default(); - config.save_to_file(path)?; - Ok(config) - } - - /// Loads the configuration from the `Info` struct. The whole - /// configuration in toml format is included in the `info.tracker_toml` string. 
- /// - /// Optionally will override the admin api token. - /// - /// # Errors - /// - /// Will return `Err` if the environment variable does not exist or has a bad configuration. - pub fn load(info: &Info) -> Result { - let config_builder = Config::builder() - .add_source(File::from_str(&info.tracker_toml, FileFormat::Toml)) - .build()?; - let mut config: Configuration = config_builder.try_deserialize()?; - - if let Some(ref token) = info.api_admin_token { - config.override_api_admin_token(token); - }; - - Ok(config) - } - - /// Saves the configuration to the configuration file. - /// - /// # Errors - /// - /// Will return `Err` if `filename` does not exist or the user does not have - /// permission to read it. Will also return `Err` if the configuration is - /// not valid or cannot be encoded to TOML. - /// - /// # Panics - /// - /// Will panic if the configuration cannot be written into the file. - pub fn save_to_file(&self, path: &str) -> Result<(), Error> { - fs::write(path, self.to_toml()).expect("Could not write to file!"); - Ok(()) - } - - /// Encodes the configuration to TOML. - fn to_toml(&self) -> String { - toml::to_string(self).expect("Could not encode TOML value") - } +#[serde_as] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone, Default)] +pub struct TslConfig { + /// Path to the SSL certificate file. + #[serde(default = "TslConfig::default_ssl_cert_path")] + pub ssl_cert_path: Utf8PathBuf, + + /// Path to the SSL key file. 
+ #[serde(default = "TslConfig::default_ssl_key_path")] + pub ssl_key_path: Utf8PathBuf, } -#[cfg(test)] -mod tests { - use crate::Configuration; - - #[cfg(test)] - fn default_config_toml() -> String { - let config = r#"log_level = "info" - mode = "public" - db_driver = "Sqlite3" - db_path = "./storage/tracker/lib/database/sqlite3.db" - announce_interval = 120 - min_announce_interval = 120 - on_reverse_proxy = false - external_ip = "0.0.0.0" - tracker_usage_statistics = true - persistent_torrent_completed_stat = false - max_peer_timeout = 900 - inactive_peer_cleanup_interval = 600 - remove_peerless_torrents = true - - [[udp_trackers]] - enabled = false - bind_address = "0.0.0.0:6969" - - [[http_trackers]] - enabled = false - bind_address = "0.0.0.0:7070" - ssl_enabled = false - ssl_cert_path = "" - ssl_key_path = "" - - [http_api] - enabled = true - bind_address = "127.0.0.1:1212" - ssl_enabled = false - ssl_cert_path = "" - ssl_key_path = "" - - [http_api.access_tokens] - admin = "MyAccessToken" - "# - .lines() - .map(str::trim_start) - .collect::>() - .join("\n"); - config +impl TslConfig { + #[allow(clippy::unnecessary_wraps)] + fn default_ssl_cert_path() -> Utf8PathBuf { + Utf8PathBuf::new() } - #[test] - fn configuration_should_have_default_values() { - let configuration = Configuration::default(); - - let toml = toml::to_string(&configuration).expect("Could not encode TOML value"); - - assert_eq!(toml, default_config_toml()); - } - - #[test] - fn configuration_should_contain_the_external_ip() { - let configuration = Configuration::default(); - - assert_eq!(configuration.external_ip, Some(String::from("0.0.0.0"))); - } - - #[test] - fn configuration_should_be_saved_in_a_toml_config_file() { - use std::{env, fs}; - - use uuid::Uuid; - - // Build temp config file path - let temp_directory = env::temp_dir(); - let temp_file = temp_directory.join(format!("test_config_{}.toml", Uuid::new_v4())); - - // Convert to argument type for Configuration::save_to_file - let 
config_file_path = temp_file; - let path = config_file_path.to_string_lossy().to_string(); - - let default_configuration = Configuration::default(); - - default_configuration - .save_to_file(&path) - .expect("Could not save configuration to file"); - - let contents = fs::read_to_string(&path).expect("Something went wrong reading the file"); - - assert_eq!(contents, default_config_toml()); - } - - #[cfg(test)] - fn create_temp_config_file_with_default_config() -> String { - use std::env; - use std::fs::File; - use std::io::Write; - - use uuid::Uuid; - - // Build temp config file path - let temp_directory = env::temp_dir(); - let temp_file = temp_directory.join(format!("test_config_{}.toml", Uuid::new_v4())); - - // Convert to argument type for Configuration::load_from_file - let config_file_path = temp_file.clone(); - let path = config_file_path.to_string_lossy().to_string(); - - // Write file contents - let mut file = File::create(temp_file).unwrap(); - writeln!(&mut file, "{}", default_config_toml()).unwrap(); - - path - } - - #[test] - fn configuration_should_be_loaded_from_a_toml_config_file() { - let config_file_path = create_temp_config_file_with_default_config(); - - let configuration = Configuration::load_from_file(&config_file_path).expect("Could not load configuration from file"); - - assert_eq!(configuration, Configuration::default()); - } - - #[test] - fn http_api_configuration_should_check_if_it_contains_a_token() { - let configuration = Configuration::default(); - - assert!(configuration.http_api.contains_token("MyAccessToken")); - assert!(!configuration.http_api.contains_token("NonExistingToken")); + #[allow(clippy::unnecessary_wraps)] + fn default_ssl_key_path() -> Utf8PathBuf { + Utf8PathBuf::new() } } diff --git a/packages/configuration/src/v2_0_0/core.rs b/packages/configuration/src/v2_0_0/core.rs new file mode 100644 index 000000000..ed3e6aeb7 --- /dev/null +++ b/packages/configuration/src/v2_0_0/core.rs @@ -0,0 +1,144 @@ +use 
derive_more::{Constructor, Display}; +use serde::{Deserialize, Serialize}; + +use super::network::Network; +use crate::v2_0_0::database::Database; +use crate::validator::{SemanticValidationError, Validator}; +use crate::{AnnouncePolicy, TrackerPolicy}; + +#[allow(clippy::struct_excessive_bools)] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] +pub struct Core { + /// Announce policy configuration. + #[serde(default = "Core::default_announce_policy")] + pub announce_policy: AnnouncePolicy, + + /// Database configuration. + #[serde(default = "Core::default_database")] + pub database: Database, + + /// Interval in seconds that the cleanup job will run to remove inactive + /// peers from the torrent peer list. + #[serde(default = "Core::default_inactive_peer_cleanup_interval")] + pub inactive_peer_cleanup_interval: u64, + + /// When `true` only approved torrents can be announced in the tracker. + #[serde(default = "Core::default_listed")] + pub listed: bool, + + /// Network configuration. + #[serde(default = "Core::default_network")] + pub net: Network, + + /// When `true` clients require a key to connect and use the tracker. + #[serde(default = "Core::default_private")] + pub private: bool, + + /// Configuration specific when the tracker is running in private mode. + #[serde(default = "Core::default_private_mode")] + pub private_mode: Option, + + /// Tracker policy configuration. + #[serde(default = "Core::default_tracker_policy")] + pub tracker_policy: TrackerPolicy, + + /// Weather the tracker should collect statistics about tracker usage. + /// If enabled, the tracker will collect statistics like the number of + /// connections handled, the number of announce requests handled, etc. + /// Refer to the [`Tracker`](https://docs.rs/torrust-tracker) for more + /// information about the collected metrics. 
+ #[serde(default = "Core::default_tracker_usage_statistics")] + pub tracker_usage_statistics: bool, +} + +impl Default for Core { + fn default() -> Self { + Self { + announce_policy: Self::default_announce_policy(), + database: Self::default_database(), + inactive_peer_cleanup_interval: Self::default_inactive_peer_cleanup_interval(), + listed: Self::default_listed(), + net: Self::default_network(), + private: Self::default_private(), + private_mode: Self::default_private_mode(), + tracker_policy: Self::default_tracker_policy(), + tracker_usage_statistics: Self::default_tracker_usage_statistics(), + } + } +} + +impl Core { + fn default_announce_policy() -> AnnouncePolicy { + AnnouncePolicy::default() + } + + fn default_database() -> Database { + Database::default() + } + + fn default_inactive_peer_cleanup_interval() -> u64 { + 600 + } + + fn default_listed() -> bool { + false + } + + fn default_network() -> Network { + Network::default() + } + + fn default_private() -> bool { + false + } + + fn default_private_mode() -> Option { + if Self::default_private() { + Some(PrivateMode::default()) + } else { + None + } + } + + fn default_tracker_policy() -> TrackerPolicy { + TrackerPolicy::default() + } + fn default_tracker_usage_statistics() -> bool { + true + } +} + +/// Configuration specific when the tracker is running in private mode. +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone, Copy, Constructor, Display)] +pub struct PrivateMode { + /// A flag to disable expiration date for peer keys. + /// + /// When true, if the keys is not permanent the expiration date will be + /// ignored. The key will be accepted even if it has expired. 
+ #[serde(default = "PrivateMode::default_check_keys_expiration")] + pub check_keys_expiration: bool, +} + +impl Default for PrivateMode { + fn default() -> Self { + Self { + check_keys_expiration: Self::default_check_keys_expiration(), + } + } +} + +impl PrivateMode { + fn default_check_keys_expiration() -> bool { + true + } +} + +impl Validator for Core { + fn validate(&self) -> Result<(), SemanticValidationError> { + if self.private_mode.is_some() && !self.private { + return Err(SemanticValidationError::UselessPrivateModeSection); + } + + Ok(()) + } +} diff --git a/packages/configuration/src/v2_0_0/database.rs b/packages/configuration/src/v2_0_0/database.rs new file mode 100644 index 000000000..c2b24d809 --- /dev/null +++ b/packages/configuration/src/v2_0_0/database.rs @@ -0,0 +1,84 @@ +use serde::{Deserialize, Serialize}; +use url::Url; + +#[allow(clippy::struct_excessive_bools)] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] +pub struct Database { + // Database configuration + /// Database driver. Possible values are: `sqlite3`, and `mysql`. + #[serde(default = "Database::default_driver")] + pub driver: Driver, + + /// Database connection string. The format depends on the database driver. + /// For `sqlite3`, the format is `path/to/database.db`, for example: + /// `./storage/tracker/lib/database/sqlite3.db`. + /// For `Mysql`, the format is `mysql://db_user:db_user_password:port/db_name`, for + /// example: `mysql://root:password@localhost:3306/torrust`. + #[serde(default = "Database::default_path")] + pub path: String, +} + +impl Default for Database { + fn default() -> Self { + Self { + driver: Self::default_driver(), + path: Self::default_path(), + } + } +} + +impl Database { + fn default_driver() -> Driver { + Driver::Sqlite3 + } + + fn default_path() -> String { + String::from("./storage/tracker/lib/database/sqlite3.db") + } + + /// Masks secrets in the configuration. 
+ /// + /// # Panics + /// + /// Will panic if the database path for `MySQL` is not a valid URL. + pub fn mask_secrets(&mut self) { + match self.driver { + Driver::Sqlite3 => { + // Nothing to mask + } + Driver::MySQL => { + let mut url = Url::parse(&self.path).expect("path for MySQL driver should be a valid URL"); + url.set_password(Some("***")).expect("url password should be changed"); + self.path = url.to_string(); + } + } + } +} + +/// The database management system used by the tracker. +#[derive(Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Debug, Hash, Clone)] +#[serde(rename_all = "lowercase")] +pub enum Driver { + /// The `Sqlite3` database driver. + Sqlite3, + /// The `MySQL` database driver. + MySQL, +} + +#[cfg(test)] +mod tests { + + use super::{Database, Driver}; + + #[test] + fn it_should_allow_masking_the_mysql_user_password() { + let mut database = Database { + driver: Driver::MySQL, + path: "mysql://root:password@localhost:3306/torrust".to_string(), + }; + + database.mask_secrets(); + + assert_eq!(database.path, "mysql://root:***@localhost:3306/torrust".to_string()); + } +} diff --git a/packages/configuration/src/v2_0_0/health_check_api.rs b/packages/configuration/src/v2_0_0/health_check_api.rs new file mode 100644 index 000000000..61178fa80 --- /dev/null +++ b/packages/configuration/src/v2_0_0/health_check_api.rs @@ -0,0 +1,30 @@ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use serde::{Deserialize, Serialize}; +use serde_with::serde_as; + +/// Configuration for the Health Check API. +#[serde_as] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] +pub struct HealthCheckApi { + /// The address the API will bind to. + /// The format is `ip:port`, for example `127.0.0.1:1313`. If you want to + /// listen to all interfaces, use `0.0.0.0`. If you want the operating + /// system to choose a random port, use port `0`. 
+ #[serde(default = "HealthCheckApi::default_bind_address")] + pub bind_address: SocketAddr, +} + +impl Default for HealthCheckApi { + fn default() -> Self { + Self { + bind_address: Self::default_bind_address(), + } + } +} + +impl HealthCheckApi { + fn default_bind_address() -> SocketAddr { + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 1313) + } +} diff --git a/packages/configuration/src/v2_0_0/http_tracker.rs b/packages/configuration/src/v2_0_0/http_tracker.rs new file mode 100644 index 000000000..42ec02bf2 --- /dev/null +++ b/packages/configuration/src/v2_0_0/http_tracker.rs @@ -0,0 +1,41 @@ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use serde::{Deserialize, Serialize}; +use serde_with::serde_as; + +use crate::TslConfig; + +/// Configuration for each HTTP tracker. +#[serde_as] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] +pub struct HttpTracker { + /// The address the tracker will bind to. + /// The format is `ip:port`, for example `0.0.0.0:6969`. If you want to + /// listen to all interfaces, use `0.0.0.0`. If you want the operating + /// system to choose a random port, use port `0`. + #[serde(default = "HttpTracker::default_bind_address")] + pub bind_address: SocketAddr, + + /// TSL config. 
+ #[serde(default = "HttpTracker::default_tsl_config")] + pub tsl_config: Option, +} + +impl Default for HttpTracker { + fn default() -> Self { + Self { + bind_address: Self::default_bind_address(), + tsl_config: Self::default_tsl_config(), + } + } +} + +impl HttpTracker { + fn default_bind_address() -> SocketAddr { + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 7070) + } + + fn default_tsl_config() -> Option { + None + } +} diff --git a/packages/configuration/src/v2_0_0/logging.rs b/packages/configuration/src/v2_0_0/logging.rs new file mode 100644 index 000000000..e7dbe146c --- /dev/null +++ b/packages/configuration/src/v2_0_0/logging.rs @@ -0,0 +1,41 @@ +use serde::{Deserialize, Serialize}; + +#[allow(clippy::struct_excessive_bools)] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] +pub struct Logging { + /// Logging level. Possible values are: `Off`, `Error`, `Warn`, `Info`, + /// `Debug` and `Trace`. Default is `Info`. + #[serde(default = "Logging::default_threshold")] + pub threshold: Threshold, +} + +impl Default for Logging { + fn default() -> Self { + Self { + threshold: Self::default_threshold(), + } + } +} + +impl Logging { + fn default_threshold() -> Threshold { + Threshold::Info + } +} + +#[derive(Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Debug, Hash, Clone)] +#[serde(rename_all = "lowercase")] +pub enum Threshold { + /// A threshold lower than all security levels. + Off, + /// Corresponds to the `Error` security level. + Error, + /// Corresponds to the `Warn` security level. + Warn, + /// Corresponds to the `Info` security level. + Info, + /// Corresponds to the `Debug` security level. + Debug, + /// Corresponds to the `Trace` security level. + Trace, +} diff --git a/packages/configuration/src/v2_0_0/mod.rs b/packages/configuration/src/v2_0_0/mod.rs new file mode 100644 index 000000000..5067210bb --- /dev/null +++ b/packages/configuration/src/v2_0_0/mod.rs @@ -0,0 +1,672 @@ +//! 
Version `1` for [Torrust Tracker](https://docs.rs/torrust-tracker) +//! configuration data structures. +//! +//! This module contains the configuration data structures for the +//! Torrust Tracker, which is a `BitTorrent` tracker server. +//! +//! The configuration is loaded from a [TOML](https://toml.io/en/) file +//! `tracker.toml` in the project root folder or from an environment variable +//! with the same content as the file. +//! +//! Configuration can not only be loaded from a file, but also from an +//! environment variable `TORRUST_TRACKER_CONFIG_TOML`. This is useful when running +//! the tracker in a Docker container or environments where you do not have a +//! persistent storage or you cannot inject a configuration file. Refer to +//! [`Torrust Tracker documentation`](https://docs.rs/torrust-tracker) for more +//! information about how to pass configuration to the tracker. +//! +//! When you run the tracker without providing the configuration via a file or +//! env var, the default configuration is used. +//! +//! # Table of contents +//! +//! - [Sections](#sections) +//! - [Port binding](#port-binding) +//! - [TSL support](#tsl-support) +//! - [Generating self-signed certificates](#generating-self-signed-certificates) +//! - [Default configuration](#default-configuration) +//! +//! ## Sections +//! +//! Each section in the toml structure is mapped to a data structure. For +//! example, the `[http_api]` section (configuration for the tracker HTTP API) +//! is mapped to the [`HttpApi`] structure. +//! +//! > **NOTICE**: some sections are arrays of structures. For example, the +//! > `[[udp_trackers]]` section is an array of [`UdpTracker`] since +//! > you can have multiple running UDP trackers bound to different ports. +//! +//! Please refer to the documentation of each structure for more information +//! about each section. +//! +//! - [`Core configuration`](crate::v2::Configuration) +//! 
- [`HTTP API configuration`](crate::v2::tracker_api::HttpApi) +//! - [`HTTP Tracker configuration`](crate::v2::http_tracker::HttpTracker) +//! - [`UDP Tracker configuration`](crate::v2::udp_tracker::UdpTracker) +//! - [`Health Check API configuration`](crate::v2::health_check_api::HealthCheckApi) +//! +//! ## Port binding +//! +//! For the API, HTTP and UDP trackers you can bind to a random port by using +//! port `0`. For example, if you want to bind to a random port on all +//! interfaces, use `0.0.0.0:0`. The OS will choose a random free port. +//! +//! ## TSL support +//! +//! For the API and HTTP tracker you can enable TSL by setting `ssl_enabled` to +//! `true` and setting the paths to the certificate and key files. +//! +//! Typically, you will have a `storage` directory like the following: +//! +//! ```text +//! storage/ +//! ├── config.toml +//! └── tracker +//! ├── etc +//! │ └── tracker.toml +//! ├── lib +//! │ ├── database +//! │ │ ├── sqlite3.db +//! │ │ └── sqlite.db +//! │ └── tls +//! │ ├── localhost.crt +//! │ └── localhost.key +//! └── log +//! ``` +//! +//! where the application stores all the persistent data. +//! +//! Alternatively, you could setup a reverse proxy like Nginx or Apache to +//! handle the SSL/TLS part and forward the requests to the tracker. If you do +//! that, you should set [`on_reverse_proxy`](crate::v2::network::Network::on_reverse_proxy) +//! to `true` in the configuration file. It's out of scope for this +//! documentation to explain in detail how to setup a reverse proxy, but the +//! configuration file should be something like this: +//! +//! For [NGINX](https://docs.nginx.com/nginx/admin-guide/web-server/reverse-proxy/): +//! +//! ```text +//! # HTTPS only (with SSL - force redirect to HTTPS) +//! +//! server { +//! listen 80; +//! server_name tracker.torrust.com; +//! +//! return 301 https://$host$request_uri; +//! } +//! +//! server { +//! listen 443; +//! server_name tracker.torrust.com; +//! +//! 
ssl_certificate CERT_PATH +//! ssl_certificate_key CERT_KEY_PATH; +//! +//! location / { +//! proxy_set_header X-Forwarded-For $remote_addr; +//! proxy_pass http://127.0.0.1:6969; +//! } +//! } +//! ``` +//! +//! For [Apache](https://httpd.apache.org/docs/2.4/howto/reverse_proxy.html): +//! +//! ```text +//! # HTTPS only (with SSL - force redirect to HTTPS) +//! +//! +//! ServerAdmin webmaster@tracker.torrust.com +//! ServerName tracker.torrust.com +//! +//! +//! RewriteEngine on +//! RewriteCond %{HTTPS} off +//! RewriteRule ^ https://%{SERVER_NAME}%{REQUEST_URI} [END,NE,R=permanent] +//! +//! +//! +//! +//! +//! ServerAdmin webmaster@tracker.torrust.com +//! ServerName tracker.torrust.com +//! +//! +//! Order allow,deny +//! Allow from all +//! +//! +//! ProxyPreserveHost On +//! ProxyRequests Off +//! AllowEncodedSlashes NoDecode +//! +//! ProxyPass / http://localhost:3000/ +//! ProxyPassReverse / http://localhost:3000/ +//! ProxyPassReverse / http://tracker.torrust.com/ +//! +//! RequestHeader set X-Forwarded-Proto "https" +//! RequestHeader set X-Forwarded-Port "443" +//! +//! ErrorLog ${APACHE_LOG_DIR}/tracker.torrust.com-error.log +//! CustomLog ${APACHE_LOG_DIR}/tracker.torrust.com-access.log combined +//! +//! SSLCertificateFile CERT_PATH +//! SSLCertificateKeyFile CERT_KEY_PATH +//! +//! +//! ``` +//! +//! ## Generating self-signed certificates +//! +//! For testing purposes, you can use self-signed certificates. +//! +//! Refer to [Let's Encrypt - Certificates for localhost](https://letsencrypt.org/docs/certificates-for-localhost/) +//! for more information. +//! +//! Running the following command will generate a certificate (`localhost.crt`) +//! and key (`localhost.key`) file in your current directory: +//! +//! ```s +//! openssl req -x509 -out localhost.crt -keyout localhost.key \ +//! -newkey rsa:2048 -nodes -sha256 \ +//! -subj '/CN=localhost' -extensions EXT -config <( \ +//! 
printf "[dn]\nCN=localhost\n[req]\ndistinguished_name = dn\n[EXT]\nsubjectAltName=DNS:localhost\nkeyUsage=digitalSignature\nextendedKeyUsage=serverAuth") +//! ``` +//! +//! You can then use the generated files in the configuration file: +//! +//! ```s +//! [[http_trackers]] +//! ... +//! +//! [http_trackers.tsl_config] +//! ssl_cert_path = "./storage/tracker/lib/tls/localhost.crt" +//! ssl_key_path = "./storage/tracker/lib/tls/localhost.key" +//! +//! [http_api] +//! ... +//! +//! [http_api.tsl_config] +//! ssl_cert_path = "./storage/tracker/lib/tls/localhost.crt" +//! ssl_key_path = "./storage/tracker/lib/tls/localhost.key" +//! ``` +//! +//! ## Default configuration +//! +//! The default configuration is: +//! +//! ```toml +//! [logging] +//! threshold = "info" +//! +//! [core] +//! inactive_peer_cleanup_interval = 600 +//! listed = false +//! private = false +//! tracker_usage_statistics = true +//! +//! [core.announce_policy] +//! interval = 120 +//! interval_min = 120 +//! +//! [core.database] +//! driver = "sqlite3" +//! path = "./storage/tracker/lib/database/sqlite3.db" +//! +//! [core.net] +//! external_ip = "0.0.0.0" +//! on_reverse_proxy = false +//! +//! [core.tracker_policy] +//! max_peer_timeout = 900 +//! persistent_torrent_completed_stat = false +//! remove_peerless_torrents = true +//! +//! [http_api] +//! bind_address = "127.0.0.1:1212" +//! +//! [http_api.access_tokens] +//! admin = "MyAccessToken" +//! [health_check_api] +//! 
bind_address = "127.0.0.1:1313" +//!``` +pub mod core; +pub mod database; +pub mod health_check_api; +pub mod http_tracker; +pub mod logging; +pub mod network; +pub mod tracker_api; +pub mod udp_tracker; + +use std::fs; +use std::net::IpAddr; + +use figment::providers::{Env, Format, Serialized, Toml}; +use figment::Figment; +use logging::Logging; +use serde::{Deserialize, Serialize}; + +use self::core::Core; +use self::health_check_api::HealthCheckApi; +use self::http_tracker::HttpTracker; +use self::tracker_api::HttpApi; +use self::udp_tracker::UdpTracker; +use crate::validator::{SemanticValidationError, Validator}; +use crate::{Error, Info, Metadata, Version}; + +/// This configuration version +const VERSION_2_0_0: &str = "2.0.0"; + +/// Prefix for env vars that overwrite configuration options. +const CONFIG_OVERRIDE_PREFIX: &str = "TORRUST_TRACKER_CONFIG_OVERRIDE_"; + +/// Path separator in env var names for nested values in configuration. +const CONFIG_OVERRIDE_SEPARATOR: &str = "__"; + +/// Core configuration for the tracker. +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Default, Clone)] +pub struct Configuration { + /// Configuration metadata. + pub metadata: Metadata, + + /// Logging configuration + pub logging: Logging, + + /// Core configuration. + pub core: Core, + + /// The list of UDP trackers the tracker is running. Each UDP tracker + /// represents a UDP server that the tracker is running and it has its own + /// configuration. + pub udp_trackers: Option>, + + /// The list of HTTP trackers the tracker is running. Each HTTP tracker + /// represents a HTTP server that the tracker is running and it has its own + /// configuration. + pub http_trackers: Option>, + + /// The HTTP API configuration. + pub http_api: Option, + + /// The Health Check API configuration. + pub health_check_api: HealthCheckApi, +} + +impl Configuration { + /// Returns the tracker public IP address id defined in the configuration, + /// and `None` otherwise. 
+ #[must_use] + pub fn get_ext_ip(&self) -> Option { + self.core.net.external_ip.as_ref().map(|external_ip| *external_ip) + } + + /// Saves the default configuration at the given path. + /// + /// # Errors + /// + /// Will return `Err` if `path` is not a valid path or the configuration + /// file cannot be created. + pub fn create_default_configuration_file(path: &str) -> Result { + let config = Configuration::default(); + config.save_to_file(path)?; + Ok(config) + } + + /// Loads the configuration from the `Info` struct. The whole + /// configuration in toml format is included in the `info.tracker_toml` + /// string. + /// + /// Configuration provided via env var has priority over config file path. + /// + /// # Errors + /// + /// Will return `Err` if the environment variable does not exist or has a bad configuration. + pub fn load(info: &Info) -> Result { + // Load configuration provided by the user, prioritizing env vars + let figment = if let Some(config_toml) = &info.config_toml { + Figment::from(Toml::string(config_toml)).merge(Env::prefixed(CONFIG_OVERRIDE_PREFIX).split(CONFIG_OVERRIDE_SEPARATOR)) + } else { + Figment::from(Toml::file(&info.config_toml_path)) + .merge(Env::prefixed(CONFIG_OVERRIDE_PREFIX).split(CONFIG_OVERRIDE_SEPARATOR)) + }; + + // Make sure user has provided the mandatory options. + Self::check_mandatory_options(&figment)?; + + // Fill missing options with default values. + let figment = figment.join(Serialized::defaults(Configuration::default())); + + // Build final configuration. + let config: Configuration = figment.extract()?; + + // Make sure the provided schema version matches this version. + if config.metadata.schema_version != Version::new(VERSION_2_0_0) { + return Err(Error::UnsupportedVersion { + version: config.metadata.schema_version, + }); + } + + Ok(config) + } + + /// Some configuration options are mandatory. 
The tracker will panic if + /// the user doesn't provide an explicit value for them from one of the + /// configuration sources: TOML or ENV VARS. + /// + /// # Errors + /// + /// Will return an error if a mandatory configuration option is only + /// obtained by default value (code), meaning the user hasn't overridden it. + fn check_mandatory_options(figment: &Figment) -> Result<(), Error> { + let mandatory_options = ["metadata.schema_version", "logging.threshold", "core.private", "core.listed"]; + + for mandatory_option in mandatory_options { + figment + .find_value(mandatory_option) + .map_err(|_err| Error::MissingMandatoryOption { + path: mandatory_option.to_owned(), + })?; + } + + Ok(()) + } + + /// Saves the configuration to the configuration file. + /// + /// # Errors + /// + /// Will return `Err` if `filename` does not exist or the user does not have + /// permission to read it. Will also return `Err` if the configuration is + /// not valid or cannot be encoded to TOML. + /// + /// # Panics + /// + /// Will panic if the configuration cannot be written into the file. + pub fn save_to_file(&self, path: &str) -> Result<(), Error> { + fs::write(path, self.to_toml()).expect("Could not write to file!"); + Ok(()) + } + + /// Encodes the configuration to TOML. + /// + /// # Panics + /// + /// Will panic if it can't be converted to TOML. + #[must_use] + fn to_toml(&self) -> String { + // code-review: do we need to use Figment also to serialize into toml? + toml::to_string(self).expect("Could not encode TOML value") + } + + /// Encodes the configuration to JSON. + /// + /// # Panics + /// + /// Will panic if it can't be converted to JSON. + #[must_use] + pub fn to_json(&self) -> String { + // code-review: do we need to use Figment also to serialize into json? + serde_json::to_string_pretty(self).expect("Could not encode JSON value") + } + + /// Masks secrets in the configuration. 
+ #[must_use] + pub fn mask_secrets(mut self) -> Self { + self.core.database.mask_secrets(); + + if let Some(ref mut api) = self.http_api { + api.mask_secrets(); + } + + self + } +} + +impl Validator for Configuration { + fn validate(&self) -> Result<(), SemanticValidationError> { + self.core.validate() + } +} + +#[cfg(test)] +mod tests { + + use std::net::{IpAddr, Ipv4Addr}; + + use crate::v2_0_0::Configuration; + use crate::Info; + + #[cfg(test)] + fn default_config_toml() -> String { + let config = r#"[metadata] + app = "torrust-tracker" + purpose = "configuration" + schema_version = "2.0.0" + + [logging] + threshold = "info" + + [core] + inactive_peer_cleanup_interval = 600 + listed = false + private = false + tracker_usage_statistics = true + + [core.announce_policy] + interval = 120 + interval_min = 120 + + [core.database] + driver = "sqlite3" + path = "./storage/tracker/lib/database/sqlite3.db" + + [core.net] + external_ip = "0.0.0.0" + on_reverse_proxy = false + + [core.tracker_policy] + max_peer_timeout = 900 + persistent_torrent_completed_stat = false + remove_peerless_torrents = true + + [health_check_api] + bind_address = "127.0.0.1:1313" + "# + .lines() + .map(str::trim_start) + .collect::>() + .join("\n"); + config + } + + #[test] + fn configuration_should_have_default_values() { + let configuration = Configuration::default(); + + let toml = toml::to_string(&configuration).expect("Could not encode TOML value"); + + assert_eq!(toml, default_config_toml()); + } + + #[test] + fn configuration_should_contain_the_external_ip() { + let configuration = Configuration::default(); + + assert_eq!( + configuration.core.net.external_ip, + Some(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))) + ); + } + + #[test] + fn configuration_should_be_saved_in_a_toml_config_file() { + use std::{env, fs}; + + use uuid::Uuid; + + // Build temp config file path + let temp_directory = env::temp_dir(); + let temp_file = temp_directory.join(format!("test_config_{}.toml", Uuid::new_v4())); + 
+ // Convert to argument type for Configuration::save_to_file + let config_file_path = temp_file; + let path = config_file_path.to_string_lossy().to_string(); + + let default_configuration = Configuration::default(); + + default_configuration + .save_to_file(&path) + .expect("Could not save configuration to file"); + + let contents = fs::read_to_string(&path).expect("Something went wrong reading the file"); + + assert_eq!(contents, default_config_toml()); + } + + #[test] + fn configuration_should_use_the_default_values_when_only_the_mandatory_options_are_provided_by_the_user_via_toml_file() { + figment::Jail::expect_with(|jail| { + jail.create_file( + "tracker.toml", + r#" + [metadata] + schema_version = "2.0.0" + + [logging] + threshold = "info" + + [core] + listed = false + private = false + "#, + )?; + + let info = Info { + config_toml: None, + config_toml_path: "tracker.toml".to_string(), + }; + + let configuration = Configuration::load(&info).expect("Could not load configuration from file"); + + assert_eq!(configuration, Configuration::default()); + + Ok(()) + }); + } + + #[test] + fn configuration_should_use_the_default_values_when_only_the_mandatory_options_are_provided_by_the_user_via_toml_content() { + figment::Jail::expect_with(|_jail| { + let config_toml = r#" + [metadata] + schema_version = "2.0.0" + + [logging] + threshold = "info" + + [core] + listed = false + private = false + "# + .to_string(); + + let info = Info { + config_toml: Some(config_toml), + config_toml_path: String::new(), + }; + + let configuration = Configuration::load(&info).expect("Could not load configuration from file"); + + assert_eq!(configuration, Configuration::default()); + + Ok(()) + }); + } + + #[test] + fn default_configuration_could_be_overwritten_from_a_single_env_var_with_toml_contents() { + figment::Jail::expect_with(|_jail| { + let config_toml = r#" + [metadata] + schema_version = "2.0.0" + + [logging] + threshold = "info" + + [core] + listed = false + private = false + 
+ [core.database] + path = "OVERWRITTEN DEFAULT DB PATH" + "# + .to_string(); + + let info = Info { + config_toml: Some(config_toml), + config_toml_path: String::new(), + }; + + let configuration = Configuration::load(&info).expect("Could not load configuration from file"); + + assert_eq!(configuration.core.database.path, "OVERWRITTEN DEFAULT DB PATH".to_string()); + + Ok(()) + }); + } + + #[test] + fn default_configuration_could_be_overwritten_from_a_toml_config_file() { + figment::Jail::expect_with(|jail| { + jail.create_file( + "tracker.toml", + r#" + [metadata] + schema_version = "2.0.0" + + [logging] + threshold = "info" + + [core] + listed = false + private = false + + [core.database] + path = "OVERWRITTEN DEFAULT DB PATH" + "#, + )?; + + let info = Info { + config_toml: None, + config_toml_path: "tracker.toml".to_string(), + }; + + let configuration = Configuration::load(&info).expect("Could not load configuration from file"); + + assert_eq!(configuration.core.database.path, "OVERWRITTEN DEFAULT DB PATH".to_string()); + + Ok(()) + }); + } + + #[test] + fn configuration_should_allow_to_overwrite_the_default_tracker_api_token_for_admin_with_an_env_var() { + figment::Jail::expect_with(|jail| { + jail.set_env("TORRUST_TRACKER_CONFIG_OVERRIDE_HTTP_API__ACCESS_TOKENS__ADMIN", "NewToken"); + + let info = Info { + config_toml: Some(default_config_toml()), + config_toml_path: String::new(), + }; + + let configuration = Configuration::load(&info).expect("Could not load configuration from file"); + + assert_eq!( + configuration.http_api.unwrap().access_tokens.get("admin"), + Some("NewToken".to_owned()).as_ref() + ); + + Ok(()) + }); + } +} diff --git a/packages/configuration/src/v2_0_0/network.rs b/packages/configuration/src/v2_0_0/network.rs new file mode 100644 index 000000000..8e53d419c --- /dev/null +++ b/packages/configuration/src/v2_0_0/network.rs @@ -0,0 +1,41 @@ +use std::net::{IpAddr, Ipv4Addr}; + +use serde::{Deserialize, Serialize}; + 
+#[allow(clippy::struct_excessive_bools)] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] +pub struct Network { + /// The external IP address of the tracker. If the client is using a + /// loopback IP address, this IP address will be used instead. If the peer + /// is using a loopback IP address, the tracker assumes that the peer is + /// in the same network as the tracker and will use the tracker's IP + /// address instead. + #[serde(default = "Network::default_external_ip")] + pub external_ip: Option, + + /// Weather the tracker is behind a reverse proxy or not. + /// If the tracker is behind a reverse proxy, the `X-Forwarded-For` header + /// sent from the proxy will be used to get the client's IP address. + #[serde(default = "Network::default_on_reverse_proxy")] + pub on_reverse_proxy: bool, +} + +impl Default for Network { + fn default() -> Self { + Self { + external_ip: Self::default_external_ip(), + on_reverse_proxy: Self::default_on_reverse_proxy(), + } + } +} + +impl Network { + #[allow(clippy::unnecessary_wraps)] + fn default_external_ip() -> Option { + Some(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))) + } + + fn default_on_reverse_proxy() -> bool { + false + } +} diff --git a/packages/configuration/src/v2_0_0/tracker_api.rs b/packages/configuration/src/v2_0_0/tracker_api.rs new file mode 100644 index 000000000..2da21758b --- /dev/null +++ b/packages/configuration/src/v2_0_0/tracker_api.rs @@ -0,0 +1,88 @@ +use std::collections::HashMap; +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use serde::{Deserialize, Serialize}; +use serde_with::serde_as; + +use crate::TslConfig; + +pub type AccessTokens = HashMap; + +/// Configuration for the HTTP API. +#[serde_as] +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] +pub struct HttpApi { + /// The address the tracker will bind to. + /// The format is `ip:port`, for example `0.0.0.0:6969`. If you want to + /// listen to all interfaces, use `0.0.0.0`. 
If you want the operating + /// system to choose a random port, use port `0`. + #[serde(default = "HttpApi::default_bind_address")] + pub bind_address: SocketAddr, + + /// TSL config. Only used if `ssl_enabled` is true. + #[serde(default = "HttpApi::default_tsl_config")] + pub tsl_config: Option, + + /// Access tokens for the HTTP API. The key is a label identifying the + /// token and the value is the token itself. The token is used to + /// authenticate the user. All tokens are valid for all endpoints and have + /// all permissions. + #[serde(default = "HttpApi::default_access_tokens")] + pub access_tokens: AccessTokens, +} + +impl Default for HttpApi { + fn default() -> Self { + Self { + bind_address: Self::default_bind_address(), + tsl_config: Self::default_tsl_config(), + access_tokens: Self::default_access_tokens(), + } + } +} + +impl HttpApi { + fn default_bind_address() -> SocketAddr { + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 1212) + } + + #[allow(clippy::unnecessary_wraps)] + fn default_tsl_config() -> Option { + None + } + + fn default_access_tokens() -> AccessTokens { + [].iter().cloned().collect() + } + + pub fn add_token(&mut self, key: &str, token: &str) { + self.access_tokens.insert(key.to_string(), token.to_string()); + } + + pub fn mask_secrets(&mut self) { + for token in self.access_tokens.values_mut() { + *token = "***".to_string(); + } + } +} + +#[cfg(test)] +mod tests { + use crate::v2_0_0::tracker_api::HttpApi; + + #[test] + fn default_http_api_configuration_should_not_contains_any_token() { + let configuration = HttpApi::default(); + + assert_eq!(configuration.access_tokens.values().len(), 0); + } + + #[test] + fn http_api_configuration_should_allow_adding_tokens() { + let mut configuration = HttpApi::default(); + + configuration.add_token("admin", "MyAccessToken"); + + assert!(configuration.access_tokens.values().any(|t| t == "MyAccessToken")); + } +} diff --git a/packages/configuration/src/v2_0_0/udp_tracker.rs 
b/packages/configuration/src/v2_0_0/udp_tracker.rs new file mode 100644 index 000000000..b3d420d72 --- /dev/null +++ b/packages/configuration/src/v2_0_0/udp_tracker.rs @@ -0,0 +1,26 @@ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, Clone)] +pub struct UdpTracker { + /// The address the tracker will bind to. + /// The format is `ip:port`, for example `0.0.0.0:6969`. If you want to + /// listen to all interfaces, use `0.0.0.0`. If you want the operating + /// system to choose a random port, use port `0`. + #[serde(default = "UdpTracker::default_bind_address")] + pub bind_address: SocketAddr, +} +impl Default for UdpTracker { + fn default() -> Self { + Self { + bind_address: Self::default_bind_address(), + } + } +} + +impl UdpTracker { + fn default_bind_address() -> SocketAddr { + SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), 6969) + } +} diff --git a/packages/configuration/src/validator.rs b/packages/configuration/src/validator.rs new file mode 100644 index 000000000..4555b88dd --- /dev/null +++ b/packages/configuration/src/validator.rs @@ -0,0 +1,19 @@ +//! Trait to validate semantic errors. +//! +//! Errors could involve more than one configuration option. Some configuration +//! combinations can be incompatible. +use thiserror::Error; + +/// Errors that can occur validating the configuration. +#[derive(Error, Debug)] +pub enum SemanticValidationError { + #[error("Private mode section in configuration can only be included when the tracker is running in private mode.")] + UselessPrivateModeSection, +} + +pub trait Validator { + /// # Errors + /// + /// Will return an error if the configuration is invalid. 
+ fn validate(&self) -> Result<(), SemanticValidationError>; +} diff --git a/packages/located-error/Cargo.toml b/packages/located-error/Cargo.toml index b4c813df3..637ea3055 100644 --- a/packages/located-error/Cargo.toml +++ b/packages/located-error/Cargo.toml @@ -8,14 +8,14 @@ authors.workspace = true documentation.workspace = true edition.workspace = true homepage.workspace = true -license-file.workspace = true +license.workspace = true publish.workspace = true repository.workspace = true rust-version.workspace = true version.workspace = true [dependencies] -log = { version = "0.4", features = ["release_max_level_info"] } +tracing = "0" [dev-dependencies] -thiserror = "1.0" +thiserror = "1" diff --git a/packages/located-error/src/lib.rs b/packages/located-error/src/lib.rs index bf8618686..bfd4d4a86 100644 --- a/packages/located-error/src/lib.rs +++ b/packages/located-error/src/lib.rs @@ -33,6 +33,10 @@ use std::error::Error; use std::panic::Location; use std::sync::Arc; +use tracing::debug; + +pub type DynError = Arc; + /// A generic wrapper around an error. /// /// Where `E` is the inner error (source error). 
@@ -90,13 +94,13 @@ where source: Arc::new(self.0), location: Box::new(*std::panic::Location::caller()), }; - log::debug!("{e}"); + debug!("{e}"); e } } #[allow(clippy::from_over_into)] -impl<'a> Into> for Arc { +impl<'a> Into> for DynError { #[track_caller] fn into(self) -> LocatedError<'a, dyn std::error::Error + Send + Sync> { LocatedError { diff --git a/packages/primitives/Cargo.toml b/packages/primitives/Cargo.toml index ce6c20ff0..174750fbb 100644 --- a/packages/primitives/Cargo.toml +++ b/packages/primitives/Cargo.toml @@ -8,12 +8,16 @@ authors.workspace = true documentation.workspace = true edition.workspace = true homepage.workspace = true -license-file.workspace = true +license.workspace = true publish.workspace = true repository.workspace = true rust-version.workspace = true version.workspace = true [dependencies] -derive_more = "0.99" -serde = { version = "1.0", features = ["derive"] } +binascii = "0" +derive_more = "0" +serde = { version = "1", features = ["derive"] } +tdyne-peer-id = "1" +tdyne-peer-id-registry = "0" +thiserror = "1" diff --git a/packages/primitives/src/announce_event.rs b/packages/primitives/src/announce_event.rs new file mode 100644 index 000000000..3bd560084 --- /dev/null +++ b/packages/primitives/src/announce_event.rs @@ -0,0 +1,43 @@ +//! Copyright (c) 2020-2023 Joakim FrostegÃ¥rd and The Torrust Developers +//! +//! Distributed under Apache 2.0 license + +use serde::{Deserialize, Serialize}; + +/// Announce events. Described on the +/// [BEP 3. The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) +#[derive(Hash, Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] +pub enum AnnounceEvent { + /// The peer has started downloading the torrent. + Started, + /// The peer has ceased downloading the torrent. + Stopped, + /// The peer has completed downloading the torrent. + Completed, + /// This is one of the announcements done at regular intervals. 
+ None, +} + +impl AnnounceEvent { + #[inline] + #[must_use] + pub fn from_i32(i: i32) -> Self { + match i { + 1 => Self::Completed, + 2 => Self::Started, + 3 => Self::Stopped, + _ => Self::None, + } + } + + #[inline] + #[must_use] + pub fn to_i32(&self) -> i32 { + match self { + AnnounceEvent::None => 0, + AnnounceEvent::Completed => 1, + AnnounceEvent::Started => 2, + AnnounceEvent::Stopped => 3, + } + } +} diff --git a/packages/primitives/src/info_hash.rs b/packages/primitives/src/info_hash.rs new file mode 100644 index 000000000..a07cc41a2 --- /dev/null +++ b/packages/primitives/src/info_hash.rs @@ -0,0 +1,184 @@ +use std::hash::{DefaultHasher, Hash, Hasher}; +use std::panic::Location; + +use thiserror::Error; + +/// `BitTorrent` Info Hash v1 +#[derive(PartialEq, Eq, Hash, Clone, Copy, Default, Debug)] +pub struct InfoHash(pub [u8; 20]); + +pub const INFO_HASH_BYTES_LEN: usize = 20; + +impl InfoHash { + /// Create a new `InfoHash` from a byte slice. + /// + /// # Panics + /// + /// Will panic if byte slice does not contains the exact amount of bytes need for the `InfoHash`. + #[must_use] + pub fn from_bytes(bytes: &[u8]) -> Self { + assert_eq!(bytes.len(), INFO_HASH_BYTES_LEN); + let mut ret = Self([0u8; INFO_HASH_BYTES_LEN]); + ret.0.clone_from_slice(bytes); + ret + } + + /// Returns the `InfoHash` internal byte array. + #[must_use] + pub fn bytes(&self) -> [u8; 20] { + self.0 + } + + /// Returns the `InfoHash` as a hex string. 
+ #[must_use] + pub fn to_hex_string(&self) -> String { + self.to_string() + } +} + +impl Ord for InfoHash { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.0.cmp(&other.0) + } +} + +impl std::cmp::PartialOrd for InfoHash { + fn partial_cmp(&self, other: &InfoHash) -> Option { + Some(self.cmp(other)) + } +} + +impl std::fmt::Display for InfoHash { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let mut chars = [0u8; 40]; + binascii::bin2hex(&self.0, &mut chars).expect("failed to hexlify"); + write!(f, "{}", std::str::from_utf8(&chars).unwrap()) + } +} + +impl std::str::FromStr for InfoHash { + type Err = binascii::ConvertError; + + fn from_str(s: &str) -> Result { + let mut i = Self([0u8; 20]); + if s.len() != 40 { + return Err(binascii::ConvertError::InvalidInputLength); + } + binascii::hex2bin(s.as_bytes(), &mut i.0)?; + Ok(i) + } +} + +impl std::convert::From<&[u8]> for InfoHash { + fn from(data: &[u8]) -> InfoHash { + assert_eq!(data.len(), 20); + let mut ret = InfoHash([0u8; 20]); + ret.0.clone_from_slice(data); + ret + } +} + +/// for testing +impl std::convert::From<&DefaultHasher> for InfoHash { + fn from(data: &DefaultHasher) -> InfoHash { + let n = data.finish().to_le_bytes(); + InfoHash([ + n[0], n[1], n[2], n[3], n[4], n[5], n[6], n[7], n[0], n[1], n[2], n[3], n[4], n[5], n[6], n[7], n[0], n[1], n[2], + n[3], + ]) + } +} + +impl std::convert::From<&i32> for InfoHash { + fn from(n: &i32) -> InfoHash { + let n = n.to_le_bytes(); + InfoHash([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, n[0], n[1], n[2], n[3]]) + } +} + +impl std::convert::From<[u8; 20]> for InfoHash { + fn from(val: [u8; 20]) -> Self { + InfoHash(val) + } +} + +/// Errors that can occur when converting from a `Vec` to an `InfoHash`. +#[derive(Error, Debug)] +pub enum ConversionError { + /// Not enough bytes for infohash. An infohash is 20 bytes. 
+ #[error("not enough bytes for infohash: {message} {location}")] + NotEnoughBytes { + location: &'static Location<'static>, + message: String, + }, + /// Too many bytes for infohash. An infohash is 20 bytes. + #[error("too many bytes for infohash: {message} {location}")] + TooManyBytes { + location: &'static Location<'static>, + message: String, + }, +} + +impl TryFrom> for InfoHash { + type Error = ConversionError; + + fn try_from(bytes: Vec) -> Result { + if bytes.len() < INFO_HASH_BYTES_LEN { + return Err(ConversionError::NotEnoughBytes { + location: Location::caller(), + message: format! {"got {} bytes, expected {}", bytes.len(), INFO_HASH_BYTES_LEN}, + }); + } + if bytes.len() > INFO_HASH_BYTES_LEN { + return Err(ConversionError::TooManyBytes { + location: Location::caller(), + message: format! {"got {} bytes, expected {}", bytes.len(), INFO_HASH_BYTES_LEN}, + }); + } + Ok(Self::from_bytes(&bytes)) + } +} + +impl serde::ser::Serialize for InfoHash { + fn serialize(&self, serializer: S) -> Result { + let mut buffer = [0u8; 40]; + let bytes_out = binascii::bin2hex(&self.0, &mut buffer).ok().unwrap(); + let str_out = std::str::from_utf8(bytes_out).unwrap(); + serializer.serialize_str(str_out) + } +} + +impl<'de> serde::de::Deserialize<'de> for InfoHash { + fn deserialize>(des: D) -> Result { + des.deserialize_str(InfoHashVisitor) + } +} + +struct InfoHashVisitor; + +impl<'v> serde::de::Visitor<'v> for InfoHashVisitor { + type Value = InfoHash; + + fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "a 40 character long hash") + } + + fn visit_str(self, v: &str) -> Result { + if v.len() != 40 { + return Err(serde::de::Error::invalid_value( + serde::de::Unexpected::Str(v), + &"a 40 character long string", + )); + } + + let mut res = InfoHash([0u8; 20]); + + if binascii::hex2bin(v.as_bytes(), &mut res.0).is_err() { + return Err(serde::de::Error::invalid_value( + serde::de::Unexpected::Str(v), + &"a hexadecimal 
string", + )); + }; + Ok(res) + } +} diff --git a/packages/primitives/src/lib.rs b/packages/primitives/src/lib.rs index e6f8cb93b..d6f29c2b5 100644 --- a/packages/primitives/src/lib.rs +++ b/packages/primitives/src/lib.rs @@ -4,44 +4,42 @@ //! which is a `BitTorrent` tracker server. These structures are used not only //! by the tracker server crate, but also by other crates in the Torrust //! ecosystem. +use std::collections::BTreeMap; +use std::time::Duration; + +use info_hash::InfoHash; use serde::{Deserialize, Serialize}; -/// The database management system used by the tracker. -/// -/// Refer to: -/// -/// - [Torrust Tracker Configuration](https://docs.rs/torrust-tracker-configuration). -/// - [Torrust Tracker](https://docs.rs/torrust-tracker). -/// -/// For more information about persistence. -#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, derive_more::Display, Clone)] -pub enum DatabaseDriver { - // TODO: Move to the database crate once that gets its own crate. - /// The Sqlite3 database driver. - Sqlite3, - /// The MySQL database driver. - MySQL, -} +pub mod announce_event; +pub mod info_hash; +pub mod pagination; +pub mod peer; +pub mod swarm_metadata; +pub mod torrent_metrics; -/// The mode the tracker will run in. +/// Duration since the Unix Epoch. +pub type DurationSinceUnixEpoch = Duration; + +/// Serializes a `DurationSinceUnixEpoch` as a Unix timestamp in milliseconds. +/// # Errors /// -/// Refer to [Torrust Tracker Configuration](https://docs.rs/torrust-tracker-configuration) -/// to know how to configure the tracker to run in each mode. -#[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Eq, Debug)] -pub enum TrackerMode { - /// Will track every new info hash and serve every peer. - #[serde(rename = "public")] - Public, +/// Will return `serde::Serializer::Error` if unable to serialize the `unix_time_value`. 
+pub fn ser_unix_time_value(unix_time_value: &DurationSinceUnixEpoch, ser: S) -> Result { + #[allow(clippy::cast_possible_truncation)] + ser.serialize_u64(unix_time_value.as_millis() as u64) +} - /// Will only track whitelisted info hashes. - #[serde(rename = "listed")] - Listed, +/// IP version used by the peer to connect to the tracker: IPv4 or IPv6 +#[derive(PartialEq, Eq, Debug)] +pub enum IPVersion { + /// + IPv4, + /// + IPv6, +} - /// Will only serve authenticated peers - #[serde(rename = "private")] - Private, +/// Number of bytes downloaded, uploaded or pending to download (left) by the peer. +#[derive(Hash, Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)] +pub struct NumberOfBytes(pub i64); - /// Will only track whitelisted info hashes and serve authenticated peers - #[serde(rename = "private_listed")] - PrivateListed, -} +pub type PersistentTorrents = BTreeMap; diff --git a/packages/primitives/src/pagination.rs b/packages/primitives/src/pagination.rs new file mode 100644 index 000000000..96b5ad662 --- /dev/null +++ b/packages/primitives/src/pagination.rs @@ -0,0 +1,46 @@ +use derive_more::Constructor; +use serde::Deserialize; + +/// A struct to keep information about the page when results are being paginated +#[derive(Deserialize, Copy, Clone, Debug, PartialEq, Constructor)] +pub struct Pagination { + /// The page number, starting at 0 + pub offset: u32, + /// Page size. 
The number of results per page + pub limit: u32, +} + +impl Pagination { + #[must_use] + pub fn new_with_options(offset_option: Option, limit_option: Option) -> Self { + let offset = match offset_option { + Some(offset) => offset, + None => Pagination::default_offset(), + }; + let limit = match limit_option { + Some(offset) => offset, + None => Pagination::default_limit(), + }; + + Self { offset, limit } + } + + #[must_use] + pub fn default_offset() -> u32 { + 0 + } + + #[must_use] + pub fn default_limit() -> u32 { + 4000 + } +} + +impl Default for Pagination { + fn default() -> Self { + Self { + offset: Self::default_offset(), + limit: Self::default_limit(), + } + } +} diff --git a/src/tracker/peer.rs b/packages/primitives/src/peer.rs similarity index 67% rename from src/tracker/peer.rs rename to packages/primitives/src/peer.rs index d6517f213..ab7559508 100644 --- a/src/tracker/peer.rs +++ b/packages/primitives/src/peer.rs @@ -3,12 +3,12 @@ //! A sample peer: //! //! ```rust,no_run -//! use torrust_tracker::tracker::peer; +//! use torrust_tracker_primitives::peer; //! use std::net::SocketAddr; //! use std::net::IpAddr; //! use std::net::Ipv4Addr; -//! use torrust_tracker::shared::clock::DurationSinceUnixEpoch; -//! use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; +//! use torrust_tracker_primitives::DurationSinceUnixEpoch; +//! //! //! peer::Peer { //! peer_id: peer::Id(*b"-qB00000000000000000"), @@ -20,38 +20,26 @@ //! event: AnnounceEvent::Started, //! }; //! 
``` + use std::net::{IpAddr, SocketAddr}; -use std::panic::Location; +use std::sync::Arc; -use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; -use serde; use serde::Serialize; -use thiserror::Error; -use crate::shared::bit_torrent::common::{AnnounceEventDef, NumberOfBytesDef}; -use crate::shared::clock::utils::ser_unix_time_value; -use crate::shared::clock::DurationSinceUnixEpoch; - -/// IP version used by the peer to connect to the tracker: IPv4 or IPv6 -#[derive(PartialEq, Eq, Debug)] -pub enum IPVersion { - /// - IPv4, - /// - IPv6, -} +use crate::announce_event::AnnounceEvent; +use crate::{ser_unix_time_value, DurationSinceUnixEpoch, IPVersion, NumberOfBytes}; /// Peer struct used by the core `Tracker`. /// /// A sample peer: /// /// ```rust,no_run -/// use torrust_tracker::tracker::peer; +/// use torrust_tracker_primitives::peer; /// use std::net::SocketAddr; /// use std::net::IpAddr; /// use std::net::Ipv4Addr; -/// use torrust_tracker::shared::clock::DurationSinceUnixEpoch; -/// use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; +/// use torrust_tracker_primitives::DurationSinceUnixEpoch; +/// /// /// peer::Peer { /// peer_id: peer::Id(*b"-qB00000000000000000"), @@ -63,7 +51,7 @@ pub enum IPVersion { /// event: AnnounceEvent::Started, /// }; /// ``` -#[derive(PartialEq, Eq, Debug, Clone, Serialize, Copy)] +#[derive(Debug, Clone, Serialize, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct Peer { /// ID used by the downloader peer pub peer_id: Id, @@ -73,19 +61,67 @@ pub struct Peer { #[serde(serialize_with = "ser_unix_time_value")] pub updated: DurationSinceUnixEpoch, /// The total amount of bytes uploaded by this peer so far - #[serde(with = "NumberOfBytesDef")] pub uploaded: NumberOfBytes, /// The total amount of bytes downloaded by this peer so far - #[serde(with = "NumberOfBytesDef")] pub downloaded: NumberOfBytes, /// The number of bytes this peer still has to download - #[serde(with = "NumberOfBytesDef")] pub left: NumberOfBytes, 
/// This is an optional key which maps to started, completed, or stopped (or empty, which is the same as not being present). - #[serde(with = "AnnounceEventDef")] pub event: AnnounceEvent, } +pub trait ReadInfo { + fn is_seeder(&self) -> bool; + fn get_event(&self) -> AnnounceEvent; + fn get_id(&self) -> Id; + fn get_updated(&self) -> DurationSinceUnixEpoch; + fn get_address(&self) -> SocketAddr; +} + +impl ReadInfo for Peer { + fn is_seeder(&self) -> bool { + self.left.0 <= 0 && self.event != AnnounceEvent::Stopped + } + + fn get_event(&self) -> AnnounceEvent { + self.event + } + + fn get_id(&self) -> Id { + self.peer_id + } + + fn get_updated(&self) -> DurationSinceUnixEpoch { + self.updated + } + + fn get_address(&self) -> SocketAddr { + self.peer_addr + } +} + +impl ReadInfo for Arc { + fn is_seeder(&self) -> bool { + self.left.0 <= 0 && self.event != AnnounceEvent::Stopped + } + + fn get_event(&self) -> AnnounceEvent { + self.event + } + + fn get_id(&self) -> Id { + self.peer_id + } + + fn get_updated(&self) -> DurationSinceUnixEpoch { + self.updated + } + + fn get_address(&self) -> SocketAddr { + self.peer_addr + } +} + impl Peer { #[must_use] pub fn is_seeder(&self) -> bool { @@ -110,22 +146,9 @@ impl Peer { } } -/// Peer ID. A 20-byte array. -/// -/// A string of length 20 which this downloader uses as its id. -/// Each downloader generates its own id at random at the start of a new download. -/// -/// A sample peer ID: -/// -/// ```rust,no_run -/// use torrust_tracker::tracker::peer; -/// -/// let peer_id = peer::Id(*b"-qB00000000000000000"); -/// ``` -#[derive(PartialEq, Eq, Hash, Clone, Debug, PartialOrd, Ord, Copy)] -pub struct Id(pub [u8; 20]); +use std::panic::Location; -const PEER_ID_BYTES_LEN: usize = 20; +use thiserror::Error; /// Error returned when trying to convert an invalid peer id from another type. 
/// @@ -144,36 +167,22 @@ pub enum IdConversionError { }, } -impl Id { - /// # Panics - /// - /// Will panic if byte slice does not contains the exact amount of bytes need for the `Id`. - #[must_use] - pub fn from_bytes(bytes: &[u8]) -> Self { - assert_eq!( - PEER_ID_BYTES_LEN, - bytes.len(), - "we are testing the equality of the constant: `PEER_ID_BYTES_LEN` ({}) and the supplied `bytes` length: {}", - PEER_ID_BYTES_LEN, - bytes.len(), - ); - let mut ret = Self([0u8; PEER_ID_BYTES_LEN]); - ret.0.clone_from_slice(bytes); - ret - } - - #[must_use] - pub fn to_bytes(&self) -> [u8; 20] { - self.0 - } -} - impl From<[u8; 20]> for Id { fn from(bytes: [u8; 20]) -> Self { Id(bytes) } } +impl From for Id { + fn from(number: i32) -> Self { + let peer_id = number.to_le_bytes(); + Id::from([ + 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, peer_id[0], peer_id[1], peer_id[2], + peer_id[3], + ]) + } +} + impl TryFrom> for Id { type Error = IdConversionError; @@ -211,7 +220,47 @@ impl std::fmt::Display for Id { } } +/// Peer ID. A 20-byte array. +/// +/// A string of length 20 which this downloader uses as its id. +/// Each downloader generates its own id at random at the start of a new download. +/// +/// A sample peer ID: +/// +/// ```rust,no_run +/// use torrust_tracker_primitives::peer; +/// +/// let peer_id = peer::Id(*b"-qB00000000000000000"); +/// ``` +/// +#[derive(PartialEq, Eq, Hash, Clone, Debug, PartialOrd, Ord, Copy)] +pub struct Id(pub [u8; 20]); + +pub const PEER_ID_BYTES_LEN: usize = 20; + impl Id { + /// # Panics + /// + /// Will panic if byte slice does not contains the exact amount of bytes need for the `Id`. 
+ #[must_use] + pub fn from_bytes(bytes: &[u8]) -> Self { + assert_eq!( + PEER_ID_BYTES_LEN, + bytes.len(), + "we are testing the equality of the constant: `PEER_ID_BYTES_LEN` ({}) and the supplied `bytes` length: {}", + PEER_ID_BYTES_LEN, + bytes.len(), + ); + let mut ret = Self([0u8; PEER_ID_BYTES_LEN]); + ret.0.clone_from_slice(bytes); + ret + } + + #[must_use] + pub fn to_bytes(&self) -> [u8; 20] { + self.0 + } + #[must_use] /// Converts to hex string. /// @@ -252,81 +301,9 @@ impl Id { } #[must_use] - pub fn get_client_name(&self) -> Option<&'static str> { - if self.0[0] == b'M' { - return Some("BitTorrent"); - } - if self.0[0] == b'-' { - let name = match &self.0[1..3] { - b"AG" | b"A~" => "Ares", - b"AR" => "Arctic", - b"AV" => "Avicora", - b"AX" => "BitPump", - b"AZ" => "Azureus", - b"BB" => "BitBuddy", - b"BC" => "BitComet", - b"BF" => "Bitflu", - b"BG" => "BTG (uses Rasterbar libtorrent)", - b"BR" => "BitRocket", - b"BS" => "BTSlave", - b"BX" => "~Bittorrent X", - b"CD" => "Enhanced CTorrent", - b"CT" => "CTorrent", - b"DE" => "DelugeTorrent", - b"DP" => "Propagate Data Client", - b"EB" => "EBit", - b"ES" => "electric sheep", - b"FT" => "FoxTorrent", - b"FW" => "FrostWire", - b"FX" => "Freebox BitTorrent", - b"GS" => "GSTorrent", - b"HL" => "Halite", - b"HN" => "Hydranode", - b"KG" => "KGet", - b"KT" => "KTorrent", - b"LH" => "LH-ABC", - b"LP" => "Lphant", - b"LT" => "libtorrent", - b"lt" => "libTorrent", - b"LW" => "LimeWire", - b"MO" => "MonoTorrent", - b"MP" => "MooPolice", - b"MR" => "Miro", - b"MT" => "MoonlightTorrent", - b"NX" => "Net Transport", - b"PD" => "Pando", - b"qB" => "qBittorrent", - b"QD" => "QQDownload", - b"QT" => "Qt 4 Torrent example", - b"RT" => "Retriever", - b"S~" => "Shareaza alpha/beta", - b"SB" => "~Swiftbit", - b"SS" => "SwarmScope", - b"ST" => "SymTorrent", - b"st" => "sharktorrent", - b"SZ" => "Shareaza", - b"TN" => "TorrentDotNET", - b"TR" => "Transmission", - b"TS" => "Torrentstorm", - b"TT" => "TuoTu", - b"UL" => 
"uLeecher!", - b"UT" => "µTorrent", - b"UW" => "µTorrent Web", - b"VG" => "Vagaa", - b"WD" => "WebTorrent Desktop", - b"WT" => "BitLet", - b"WW" => "WebTorrent", - b"WY" => "FireTorrent", - b"XL" => "Xunlei", - b"XT" => "XanTorrent", - b"XX" => "Xtorrent", - b"ZT" => "ZipTorrent", - _ => return None, - }; - Some(name) - } else { - None - } + pub fn get_client_name(&self) -> Option { + let peer_id = tdyne_peer_id::PeerId::from(self.0); + tdyne_peer_id_registry::parse(peer_id).ok().map(|parsed| parsed.client) } } @@ -336,9 +313,9 @@ impl Serialize for Id { S: serde::Serializer, { #[derive(Serialize)] - struct PeerIdInfo<'a> { + struct PeerIdInfo { id: Option, - client: Option<&'a str>, + client: Option, } let obj = PeerIdInfo { @@ -349,11 +326,147 @@ impl Serialize for Id { } } -#[cfg(test)] -mod test { +/// Marker Trait for Peer Vectors +pub trait Encoding: From + PartialEq {} + +impl FromIterator for Vec

{ + fn from_iter>(iter: T) -> Self { + let mut peers: Vec

= vec![]; + + for peer in iter { + peers.push(peer.into()); + } + + peers + } +} + +pub mod fixture { + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + + use super::{Id, Peer}; + use crate::announce_event::AnnounceEvent; + use crate::{DurationSinceUnixEpoch, NumberOfBytes}; + + #[derive(PartialEq, Debug)] + + pub struct PeerBuilder { + peer: Peer, + } + + #[allow(clippy::derivable_impls)] + impl Default for PeerBuilder { + fn default() -> Self { + Self { peer: Peer::default() } + } + } + + impl PeerBuilder { + #[allow(dead_code)] + #[must_use] + pub fn seeder() -> Self { + let peer = Peer { + peer_id: Id(*b"-qB00000000000000001"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Completed, + }; + + Self { peer } + } + + #[allow(dead_code)] + #[must_use] + pub fn leecher() -> Self { + let peer = Peer { + peer_id: Id(*b"-qB00000000000000002"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(10), + event: AnnounceEvent::Started, + }; + + Self { peer } + } + + #[allow(dead_code)] + #[must_use] + pub fn with_peer_id(mut self, peer_id: &Id) -> Self { + self.peer.peer_id = *peer_id; + self + } + + #[allow(dead_code)] + #[must_use] + pub fn with_peer_addr(mut self, peer_addr: &SocketAddr) -> Self { + self.peer.peer_addr = *peer_addr; + self + } + + #[allow(dead_code)] + #[must_use] + pub fn with_bytes_pending_to_download(mut self, left: i64) -> Self { + self.peer.left = NumberOfBytes(left); + self + } + + #[allow(dead_code)] + #[must_use] + pub fn with_no_bytes_pending_to_download(mut self) -> Self { + self.peer.left = NumberOfBytes(0); + self + } + + #[allow(dead_code)] + #[must_use] + pub fn 
last_updated_on(mut self, updated: DurationSinceUnixEpoch) -> Self { + self.peer.updated = updated; + self + } + + #[allow(dead_code)] + #[must_use] + pub fn build(self) -> Peer { + self.into() + } + + #[allow(dead_code)] + #[must_use] + pub fn into(self) -> Peer { + self.peer + } + } + + impl Default for Peer { + fn default() -> Self { + Self { + peer_id: Id::default(), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Started, + } + } + } + + impl Default for Id { + fn default() -> Self { + Self(*b"-qB00000000000000000") + } + } +} +#[cfg(test)] +pub mod test { mod torrent_peer_id { - use crate::tracker::peer; + use crate::peer; #[test] fn should_be_instantiated_from_a_byte_slice() { @@ -462,50 +575,4 @@ mod test { assert_eq!(peer::Id(*b"-qB00000000000000000").to_bytes(), *b"-qB00000000000000000"); } } - - mod torrent_peer { - - use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - use serde_json::Value; - - use crate::shared::clock::{Current, Time}; - use crate::tracker::peer::{self, Peer}; - - #[test] - fn it_should_be_serializable() { - let torrent_peer = Peer { - peer_id: peer::Id(*b"-qB00000000000000000"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: Current::now(), - uploaded: NumberOfBytes(0), - downloaded: NumberOfBytes(0), - left: NumberOfBytes(0), - event: AnnounceEvent::Started, - }; - - let raw_json = serde_json::to_string(&torrent_peer).unwrap(); - - let expected_raw_json = r#" - { - "peer_id": { - "id": "0x2d71423030303030303030303030303030303030", - "client": "qBittorrent" - }, - "peer_addr":"126.0.0.1:8080", - "updated":0, - "uploaded":0, - "downloaded":0, - "left":0, - "event":"Started" - } - "#; - - assert_eq!( - 
serde_json::from_str::(&raw_json).unwrap(), - serde_json::from_str::(expected_raw_json).unwrap() - ); - } - } } diff --git a/packages/primitives/src/swarm_metadata.rs b/packages/primitives/src/swarm_metadata.rs new file mode 100644 index 000000000..ca880b54d --- /dev/null +++ b/packages/primitives/src/swarm_metadata.rs @@ -0,0 +1,22 @@ +use derive_more::Constructor; + +/// Swarm statistics for one torrent. +/// Swarm metadata dictionary in the scrape response. +/// +/// See [BEP 48: Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html) +#[derive(Copy, Clone, Debug, PartialEq, Default, Constructor)] +pub struct SwarmMetadata { + /// (i.e `completed`): The number of peers that have ever completed downloading + pub downloaded: u32, // + /// (i.e `seeders`): The number of active peers that have completed downloading (seeders) + pub complete: u32, //seeders + /// (i.e `leechers`): The number of active peers that have not completed downloading (leechers) + pub incomplete: u32, +} + +impl SwarmMetadata { + #[must_use] + pub fn zeroed() -> Self { + Self::default() + } +} diff --git a/packages/primitives/src/torrent_metrics.rs b/packages/primitives/src/torrent_metrics.rs new file mode 100644 index 000000000..02de02954 --- /dev/null +++ b/packages/primitives/src/torrent_metrics.rs @@ -0,0 +1,25 @@ +use std::ops::AddAssign; + +/// Structure that holds general `Tracker` torrents metrics. +/// +/// Metrics are aggregate values for all torrents. +#[derive(Copy, Clone, Debug, PartialEq, Default)] +pub struct TorrentsMetrics { + /// Total number of seeders for all torrents + pub complete: u64, + /// Total number of peers that have ever completed downloading for all torrents. + pub downloaded: u64, + /// Total number of leechers for all torrents. + pub incomplete: u64, + /// Total number of torrents. 
+ pub torrents: u64, +} + +impl AddAssign for TorrentsMetrics { + fn add_assign(&mut self, rhs: Self) { + self.complete += rhs.complete; + self.downloaded += rhs.downloaded; + self.incomplete += rhs.incomplete; + self.torrents += rhs.torrents; + } +} diff --git a/packages/test-helpers/Cargo.toml b/packages/test-helpers/Cargo.toml index 85edb99af..5a4220b53 100644 --- a/packages/test-helpers/Cargo.toml +++ b/packages/test-helpers/Cargo.toml @@ -8,14 +8,12 @@ authors.workspace = true documentation.workspace = true edition.workspace = true homepage.workspace = true -license-file.workspace = true +license.workspace = true publish.workspace = true repository.workspace = true rust-version.workspace = true version.workspace = true [dependencies] -lazy_static = "1.4" -rand = "0.8.5" -torrust-tracker-configuration = { version = "3.0.0-alpha.11", path = "../configuration" } -torrust-tracker-primitives = { version = "3.0.0-alpha.11", path = "../primitives" } +rand = "0" +torrust-tracker-configuration = { version = "3.0.0-alpha.12", path = "../configuration" } diff --git a/packages/test-helpers/src/configuration.rs b/packages/test-helpers/src/configuration.rs index 437475ee2..0c4029b69 100644 --- a/packages/test-helpers/src/configuration.rs +++ b/packages/test-helpers/src/configuration.rs @@ -1,9 +1,8 @@ //! Tracker configuration factories for testing. use std::env; -use std::net::IpAddr; +use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; -use torrust_tracker_configuration::Configuration; -use torrust_tracker_primitives::TrackerMode; +use torrust_tracker_configuration::{Configuration, HttpApi, HttpTracker, Threshold, UdpTracker}; use crate::random; @@ -13,11 +12,11 @@ use crate::random; /// > **NOTICE**: This configuration is not meant to be used in production. /// /// > **NOTICE**: Port 0 is used for ephemeral ports, which means that the OS -/// will assign a random free port for the tracker to use. +/// > will assign a random free port for the tracker to use. 
/// -/// > **NOTICE**: You can change the log level to `debug` to see the logs of the -/// tracker while running the tests. That can be particularly useful when -/// debugging tests. +/// > **NOTICE**: You can change the log threshold to `debug` to see the logs of the +/// > tracker while running the tests. That can be particularly useful when +/// > debugging tests. /// /// # Panics /// @@ -27,31 +26,41 @@ pub fn ephemeral() -> Configuration { // todo: disable services that are not needed. // For example: a test for the UDP tracker should disable the API and HTTP tracker. - let mut config = Configuration { - log_level: Some("off".to_owned()), // Change to `debug` for tests debugging - ..Default::default() - }; + let mut config = Configuration::default(); + + config.logging.threshold = Threshold::Off; // Change to `debug` for tests debugging // Ephemeral socket address for API let api_port = 0u16; - config.http_api.enabled = true; - config.http_api.bind_address = format!("127.0.0.1:{}", &api_port); + let mut http_api = HttpApi { + bind_address: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), api_port), + ..Default::default() + }; + http_api.add_token("admin", "MyAccessToken"); + config.http_api = Some(http_api); + + // Ephemeral socket address for Health Check API + let health_check_api_port = 0u16; + config.health_check_api.bind_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), health_check_api_port); // Ephemeral socket address for UDP tracker let udp_port = 0u16; - config.udp_trackers[0].enabled = true; - config.udp_trackers[0].bind_address = format!("127.0.0.1:{}", &udp_port); + config.udp_trackers = Some(vec![UdpTracker { + bind_address: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), udp_port), + }]); // Ephemeral socket address for HTTP tracker let http_port = 0u16; - config.http_trackers[0].enabled = true; - config.http_trackers[0].bind_address = format!("127.0.0.1:{}", &http_port); + config.http_trackers = 
Some(vec![HttpTracker { + bind_address: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), http_port), + tsl_config: None, + }]); // Ephemeral sqlite database let temp_directory = env::temp_dir(); let random_db_id = random::string(16); let temp_file = temp_directory.join(format!("data_{random_db_id}.db")); - config.db_path = temp_file.to_str().unwrap().to_owned(); + temp_file.to_str().unwrap().clone_into(&mut config.core.database.path); config } @@ -61,7 +70,7 @@ pub fn ephemeral() -> Configuration { pub fn ephemeral_with_reverse_proxy() -> Configuration { let mut cfg = ephemeral(); - cfg.on_reverse_proxy = true; + cfg.core.net.on_reverse_proxy = true; cfg } @@ -71,47 +80,48 @@ pub fn ephemeral_with_reverse_proxy() -> Configuration { pub fn ephemeral_without_reverse_proxy() -> Configuration { let mut cfg = ephemeral(); - cfg.on_reverse_proxy = false; + cfg.core.net.on_reverse_proxy = false; cfg } /// Ephemeral configuration with `public` mode. #[must_use] -pub fn ephemeral_mode_public() -> Configuration { +pub fn ephemeral_public() -> Configuration { let mut cfg = ephemeral(); - cfg.mode = TrackerMode::Public; + cfg.core.private = false; cfg } /// Ephemeral configuration with `private` mode. #[must_use] -pub fn ephemeral_mode_private() -> Configuration { +pub fn ephemeral_private() -> Configuration { let mut cfg = ephemeral(); - cfg.mode = TrackerMode::Private; + cfg.core.private = true; cfg } /// Ephemeral configuration with `listed` mode. #[must_use] -pub fn ephemeral_mode_whitelisted() -> Configuration { +pub fn ephemeral_listed() -> Configuration { let mut cfg = ephemeral(); - cfg.mode = TrackerMode::Listed; + cfg.core.listed = true; cfg } /// Ephemeral configuration with `private_listed` mode. 
#[must_use] -pub fn ephemeral_mode_private_whitelisted() -> Configuration { +pub fn ephemeral_private_and_listed() -> Configuration { let mut cfg = ephemeral(); - cfg.mode = TrackerMode::PrivateListed; + cfg.core.private = true; + cfg.core.listed = true; cfg } @@ -121,7 +131,7 @@ pub fn ephemeral_mode_private_whitelisted() -> Configuration { pub fn ephemeral_with_external_ip(ip: IpAddr) -> Configuration { let mut cfg = ephemeral(); - cfg.external_ip = Some(ip.to_string()); + cfg.core.net.external_ip = Some(ip); cfg } @@ -132,11 +142,31 @@ pub fn ephemeral_with_external_ip(ip: IpAddr) -> Configuration { pub fn ephemeral_ipv6() -> Configuration { let mut cfg = ephemeral(); - let ipv6 = format!("[::]:{}", 0); + let ipv6 = SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)), 0); + + if let Some(ref mut http_api) = cfg.http_api { + http_api.bind_address.clone_from(&ipv6); + }; + + if let Some(ref mut http_trackers) = cfg.http_trackers { + http_trackers[0].bind_address.clone_from(&ipv6); + } + + if let Some(ref mut udp_trackers) = cfg.udp_trackers { + udp_trackers[0].bind_address.clone_from(&ipv6); + } + + cfg +} + +/// Ephemeral without running any services. +#[must_use] +pub fn ephemeral_with_no_services() -> Configuration { + let mut cfg = ephemeral(); - cfg.http_api.bind_address = ipv6.clone(); - cfg.http_trackers[0].bind_address = ipv6.clone(); - cfg.udp_trackers[0].bind_address = ipv6; + cfg.http_api = None; + cfg.http_trackers = None; + cfg.udp_trackers = None; cfg } diff --git a/packages/torrent-repository/Cargo.toml b/packages/torrent-repository/Cargo.toml new file mode 100644 index 000000000..f1f85a52d --- /dev/null +++ b/packages/torrent-repository/Cargo.toml @@ -0,0 +1,35 @@ +[package] +description = "A library that provides a repository of torrents files and their peers." 
+keywords = ["library", "repository", "torrents"] +name = "torrust-tracker-torrent-repository" +readme = "README.md" + +authors.workspace = true +categories.workspace = true +documentation.workspace = true +edition.workspace = true +homepage.workspace = true +license.workspace = true +publish.workspace = true +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[dependencies] +crossbeam-skiplist = "0" +dashmap = "6" +futures = "0" +parking_lot = "0" +tokio = { version = "1", features = ["macros", "net", "rt-multi-thread", "signal", "sync"] } +torrust-tracker-clock = { version = "3.0.0-alpha.12", path = "../clock" } +torrust-tracker-configuration = { version = "3.0.0-alpha.12", path = "../configuration" } +torrust-tracker-primitives = { version = "3.0.0-alpha.12", path = "../primitives" } + +[dev-dependencies] +async-std = { version = "1", features = ["attributes", "tokio1"] } +criterion = { version = "0", features = ["async_tokio"] } +rstest = "0" + +[[bench]] +harness = false +name = "repository_benchmark" diff --git a/packages/torrent-repository/README.md b/packages/torrent-repository/README.md new file mode 100644 index 000000000..ffc71f1d7 --- /dev/null +++ b/packages/torrent-repository/README.md @@ -0,0 +1,32 @@ +# Torrust Tracker Torrent Repository + +A library to provide a torrent repository to the [Torrust Tracker](https://github.com/torrust/torrust-tracker). 
+ +## Benchmarking + +```console +cargo bench -p torrust-tracker-torrent-repository +``` + +Example partial output: + +```output + Running benches/repository_benchmark.rs (target/release/deps/repository_benchmark-a9b0013c8d09c3c3) +add_one_torrent/RwLockStd + time: [63.057 ns 63.242 ns 63.506 ns] +Found 12 outliers among 100 measurements (12.00%) + 2 (2.00%) low severe + 2 (2.00%) low mild + 2 (2.00%) high mild + 6 (6.00%) high severe +add_one_torrent/RwLockStdMutexStd + time: [62.505 ns 63.077 ns 63.817 ns] +``` + +## Documentation + +[Crate documentation](https://docs.rs/torrust-tracker-torrent-repository). + +## License + +The project is licensed under the terms of the [GNU AFFERO GENERAL PUBLIC LICENSE](./LICENSE). diff --git a/packages/torrent-repository/benches/helpers/asyn.rs b/packages/torrent-repository/benches/helpers/asyn.rs new file mode 100644 index 000000000..1c6d9d915 --- /dev/null +++ b/packages/torrent-repository/benches/helpers/asyn.rs @@ -0,0 +1,153 @@ +use std::sync::Arc; +use std::time::{Duration, Instant}; + +use futures::stream::FuturesUnordered; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_torrent_repository::repository::RepositoryAsync; + +use super::utils::{generate_unique_info_hashes, DEFAULT_PEER}; + +pub async fn add_one_torrent(samples: u64) -> Duration +where + V: RepositoryAsync + Default, +{ + let start = Instant::now(); + + for _ in 0..samples { + let torrent_repository = V::default(); + + let info_hash = InfoHash([0; 20]); + + torrent_repository.upsert_peer(&info_hash, &DEFAULT_PEER).await; + + torrent_repository.get_swarm_metadata(&info_hash).await; + } + + start.elapsed() +} + +// Add one torrent ten thousand times in parallel (depending on the set worker threads) +pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: u64, sleep: Option) -> Duration +where + V: RepositoryAsync + Default, + Arc: Clone + Send + Sync + 'static, +{ + let torrent_repository = 
Arc::::default(); + let info_hash: &'static InfoHash = &InfoHash([0; 20]); + let handles = FuturesUnordered::new(); + + // Add the torrent/peer to the torrent repository + torrent_repository.upsert_peer(info_hash, &DEFAULT_PEER).await; + + torrent_repository.get_swarm_metadata(info_hash).await; + + let start = Instant::now(); + + for _ in 0..samples { + let torrent_repository_clone = torrent_repository.clone(); + + let handle = runtime.spawn(async move { + torrent_repository_clone.upsert_peer(info_hash, &DEFAULT_PEER).await; + + torrent_repository_clone.get_swarm_metadata(info_hash).await; + + if let Some(sleep_time) = sleep { + let start_time = std::time::Instant::now(); + + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); + + handles.push(handle); + } + + // Await all tasks + futures::future::join_all(handles).await; + + start.elapsed() +} + +// Add ten thousand torrents in parallel (depending on the set worker threads) +pub async fn add_multiple_torrents_in_parallel( + runtime: &tokio::runtime::Runtime, + samples: u64, + sleep: Option, +) -> Duration +where + V: RepositoryAsync + Default, + Arc: Clone + Send + Sync + 'static, +{ + let torrent_repository = Arc::::default(); + let info_hashes = generate_unique_info_hashes(samples.try_into().expect("it should fit in a usize")); + let handles = FuturesUnordered::new(); + + let start = Instant::now(); + + for info_hash in info_hashes { + let torrent_repository_clone = torrent_repository.clone(); + + let handle = runtime.spawn(async move { + torrent_repository_clone.upsert_peer(&info_hash, &DEFAULT_PEER).await; + + torrent_repository_clone.get_swarm_metadata(&info_hash).await; + + if let Some(sleep_time) = sleep { + let start_time = std::time::Instant::now(); + + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); + + handles.push(handle); + } + + // Await all tasks + futures::future::join_all(handles).await; + + start.elapsed() +} + +// Async update ten thousand 
torrents in parallel (depending on the set worker threads) +pub async fn update_multiple_torrents_in_parallel( + runtime: &tokio::runtime::Runtime, + samples: u64, + sleep: Option, +) -> Duration +where + V: RepositoryAsync + Default, + Arc: Clone + Send + Sync + 'static, +{ + let torrent_repository = Arc::::default(); + let info_hashes = generate_unique_info_hashes(samples.try_into().expect("it should fit in usize")); + let handles = FuturesUnordered::new(); + + // Add the torrents/peers to the torrent repository + for info_hash in &info_hashes { + torrent_repository.upsert_peer(info_hash, &DEFAULT_PEER).await; + torrent_repository.get_swarm_metadata(info_hash).await; + } + + let start = Instant::now(); + + for info_hash in info_hashes { + let torrent_repository_clone = torrent_repository.clone(); + + let handle = runtime.spawn(async move { + torrent_repository_clone.upsert_peer(&info_hash, &DEFAULT_PEER).await; + torrent_repository_clone.get_swarm_metadata(&info_hash).await; + + if let Some(sleep_time) = sleep { + let start_time = std::time::Instant::now(); + + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); + + handles.push(handle); + } + + // Await all tasks + futures::future::join_all(handles).await; + + start.elapsed() +} diff --git a/packages/torrent-repository/benches/helpers/mod.rs b/packages/torrent-repository/benches/helpers/mod.rs new file mode 100644 index 000000000..1026aa4bf --- /dev/null +++ b/packages/torrent-repository/benches/helpers/mod.rs @@ -0,0 +1,3 @@ +pub mod asyn; +pub mod sync; +pub mod utils; diff --git a/packages/torrent-repository/benches/helpers/sync.rs b/packages/torrent-repository/benches/helpers/sync.rs new file mode 100644 index 000000000..63fccfc77 --- /dev/null +++ b/packages/torrent-repository/benches/helpers/sync.rs @@ -0,0 +1,155 @@ +use std::sync::Arc; +use std::time::{Duration, Instant}; + +use futures::stream::FuturesUnordered; +use torrust_tracker_primitives::info_hash::InfoHash; +use 
torrust_tracker_torrent_repository::repository::Repository; + +use super::utils::{generate_unique_info_hashes, DEFAULT_PEER}; + +// Simply add one torrent +#[must_use] +pub fn add_one_torrent(samples: u64) -> Duration +where + V: Repository + Default, +{ + let start = Instant::now(); + + for _ in 0..samples { + let torrent_repository = V::default(); + + let info_hash = InfoHash([0; 20]); + + torrent_repository.upsert_peer(&info_hash, &DEFAULT_PEER); + + torrent_repository.get_swarm_metadata(&info_hash); + } + + start.elapsed() +} + +// Add one torrent ten thousand times in parallel (depending on the set worker threads) +pub async fn update_one_torrent_in_parallel(runtime: &tokio::runtime::Runtime, samples: u64, sleep: Option) -> Duration +where + V: Repository + Default, + Arc: Clone + Send + Sync + 'static, +{ + let torrent_repository = Arc::::default(); + let info_hash: &'static InfoHash = &InfoHash([0; 20]); + let handles = FuturesUnordered::new(); + + // Add the torrent/peer to the torrent repository + torrent_repository.upsert_peer(info_hash, &DEFAULT_PEER); + + torrent_repository.get_swarm_metadata(info_hash); + + let start = Instant::now(); + + for _ in 0..samples { + let torrent_repository_clone = torrent_repository.clone(); + + let handle = runtime.spawn(async move { + torrent_repository_clone.upsert_peer(info_hash, &DEFAULT_PEER); + + torrent_repository_clone.get_swarm_metadata(info_hash); + + if let Some(sleep_time) = sleep { + let start_time = std::time::Instant::now(); + + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); + + handles.push(handle); + } + + // Await all tasks + futures::future::join_all(handles).await; + + start.elapsed() +} + +// Add ten thousand torrents in parallel (depending on the set worker threads) +pub async fn add_multiple_torrents_in_parallel( + runtime: &tokio::runtime::Runtime, + samples: u64, + sleep: Option, +) -> Duration +where + V: Repository + Default, + Arc: Clone + Send + Sync + 'static, +{ + 
let torrent_repository = Arc::::default(); + let info_hashes = generate_unique_info_hashes(samples.try_into().expect("it should fit in a usize")); + let handles = FuturesUnordered::new(); + + let start = Instant::now(); + + for info_hash in info_hashes { + let torrent_repository_clone = torrent_repository.clone(); + + let handle = runtime.spawn(async move { + torrent_repository_clone.upsert_peer(&info_hash, &DEFAULT_PEER); + + torrent_repository_clone.get_swarm_metadata(&info_hash); + + if let Some(sleep_time) = sleep { + let start_time = std::time::Instant::now(); + + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); + + handles.push(handle); + } + + // Await all tasks + futures::future::join_all(handles).await; + + start.elapsed() +} + +// Update ten thousand torrents in parallel (depending on the set worker threads) +pub async fn update_multiple_torrents_in_parallel( + runtime: &tokio::runtime::Runtime, + samples: u64, + sleep: Option, +) -> Duration +where + V: Repository + Default, + Arc: Clone + Send + Sync + 'static, +{ + let torrent_repository = Arc::::default(); + let info_hashes = generate_unique_info_hashes(samples.try_into().expect("it should fit in usize")); + let handles = FuturesUnordered::new(); + + // Add the torrents/peers to the torrent repository + for info_hash in &info_hashes { + torrent_repository.upsert_peer(info_hash, &DEFAULT_PEER); + torrent_repository.get_swarm_metadata(info_hash); + } + + let start = Instant::now(); + + for info_hash in info_hashes { + let torrent_repository_clone = torrent_repository.clone(); + + let handle = runtime.spawn(async move { + torrent_repository_clone.upsert_peer(&info_hash, &DEFAULT_PEER); + torrent_repository_clone.get_swarm_metadata(&info_hash); + + if let Some(sleep_time) = sleep { + let start_time = std::time::Instant::now(); + + while start_time.elapsed().as_nanos() < u128::from(sleep_time) {} + } + }); + + handles.push(handle); + } + + // Await all tasks + 
futures::future::join_all(handles).await; + + start.elapsed() +} diff --git a/packages/torrent-repository/benches/helpers/utils.rs b/packages/torrent-repository/benches/helpers/utils.rs new file mode 100644 index 000000000..170194806 --- /dev/null +++ b/packages/torrent-repository/benches/helpers/utils.rs @@ -0,0 +1,40 @@ +use std::collections::HashSet; +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use torrust_tracker_primitives::announce_event::AnnounceEvent; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::peer::{Id, Peer}; +use torrust_tracker_primitives::{DurationSinceUnixEpoch, NumberOfBytes}; + +pub const DEFAULT_PEER: Peer = Peer { + peer_id: Id([0; 20]), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), + updated: DurationSinceUnixEpoch::from_secs(0), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Started, +}; + +#[must_use] +#[allow(clippy::missing_panics_doc)] +pub fn generate_unique_info_hashes(size: usize) -> Vec { + let mut result = HashSet::new(); + + let mut bytes = [0u8; 20]; + + #[allow(clippy::cast_possible_truncation)] + for i in 0..size { + bytes[0] = (i & 0xFF) as u8; + bytes[1] = ((i >> 8) & 0xFF) as u8; + bytes[2] = ((i >> 16) & 0xFF) as u8; + bytes[3] = ((i >> 24) & 0xFF) as u8; + + let info_hash = InfoHash(bytes); + result.insert(info_hash); + } + + assert_eq!(result.len(), size); + + result.into_iter().collect() +} diff --git a/packages/torrent-repository/benches/repository_benchmark.rs b/packages/torrent-repository/benches/repository_benchmark.rs new file mode 100644 index 000000000..4e50f1454 --- /dev/null +++ b/packages/torrent-repository/benches/repository_benchmark.rs @@ -0,0 +1,270 @@ +use std::time::Duration; + +mod helpers; + +use criterion::{criterion_group, criterion_main, Criterion}; +use torrust_tracker_torrent_repository::{ + TorrentsDashMapMutexStd, TorrentsRwLockStd, TorrentsRwLockStdMutexStd, 
TorrentsRwLockStdMutexTokio, TorrentsRwLockTokio, + TorrentsRwLockTokioMutexStd, TorrentsRwLockTokioMutexTokio, TorrentsSkipMapMutexParkingLot, TorrentsSkipMapMutexStd, + TorrentsSkipMapRwLockParkingLot, +}; + +use crate::helpers::{asyn, sync}; + +fn add_one_torrent(c: &mut Criterion) { + let rt = tokio::runtime::Builder::new_multi_thread().worker_threads(4).build().unwrap(); + + let mut group = c.benchmark_group("add_one_torrent"); + + group.warm_up_time(Duration::from_millis(500)); + group.measurement_time(Duration::from_millis(1000)); + + group.bench_function("RwLockStd", |b| { + b.iter_custom(sync::add_one_torrent::); + }); + + group.bench_function("RwLockStdMutexStd", |b| { + b.iter_custom(sync::add_one_torrent::); + }); + + group.bench_function("RwLockStdMutexTokio", |b| { + b.to_async(&rt) + .iter_custom(asyn::add_one_torrent::); + }); + + group.bench_function("RwLockTokio", |b| { + b.to_async(&rt).iter_custom(asyn::add_one_torrent::); + }); + + group.bench_function("RwLockTokioMutexStd", |b| { + b.to_async(&rt) + .iter_custom(asyn::add_one_torrent::); + }); + + group.bench_function("RwLockTokioMutexTokio", |b| { + b.to_async(&rt) + .iter_custom(asyn::add_one_torrent::); + }); + + group.bench_function("SkipMapMutexStd", |b| { + b.iter_custom(sync::add_one_torrent::); + }); + + group.bench_function("SkipMapMutexParkingLot", |b| { + b.iter_custom(sync::add_one_torrent::); + }); + + group.bench_function("SkipMapRwLockParkingLot", |b| { + b.iter_custom(sync::add_one_torrent::); + }); + + group.bench_function("DashMapMutexStd", |b| { + b.iter_custom(sync::add_one_torrent::); + }); + + group.finish(); +} + +fn add_multiple_torrents_in_parallel(c: &mut Criterion) { + let rt = tokio::runtime::Builder::new_multi_thread().worker_threads(4).build().unwrap(); + + let mut group = c.benchmark_group("add_multiple_torrents_in_parallel"); + + //group.sampling_mode(criterion::SamplingMode::Flat); + //group.sample_size(10); + + group.warm_up_time(Duration::from_millis(500)); + 
group.measurement_time(Duration::from_millis(1000)); + + group.bench_function("RwLockStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::add_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockStdMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::add_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockStdMutexTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::add_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::add_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockTokioMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::add_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockTokioMutexTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::add_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("SkipMapMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::add_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("SkipMapMutexParkingLot", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::add_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("SkipMapRwLockParkingLot", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::add_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("DashMapMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::add_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.finish(); +} + +fn update_one_torrent_in_parallel(c: &mut Criterion) { + let rt = tokio::runtime::Builder::new_multi_thread().worker_threads(4).build().unwrap(); + + let mut group = c.benchmark_group("update_one_torrent_in_parallel"); + + //group.sampling_mode(criterion::SamplingMode::Flat); + //group.sample_size(10); + + 
group.warm_up_time(Duration::from_millis(500)); + group.measurement_time(Duration::from_millis(1000)); + + group.bench_function("RwLockStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::update_one_torrent_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockStdMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::update_one_torrent_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockStdMutexTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::update_one_torrent_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::update_one_torrent_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockTokioMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::update_one_torrent_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockTokioMutexTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::update_one_torrent_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("SkipMapMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::update_one_torrent_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("SkipMapMutexParkingLot", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::update_one_torrent_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("SkipMapRwLockParkingLot", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::update_one_torrent_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("DashMapMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::update_one_torrent_in_parallel::(&rt, iters, None)); + }); + + group.finish(); +} + +fn update_multiple_torrents_in_parallel(c: &mut Criterion) { + let rt = tokio::runtime::Builder::new_multi_thread().worker_threads(4).build().unwrap(); + + let mut group = c.benchmark_group("update_multiple_torrents_in_parallel"); + + //group.sampling_mode(criterion::SamplingMode::Flat); 
+ //group.sample_size(10); + + group.warm_up_time(Duration::from_millis(500)); + group.measurement_time(Duration::from_millis(1000)); + + group.bench_function("RwLockStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::update_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockStdMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::update_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockStdMutexTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::update_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockTokio", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::update_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockTokioMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| asyn::update_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("RwLockTokioMutexTokio", |b| { + b.to_async(&rt).iter_custom(|iters| { + asyn::update_multiple_torrents_in_parallel::(&rt, iters, None) + }); + }); + + group.bench_function("SkipMapMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::update_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.bench_function("SkipMapMutexParkingLot", |b| { + b.to_async(&rt).iter_custom(|iters| { + sync::update_multiple_torrents_in_parallel::(&rt, iters, None) + }); + }); + + group.bench_function("SkipMapRwLockParkingLot", |b| { + b.to_async(&rt).iter_custom(|iters| { + sync::update_multiple_torrents_in_parallel::(&rt, iters, None) + }); + }); + + group.bench_function("DashMapMutexStd", |b| { + b.to_async(&rt) + .iter_custom(|iters| sync::update_multiple_torrents_in_parallel::(&rt, iters, None)); + }); + + group.finish(); +} + +criterion_group!( + benches, + add_one_torrent, + add_multiple_torrents_in_parallel, + update_one_torrent_in_parallel, + update_multiple_torrents_in_parallel +); 
+criterion_main!(benches); diff --git a/packages/torrent-repository/src/entry/mod.rs b/packages/torrent-repository/src/entry/mod.rs new file mode 100644 index 000000000..b920839d9 --- /dev/null +++ b/packages/torrent-repository/src/entry/mod.rs @@ -0,0 +1,92 @@ +use std::fmt::Debug; +use std::net::SocketAddr; +use std::sync::Arc; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; + +use self::peer_list::PeerList; + +pub mod mutex_parking_lot; +pub mod mutex_std; +pub mod mutex_tokio; +pub mod peer_list; +pub mod rw_lock_parking_lot; +pub mod single; + +pub trait Entry { + /// It returns the swarm metadata (statistics) as a struct: + /// + /// `(seeders, completed, leechers)` + fn get_swarm_metadata(&self) -> SwarmMetadata; + + /// Returns True if Still a Valid Entry according to the Tracker Policy + fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool; + + /// Returns True if the Peers is Empty + fn peers_is_empty(&self) -> bool; + + /// Returns the number of Peers + fn get_peers_len(&self) -> usize; + + /// Get all swarm peers, optionally limiting the result. + fn get_peers(&self, limit: Option) -> Vec>; + + /// It returns the list of peers for a given peer client, optionally limiting the + /// result. + /// + /// It filters out the input peer, typically because we want to return this + /// list of peers to that client peer. + fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec>; + + /// It updates a peer and returns true if the number of complete downloads have increased. + /// + /// The number of peers that have complete downloading is synchronously updated when peers are updated. + /// That's the total torrent downloads counter. 
+ fn upsert_peer(&mut self, peer: &peer::Peer) -> bool; + + /// It removes peer from the swarm that have not been updated for more than `current_cutoff` seconds + fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch); +} + +#[allow(clippy::module_name_repetitions)] +pub trait EntrySync { + fn get_swarm_metadata(&self) -> SwarmMetadata; + fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool; + fn peers_is_empty(&self) -> bool; + fn get_peers_len(&self) -> usize; + fn get_peers(&self, limit: Option) -> Vec>; + fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec>; + fn upsert_peer(&self, peer: &peer::Peer) -> bool; + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch); +} + +#[allow(clippy::module_name_repetitions)] +pub trait EntryAsync { + fn get_swarm_metadata(&self) -> impl std::future::Future + Send; + fn meets_retaining_policy(self, policy: &TrackerPolicy) -> impl std::future::Future + Send; + fn peers_is_empty(&self) -> impl std::future::Future + Send; + fn get_peers_len(&self) -> impl std::future::Future + Send; + fn get_peers(&self, limit: Option) -> impl std::future::Future>> + Send; + fn get_peers_for_client( + &self, + client: &SocketAddr, + limit: Option, + ) -> impl std::future::Future>> + Send; + fn upsert_peer(self, peer: &peer::Peer) -> impl std::future::Future + Send; + fn remove_inactive_peers(self, current_cutoff: DurationSinceUnixEpoch) -> impl std::future::Future + Send; +} + +/// A data structure containing all the information about a torrent in the tracker. +/// +/// This is the tracker entry for a given torrent and contains the swarm data, +/// that's the list of all the peers trying to download the same torrent. +/// The tracker keeps one entry like this for every torrent. 
+#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct Torrent { + /// A network of peers that are all trying to download the torrent associated to this entry + pub(crate) swarm: PeerList, + /// The number of peers that have ever completed downloading the torrent associated to this entry + pub(crate) downloaded: u32, +} diff --git a/packages/torrent-repository/src/entry/mutex_parking_lot.rs b/packages/torrent-repository/src/entry/mutex_parking_lot.rs new file mode 100644 index 000000000..738c3ff9d --- /dev/null +++ b/packages/torrent-repository/src/entry/mutex_parking_lot.rs @@ -0,0 +1,49 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; + +use super::{Entry, EntrySync}; +use crate::{EntryMutexParkingLot, EntrySingle}; + +impl EntrySync for EntryMutexParkingLot { + fn get_swarm_metadata(&self) -> SwarmMetadata { + self.lock().get_swarm_metadata() + } + + fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { + self.lock().meets_retaining_policy(policy) + } + + fn peers_is_empty(&self) -> bool { + self.lock().peers_is_empty() + } + + fn get_peers_len(&self) -> usize { + self.lock().get_peers_len() + } + + fn get_peers(&self, limit: Option) -> Vec> { + self.lock().get_peers(limit) + } + + fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { + self.lock().get_peers_for_client(client, limit) + } + + fn upsert_peer(&self, peer: &peer::Peer) -> bool { + self.lock().upsert_peer(peer) + } + + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + self.lock().remove_inactive_peers(current_cutoff); + } +} + +impl From for EntryMutexParkingLot { + fn from(entry: EntrySingle) -> Self { + Arc::new(parking_lot::Mutex::new(entry)) + } +} diff --git a/packages/torrent-repository/src/entry/mutex_std.rs 
b/packages/torrent-repository/src/entry/mutex_std.rs new file mode 100644 index 000000000..0ab70a96f --- /dev/null +++ b/packages/torrent-repository/src/entry/mutex_std.rs @@ -0,0 +1,51 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; + +use super::{Entry, EntrySync}; +use crate::{EntryMutexStd, EntrySingle}; + +impl EntrySync for EntryMutexStd { + fn get_swarm_metadata(&self) -> SwarmMetadata { + self.lock().expect("it should get a lock").get_swarm_metadata() + } + + fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { + self.lock().expect("it should get a lock").meets_retaining_policy(policy) + } + + fn peers_is_empty(&self) -> bool { + self.lock().expect("it should get a lock").peers_is_empty() + } + + fn get_peers_len(&self) -> usize { + self.lock().expect("it should get a lock").get_peers_len() + } + + fn get_peers(&self, limit: Option) -> Vec> { + self.lock().expect("it should get lock").get_peers(limit) + } + + fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { + self.lock().expect("it should get lock").get_peers_for_client(client, limit) + } + + fn upsert_peer(&self, peer: &peer::Peer) -> bool { + self.lock().expect("it should lock the entry").upsert_peer(peer) + } + + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + self.lock() + .expect("it should lock the entry") + .remove_inactive_peers(current_cutoff); + } +} + +impl From for EntryMutexStd { + fn from(entry: EntrySingle) -> Self { + Arc::new(std::sync::Mutex::new(entry)) + } +} diff --git a/packages/torrent-repository/src/entry/mutex_tokio.rs b/packages/torrent-repository/src/entry/mutex_tokio.rs new file mode 100644 index 000000000..6db789a72 --- /dev/null +++ b/packages/torrent-repository/src/entry/mutex_tokio.rs @@ -0,0 +1,49 @@ +use 
std::net::SocketAddr; +use std::sync::Arc; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; + +use super::{Entry, EntryAsync}; +use crate::{EntryMutexTokio, EntrySingle}; + +impl EntryAsync for EntryMutexTokio { + async fn get_swarm_metadata(&self) -> SwarmMetadata { + self.lock().await.get_swarm_metadata() + } + + async fn meets_retaining_policy(self, policy: &TrackerPolicy) -> bool { + self.lock().await.meets_retaining_policy(policy) + } + + async fn peers_is_empty(&self) -> bool { + self.lock().await.peers_is_empty() + } + + async fn get_peers_len(&self) -> usize { + self.lock().await.get_peers_len() + } + + async fn get_peers(&self, limit: Option) -> Vec> { + self.lock().await.get_peers(limit) + } + + async fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { + self.lock().await.get_peers_for_client(client, limit) + } + + async fn upsert_peer(self, peer: &peer::Peer) -> bool { + self.lock().await.upsert_peer(peer) + } + + async fn remove_inactive_peers(self, current_cutoff: DurationSinceUnixEpoch) { + self.lock().await.remove_inactive_peers(current_cutoff); + } +} + +impl From for EntryMutexTokio { + fn from(entry: EntrySingle) -> Self { + Arc::new(tokio::sync::Mutex::new(entry)) + } +} diff --git a/packages/torrent-repository/src/entry/peer_list.rs b/packages/torrent-repository/src/entry/peer_list.rs new file mode 100644 index 000000000..3f69edbb5 --- /dev/null +++ b/packages/torrent-repository/src/entry/peer_list.rs @@ -0,0 +1,289 @@ +//! A peer list. +use std::net::SocketAddr; +use std::sync::Arc; + +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; + +// code-review: the current implementation uses the peer Id as the ``BTreeMap`` +// key. That would allow adding two identical peers except for the Id. 
+// For example, two peers with the same socket address but a different peer Id +// would be allowed. That would lead to duplicated peers in the tracker responses. + +#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct PeerList { + peers: std::collections::BTreeMap>, +} + +impl PeerList { + #[must_use] + pub fn len(&self) -> usize { + self.peers.len() + } + + #[must_use] + pub fn is_empty(&self) -> bool { + self.peers.is_empty() + } + + pub fn upsert(&mut self, value: Arc) -> Option> { + self.peers.insert(value.peer_id, value) + } + + pub fn remove(&mut self, key: &peer::Id) -> Option> { + self.peers.remove(key) + } + + pub fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch) { + self.peers + .retain(|_, peer| peer::ReadInfo::get_updated(peer) > current_cutoff); + } + + #[must_use] + pub fn get(&self, peer_id: &peer::Id) -> Option<&Arc> { + self.peers.get(peer_id) + } + + #[must_use] + pub fn get_all(&self, limit: Option) -> Vec> { + match limit { + Some(limit) => self.peers.values().take(limit).cloned().collect(), + None => self.peers.values().cloned().collect(), + } + } + + #[must_use] + pub fn seeders_and_leechers(&self) -> (usize, usize) { + let seeders = self.peers.values().filter(|peer| peer.is_seeder()).count(); + let leechers = self.len() - seeders; + + (seeders, leechers) + } + + #[must_use] + pub fn get_peers_excluding_addr(&self, peer_addr: &SocketAddr, limit: Option) -> Vec> { + match limit { + Some(limit) => self + .peers + .values() + // Take peers which are not the client peer + .filter(|peer| peer::ReadInfo::get_address(peer.as_ref()) != *peer_addr) + // Limit the number of peers on the result + .take(limit) + .cloned() + .collect(), + None => self + .peers + .values() + // Take peers which are not the client peer + .filter(|peer| peer::ReadInfo::get_address(peer.as_ref()) != *peer_addr) + .cloned() + .collect(), + } + } +} + +#[cfg(test)] +mod tests { + + mod it_should { + use std::net::{IpAddr, 
Ipv4Addr, SocketAddr}; + use std::sync::Arc; + + use torrust_tracker_primitives::peer::fixture::PeerBuilder; + use torrust_tracker_primitives::peer::{self}; + use torrust_tracker_primitives::DurationSinceUnixEpoch; + + use crate::entry::peer_list::PeerList; + + #[test] + fn be_empty_when_no_peers_have_been_inserted() { + let peer_list = PeerList::default(); + + assert!(peer_list.is_empty()); + } + + #[test] + fn have_zero_length_when_no_peers_have_been_inserted() { + let peer_list = PeerList::default(); + + assert_eq!(peer_list.len(), 0); + } + + #[test] + fn allow_inserting_a_new_peer() { + let mut peer_list = PeerList::default(); + + let peer = PeerBuilder::default().build(); + + assert_eq!(peer_list.upsert(peer.into()), None); + } + + #[test] + fn allow_updating_a_preexisting_peer() { + let mut peer_list = PeerList::default(); + + let peer = PeerBuilder::default().build(); + + peer_list.upsert(peer.into()); + + assert_eq!(peer_list.upsert(peer.into()), Some(Arc::new(peer))); + } + + #[test] + fn allow_getting_all_peers() { + let mut peer_list = PeerList::default(); + + let peer = PeerBuilder::default().build(); + + peer_list.upsert(peer.into()); + + assert_eq!(peer_list.get_all(None), [Arc::new(peer)]); + } + + #[test] + fn allow_getting_one_peer_by_id() { + let mut peer_list = PeerList::default(); + + let peer = PeerBuilder::default().build(); + + peer_list.upsert(peer.into()); + + assert_eq!(peer_list.get(&peer.peer_id), Some(Arc::new(peer)).as_ref()); + } + + #[test] + fn increase_the_number_of_peers_after_inserting_a_new_one() { + let mut peer_list = PeerList::default(); + + let peer = PeerBuilder::default().build(); + + peer_list.upsert(peer.into()); + + assert_eq!(peer_list.len(), 1); + } + + #[test] + fn decrease_the_number_of_peers_after_removing_one() { + let mut peer_list = PeerList::default(); + + let peer = PeerBuilder::default().build(); + + peer_list.upsert(peer.into()); + + peer_list.remove(&peer.peer_id); + + assert!(peer_list.is_empty()); + } + 
+ #[test] + fn allow_removing_an_existing_peer() { + let mut peer_list = PeerList::default(); + + let peer = PeerBuilder::default().build(); + + peer_list.upsert(peer.into()); + + peer_list.remove(&peer.peer_id); + + assert_eq!(peer_list.get(&peer.peer_id), None); + } + + #[test] + fn allow_getting_all_peers_excluding_peers_with_a_given_address() { + let mut peer_list = PeerList::default(); + + let peer1 = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 6969)) + .build(); + peer_list.upsert(peer1.into()); + + let peer2 = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 2)), 6969)) + .build(); + peer_list.upsert(peer2.into()); + + assert_eq!(peer_list.get_peers_excluding_addr(&peer2.peer_addr, None), [Arc::new(peer1)]); + } + + #[test] + fn return_the_number_of_seeders_in_the_list() { + let mut peer_list = PeerList::default(); + + let seeder = PeerBuilder::seeder().build(); + let leecher = PeerBuilder::leecher().build(); + + peer_list.upsert(seeder.into()); + peer_list.upsert(leecher.into()); + + let (seeders, _leechers) = peer_list.seeders_and_leechers(); + + assert_eq!(seeders, 1); + } + + #[test] + fn return_the_number_of_leechers_in_the_list() { + let mut peer_list = PeerList::default(); + + let seeder = PeerBuilder::seeder().build(); + let leecher = PeerBuilder::leecher().build(); + + peer_list.upsert(seeder.into()); + peer_list.upsert(leecher.into()); + + let (_seeders, leechers) = peer_list.seeders_and_leechers(); + + assert_eq!(leechers, 1); + } + + #[test] + fn remove_inactive_peers() { + let mut peer_list = PeerList::default(); + let one_second = DurationSinceUnixEpoch::new(1, 0); + + // Insert the peer + let last_update_time = DurationSinceUnixEpoch::new(1_669_397_478_934, 0); + let peer = PeerBuilder::default().last_updated_on(last_update_time).build(); + 
peer_list.upsert(peer.into()); + + // Remove peers not updated since one second after inserting the peer + peer_list.remove_inactive_peers(last_update_time + one_second); + + assert_eq!(peer_list.len(), 0); + } + + #[test] + fn not_remove_active_peers() { + let mut peer_list = PeerList::default(); + let one_second = DurationSinceUnixEpoch::new(1, 0); + + // Insert the peer + let last_update_time = DurationSinceUnixEpoch::new(1_669_397_478_934, 0); + let peer = PeerBuilder::default().last_updated_on(last_update_time).build(); + peer_list.upsert(peer.into()); + + // Remove peers not updated since one second before inserting the peer. + peer_list.remove_inactive_peers(last_update_time - one_second); + + assert_eq!(peer_list.len(), 1); + } + + #[test] + fn allow_inserting_two_identical_peers_except_for_the_id() { + let mut peer_list = PeerList::default(); + + let peer1 = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .build(); + peer_list.upsert(peer1.into()); + + let peer2 = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + .build(); + peer_list.upsert(peer2.into()); + + assert_eq!(peer_list.len(), 2); + } + } +} diff --git a/packages/torrent-repository/src/entry/rw_lock_parking_lot.rs b/packages/torrent-repository/src/entry/rw_lock_parking_lot.rs new file mode 100644 index 000000000..ac0dc0b30 --- /dev/null +++ b/packages/torrent-repository/src/entry/rw_lock_parking_lot.rs @@ -0,0 +1,49 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; + +use super::{Entry, EntrySync}; +use crate::{EntryRwLockParkingLot, EntrySingle}; + +impl EntrySync for EntryRwLockParkingLot { + fn get_swarm_metadata(&self) -> SwarmMetadata { + self.read().get_swarm_metadata() + } + + fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { + 
self.read().meets_retaining_policy(policy) + } + + fn peers_is_empty(&self) -> bool { + self.read().peers_is_empty() + } + + fn get_peers_len(&self) -> usize { + self.read().get_peers_len() + } + + fn get_peers(&self, limit: Option) -> Vec> { + self.read().get_peers(limit) + } + + fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { + self.read().get_peers_for_client(client, limit) + } + + fn upsert_peer(&self, peer: &peer::Peer) -> bool { + self.write().upsert_peer(peer) + } + + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + self.write().remove_inactive_peers(current_cutoff); + } +} + +impl From for EntryRwLockParkingLot { + fn from(entry: EntrySingle) -> Self { + Arc::new(parking_lot::RwLock::new(entry)) + } +} diff --git a/packages/torrent-repository/src/entry/single.rs b/packages/torrent-repository/src/entry/single.rs new file mode 100644 index 000000000..6d7ed3155 --- /dev/null +++ b/packages/torrent-repository/src/entry/single.rs @@ -0,0 +1,79 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::announce_event::AnnounceEvent; +use torrust_tracker_primitives::peer::{self}; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::DurationSinceUnixEpoch; + +use super::Entry; +use crate::EntrySingle; + +impl Entry for EntrySingle { + #[allow(clippy::cast_possible_truncation)] + fn get_swarm_metadata(&self) -> SwarmMetadata { + let (seeders, leechers) = self.swarm.seeders_and_leechers(); + + SwarmMetadata { + downloaded: self.downloaded, + complete: seeders as u32, + incomplete: leechers as u32, + } + } + + fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { + if policy.persistent_torrent_completed_stat && self.downloaded > 0 { + return true; + } + + if policy.remove_peerless_torrents && self.swarm.is_empty() { + return false; + } + + true + } + + fn peers_is_empty(&self) -> 
bool { + self.swarm.is_empty() + } + + fn get_peers_len(&self) -> usize { + self.swarm.len() + } + + fn get_peers(&self, limit: Option) -> Vec> { + self.swarm.get_all(limit) + } + + fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { + self.swarm.get_peers_excluding_addr(client, limit) + } + + fn upsert_peer(&mut self, peer: &peer::Peer) -> bool { + let mut downloaded_stats_updated: bool = false; + + match peer::ReadInfo::get_event(peer) { + AnnounceEvent::Stopped => { + drop(self.swarm.remove(&peer::ReadInfo::get_id(peer))); + } + AnnounceEvent::Completed => { + let previous = self.swarm.upsert(Arc::new(*peer)); + // Don't count if peer was not previously known and not already completed. + if previous.is_some_and(|p| p.event != AnnounceEvent::Completed) { + self.downloaded += 1; + downloaded_stats_updated = true; + } + } + _ => { + drop(self.swarm.upsert(Arc::new(*peer))); + } + } + + downloaded_stats_updated + } + + fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch) { + self.swarm.remove_inactive_peers(current_cutoff); + } +} diff --git a/packages/torrent-repository/src/lib.rs b/packages/torrent-repository/src/lib.rs new file mode 100644 index 000000000..a8955808e --- /dev/null +++ b/packages/torrent-repository/src/lib.rs @@ -0,0 +1,44 @@ +use std::sync::Arc; + +use repository::dash_map_mutex_std::XacrimonDashMap; +use repository::rw_lock_std::RwLockStd; +use repository::rw_lock_tokio::RwLockTokio; +use repository::skip_map_mutex_std::CrossbeamSkipList; +use torrust_tracker_clock::clock; + +pub mod entry; +pub mod repository; + +// Repo Entries + +pub type EntrySingle = entry::Torrent; +pub type EntryMutexStd = Arc>; +pub type EntryMutexTokio = Arc>; +pub type EntryMutexParkingLot = Arc>; +pub type EntryRwLockParkingLot = Arc>; + +// Repos + +pub type TorrentsRwLockStd = RwLockStd; +pub type TorrentsRwLockStdMutexStd = RwLockStd; +pub type TorrentsRwLockStdMutexTokio = RwLockStd; +pub type TorrentsRwLockTokio = 
RwLockTokio; +pub type TorrentsRwLockTokioMutexStd = RwLockTokio; +pub type TorrentsRwLockTokioMutexTokio = RwLockTokio; + +pub type TorrentsSkipMapMutexStd = CrossbeamSkipList; +pub type TorrentsSkipMapMutexParkingLot = CrossbeamSkipList; +pub type TorrentsSkipMapRwLockParkingLot = CrossbeamSkipList; + +pub type TorrentsDashMapMutexStd = XacrimonDashMap; + +/// This code needs to be copied into each crate. +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Working; + +/// Stopped version, for testing. +#[cfg(test)] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Stopped; diff --git a/packages/torrent-repository/src/repository/dash_map_mutex_std.rs b/packages/torrent-repository/src/repository/dash_map_mutex_std.rs new file mode 100644 index 000000000..4354c12ec --- /dev/null +++ b/packages/torrent-repository/src/repository/dash_map_mutex_std.rs @@ -0,0 +1,108 @@ +use std::sync::Arc; + +use dashmap::DashMap; +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; + +use super::Repository; +use crate::entry::peer_list::PeerList; +use crate::entry::{Entry, EntrySync}; +use crate::{EntryMutexStd, EntrySingle}; + +#[derive(Default, Debug)] +pub struct XacrimonDashMap { + pub torrents: DashMap, +} + +impl Repository for XacrimonDashMap +where + EntryMutexStd: EntrySync, + EntrySingle: Entry, +{ + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { + if let Some(entry) = self.torrents.get(info_hash) { + entry.upsert_peer(peer); + } else { + let _unused = self.torrents.insert(*info_hash, Arc::default()); + if let Some(entry) = self.torrents.get(info_hash) { + 
entry.upsert_peer(peer); + } + } + } + + fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + self.torrents.get(info_hash).map(|entry| entry.value().get_swarm_metadata()) + } + + fn get(&self, key: &InfoHash) -> Option { + let maybe_entry = self.torrents.get(key); + maybe_entry.map(|entry| entry.clone()) + } + + fn get_metrics(&self) -> TorrentsMetrics { + let mut metrics = TorrentsMetrics::default(); + + for entry in &self.torrents { + let stats = entry.value().lock().expect("it should get a lock").get_swarm_metadata(); + metrics.complete += u64::from(stats.complete); + metrics.downloaded += u64::from(stats.downloaded); + metrics.incomplete += u64::from(stats.incomplete); + metrics.torrents += 1; + } + + metrics + } + + fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexStd)> { + match pagination { + Some(pagination) => self + .torrents + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|entry| (*entry.key(), entry.value().clone())) + .collect(), + None => self + .torrents + .iter() + .map(|entry| (*entry.key(), entry.value().clone())) + .collect(), + } + } + + fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + for (info_hash, completed) in persistent_torrents { + if self.torrents.contains_key(info_hash) { + continue; + } + + let entry = EntryMutexStd::new( + EntrySingle { + swarm: PeerList::default(), + downloaded: *completed, + } + .into(), + ); + + self.torrents.insert(*info_hash, entry); + } + } + + fn remove(&self, key: &InfoHash) -> Option { + self.torrents.remove(key).map(|(_key, value)| value.clone()) + } + + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + for entry in &self.torrents { + entry.value().remove_inactive_peers(current_cutoff); + } + } + + fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + self.torrents.retain(|_, entry| entry.meets_retaining_policy(policy)); + } +} diff --git 
a/packages/torrent-repository/src/repository/mod.rs b/packages/torrent-repository/src/repository/mod.rs new file mode 100644 index 000000000..f198288f8 --- /dev/null +++ b/packages/torrent-repository/src/repository/mod.rs @@ -0,0 +1,42 @@ +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; + +pub mod dash_map_mutex_std; +pub mod rw_lock_std; +pub mod rw_lock_std_mutex_std; +pub mod rw_lock_std_mutex_tokio; +pub mod rw_lock_tokio; +pub mod rw_lock_tokio_mutex_std; +pub mod rw_lock_tokio_mutex_tokio; +pub mod skip_map_mutex_std; + +use std::fmt::Debug; + +pub trait Repository: Debug + Default + Sized + 'static { + fn get(&self, key: &InfoHash) -> Option; + fn get_metrics(&self) -> TorrentsMetrics; + fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, T)>; + fn import_persistent(&self, persistent_torrents: &PersistentTorrents); + fn remove(&self, key: &InfoHash) -> Option; + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch); + fn remove_peerless_torrents(&self, policy: &TrackerPolicy); + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer); + fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option; +} + +#[allow(clippy::module_name_repetitions)] +pub trait RepositoryAsync: Debug + Default + Sized + 'static { + fn get(&self, key: &InfoHash) -> impl std::future::Future> + Send; + fn get_metrics(&self) -> impl std::future::Future + Send; + fn get_paginated(&self, pagination: Option<&Pagination>) -> impl std::future::Future> + Send; + fn import_persistent(&self, persistent_torrents: &PersistentTorrents) -> impl std::future::Future + Send; + fn remove(&self, key: &InfoHash) -> impl 
std::future::Future> + Send; + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) -> impl std::future::Future + Send; + fn remove_peerless_torrents(&self, policy: &TrackerPolicy) -> impl std::future::Future + Send; + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) -> impl std::future::Future + Send; + fn get_swarm_metadata(&self, info_hash: &InfoHash) -> impl std::future::Future> + Send; +} diff --git a/packages/torrent-repository/src/repository/rw_lock_std.rs b/packages/torrent-repository/src/repository/rw_lock_std.rs new file mode 100644 index 000000000..5439fdd79 --- /dev/null +++ b/packages/torrent-repository/src/repository/rw_lock_std.rs @@ -0,0 +1,131 @@ +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; + +use super::Repository; +use crate::entry::peer_list::PeerList; +use crate::entry::Entry; +use crate::{EntrySingle, TorrentsRwLockStd}; + +#[derive(Default, Debug)] +pub struct RwLockStd { + pub(crate) torrents: std::sync::RwLock>, +} + +impl RwLockStd { + /// # Panics + /// + /// Panics if unable to get a lock. 
+ pub fn write( + &self, + ) -> std::sync::RwLockWriteGuard<'_, std::collections::BTreeMap> { + self.torrents.write().expect("it should get lock") + } +} + +impl TorrentsRwLockStd { + fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().expect("it should get the read lock") + } + + fn get_torrents_mut<'a>(&'a self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().expect("it should get the write lock") + } +} + +impl Repository for TorrentsRwLockStd +where + EntrySingle: Entry, +{ + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { + let mut db = self.get_torrents_mut(); + + let entry = db.entry(*info_hash).or_insert(EntrySingle::default()); + + entry.upsert_peer(peer); + } + + fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + self.get(info_hash).map(|entry| entry.get_swarm_metadata()) + } + + fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents(); + db.get(key).cloned() + } + + fn get_metrics(&self) -> TorrentsMetrics { + let mut metrics = TorrentsMetrics::default(); + + for entry in self.get_torrents().values() { + let stats = entry.get_swarm_metadata(); + metrics.complete += u64::from(stats.complete); + metrics.downloaded += u64::from(stats.downloaded); + metrics.incomplete += u64::from(stats.incomplete); + metrics.torrents += 1; + } + + metrics + } + + fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntrySingle)> { + let db = self.get_torrents(); + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut torrents = 
self.get_torrents_mut(); + + for (info_hash, downloaded) in persistent_torrents { + // Skip if torrent entry already exists + if torrents.contains_key(info_hash) { + continue; + } + + let entry = EntrySingle { + swarm: PeerList::default(), + downloaded: *downloaded, + }; + + torrents.insert(*info_hash, entry); + } + } + + fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut(); + db.remove(key) + } + + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + let mut db = self.get_torrents_mut(); + let entries = db.values_mut(); + + for entry in entries { + entry.remove_inactive_peers(current_cutoff); + } + } + + fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let mut db = self.get_torrents_mut(); + + db.retain(|_, e| e.meets_retaining_policy(policy)); + } +} diff --git a/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs b/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs new file mode 100644 index 000000000..7d58b0b10 --- /dev/null +++ b/packages/torrent-repository/src/repository/rw_lock_std_mutex_std.rs @@ -0,0 +1,129 @@ +use std::sync::Arc; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; + +use super::Repository; +use crate::entry::peer_list::PeerList; +use crate::entry::{Entry, EntrySync}; +use crate::{EntryMutexStd, EntrySingle, TorrentsRwLockStdMutexStd}; + +impl TorrentsRwLockStdMutexStd { + fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().expect("unable to get torrent list") + } + + fn get_torrents_mut<'a>(&'a self) -> 
std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().expect("unable to get writable torrent list") + } +} + +impl Repository for TorrentsRwLockStdMutexStd +where + EntryMutexStd: EntrySync, + EntrySingle: Entry, +{ + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { + let maybe_entry = self.get_torrents().get(info_hash).cloned(); + + let entry = if let Some(entry) = maybe_entry { + entry + } else { + let mut db = self.get_torrents_mut(); + let entry = db.entry(*info_hash).or_insert(Arc::default()); + entry.clone() + }; + + entry.upsert_peer(peer); + } + + fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + self.get_torrents() + .get(info_hash) + .map(super::super::entry::EntrySync::get_swarm_metadata) + } + + fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents(); + db.get(key).cloned() + } + + fn get_metrics(&self) -> TorrentsMetrics { + let mut metrics = TorrentsMetrics::default(); + + for entry in self.get_torrents().values() { + let stats = entry.lock().expect("it should get a lock").get_swarm_metadata(); + metrics.complete += u64::from(stats.complete); + metrics.downloaded += u64::from(stats.downloaded); + metrics.incomplete += u64::from(stats.incomplete); + metrics.torrents += 1; + } + + metrics + } + + fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexStd)> { + let db = self.get_torrents(); + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut torrents = self.get_torrents_mut(); + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if torrents.contains_key(info_hash) { + continue; + } + 
+ let entry = EntryMutexStd::new( + EntrySingle { + swarm: PeerList::default(), + downloaded: *completed, + } + .into(), + ); + + torrents.insert(*info_hash, entry); + } + } + + fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut(); + db.remove(key) + } + + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + let db = self.get_torrents(); + let entries = db.values().cloned(); + + for entry in entries { + entry.remove_inactive_peers(current_cutoff); + } + } + + fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let mut db = self.get_torrents_mut(); + + db.retain(|_, e| e.lock().expect("it should lock entry").meets_retaining_policy(policy)); + } +} diff --git a/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs new file mode 100644 index 000000000..90451ca9f --- /dev/null +++ b/packages/torrent-repository/src/repository/rw_lock_std_mutex_tokio.rs @@ -0,0 +1,161 @@ +use std::iter::zip; +use std::pin::Pin; +use std::sync::Arc; + +use futures::future::join_all; +use futures::{Future, FutureExt}; +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; + +use super::RepositoryAsync; +use crate::entry::peer_list::PeerList; +use crate::entry::{Entry, EntryAsync}; +use crate::{EntryMutexTokio, EntrySingle, TorrentsRwLockStdMutexTokio}; + +impl TorrentsRwLockStdMutexTokio { + fn get_torrents<'a>(&'a self) -> std::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().expect("unable to get torrent list") + } + + fn get_torrents_mut<'a>(&'a 
self) -> std::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().expect("unable to get writable torrent list") + } +} + +impl RepositoryAsync for TorrentsRwLockStdMutexTokio +where + EntryMutexTokio: EntryAsync, + EntrySingle: Entry, +{ + async fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { + let maybe_entry = self.get_torrents().get(info_hash).cloned(); + + let entry = if let Some(entry) = maybe_entry { + entry + } else { + let mut db = self.get_torrents_mut(); + let entry = db.entry(*info_hash).or_insert(Arc::default()); + entry.clone() + }; + + entry.upsert_peer(peer).await; + } + + async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + let maybe_entry = self.get_torrents().get(info_hash).cloned(); + + match maybe_entry { + Some(entry) => Some(entry.get_swarm_metadata().await), + None => None, + } + } + + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents(); + db.get(key).cloned() + } + + async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexTokio)> { + let db = self.get_torrents(); + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn get_metrics(&self) -> TorrentsMetrics { + let mut metrics = TorrentsMetrics::default(); + + let entries: Vec<_> = self.get_torrents().values().cloned().collect(); + + for entry in entries { + let stats = entry.lock().await.get_swarm_metadata(); + metrics.complete += u64::from(stats.complete); + metrics.downloaded += u64::from(stats.downloaded); + metrics.incomplete += u64::from(stats.incomplete); + metrics.torrents += 1; + } + + metrics + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut db = self.get_torrents_mut(); + 
+ for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if db.contains_key(info_hash) { + continue; + } + + let entry = EntryMutexTokio::new( + EntrySingle { + swarm: PeerList::default(), + downloaded: *completed, + } + .into(), + ); + + db.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut(); + db.remove(key) + } + + async fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + let handles: Vec + Send>>>; + { + let db = self.get_torrents(); + handles = db + .values() + .cloned() + .map(|e| e.remove_inactive_peers(current_cutoff).boxed()) + .collect(); + } + join_all(handles).await; + } + + async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let handles: Vec> + Send>>>; + + { + let db = self.get_torrents(); + + handles = zip(db.keys().copied(), db.values().cloned()) + .map(|(infohash, torrent)| { + torrent + .meets_retaining_policy(policy) + .map(move |should_be_retained| if should_be_retained { None } else { Some(infohash) }) + .boxed() + }) + .collect::>(); + } + + let not_good = join_all(handles).await; + + let mut db = self.get_torrents_mut(); + + for remove in not_good.into_iter().flatten() { + drop(db.remove(&remove)); + } + } +} diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_tokio.rs new file mode 100644 index 000000000..baaa01232 --- /dev/null +++ b/packages/torrent-repository/src/repository/rw_lock_tokio.rs @@ -0,0 +1,135 @@ +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; + +use super::RepositoryAsync; 
+use crate::entry::peer_list::PeerList; +use crate::entry::Entry; +use crate::{EntrySingle, TorrentsRwLockTokio}; + +#[derive(Default, Debug)] +pub struct RwLockTokio { + pub(crate) torrents: tokio::sync::RwLock>, +} + +impl RwLockTokio { + pub fn write( + &self, + ) -> impl std::future::Future< + Output = tokio::sync::RwLockWriteGuard< + '_, + std::collections::BTreeMap, + >, + > { + self.torrents.write() + } +} + +impl TorrentsRwLockTokio { + async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().await + } + + async fn get_torrents_mut<'a>( + &'a self, + ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().await + } +} + +impl RepositoryAsync for TorrentsRwLockTokio +where + EntrySingle: Entry, +{ + async fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { + let mut db = self.get_torrents_mut().await; + + let entry = db.entry(*info_hash).or_insert(EntrySingle::default()); + + entry.upsert_peer(peer); + } + + async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + self.get(info_hash).await.map(|entry| entry.get_swarm_metadata()) + } + + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents().await; + db.get(key).cloned() + } + + async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntrySingle)> { + let db = self.get_torrents().await; + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn get_metrics(&self) -> TorrentsMetrics { + let mut metrics = TorrentsMetrics::default(); + + for entry in self.get_torrents().await.values() { + let stats = entry.get_swarm_metadata(); + metrics.complete += 
u64::from(stats.complete); + metrics.downloaded += u64::from(stats.downloaded); + metrics.incomplete += u64::from(stats.incomplete); + metrics.torrents += 1; + } + + metrics + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut torrents = self.get_torrents_mut().await; + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if torrents.contains_key(info_hash) { + continue; + } + + let entry = EntrySingle { + swarm: PeerList::default(), + downloaded: *completed, + }; + + torrents.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut().await; + db.remove(key) + } + + async fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + let mut db = self.get_torrents_mut().await; + let entries = db.values_mut(); + + for entry in entries { + entry.remove_inactive_peers(current_cutoff); + } + } + + async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let mut db = self.get_torrents_mut().await; + + db.retain(|_, e| e.meets_retaining_policy(policy)); + } +} diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs new file mode 100644 index 000000000..1887f70c7 --- /dev/null +++ b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_std.rs @@ -0,0 +1,129 @@ +use std::sync::Arc; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; + +use super::RepositoryAsync; +use crate::entry::peer_list::PeerList; +use crate::entry::{Entry, EntrySync}; +use 
crate::{EntryMutexStd, EntrySingle, TorrentsRwLockTokioMutexStd}; + +impl TorrentsRwLockTokioMutexStd { + async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().await + } + + async fn get_torrents_mut<'a>( + &'a self, + ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().await + } +} + +impl RepositoryAsync for TorrentsRwLockTokioMutexStd +where + EntryMutexStd: EntrySync, + EntrySingle: Entry, +{ + async fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { + let maybe_entry = self.get_torrents().await.get(info_hash).cloned(); + + let entry = if let Some(entry) = maybe_entry { + entry + } else { + let mut db = self.get_torrents_mut().await; + let entry = db.entry(*info_hash).or_insert(Arc::default()); + entry.clone() + }; + + entry.upsert_peer(peer); + } + + async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + self.get(info_hash).await.map(|entry| entry.get_swarm_metadata()) + } + + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents().await; + db.get(key).cloned() + } + + async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexStd)> { + let db = self.get_torrents().await; + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn get_metrics(&self) -> TorrentsMetrics { + let mut metrics = TorrentsMetrics::default(); + + for entry in self.get_torrents().await.values() { + let stats = entry.get_swarm_metadata(); + metrics.complete += u64::from(stats.complete); + metrics.downloaded += u64::from(stats.downloaded); + metrics.incomplete += u64::from(stats.incomplete); + metrics.torrents += 
1; + } + + metrics + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut torrents = self.get_torrents_mut().await; + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if torrents.contains_key(info_hash) { + continue; + } + + let entry = EntryMutexStd::new( + EntrySingle { + swarm: PeerList::default(), + downloaded: *completed, + } + .into(), + ); + + torrents.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut().await; + db.remove(key) + } + + async fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + let db = self.get_torrents().await; + let entries = db.values().cloned(); + + for entry in entries { + entry.remove_inactive_peers(current_cutoff); + } + } + + async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let mut db = self.get_torrents_mut().await; + + db.retain(|_, e| e.lock().expect("it should lock entry").meets_retaining_policy(policy)); + } +} diff --git a/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs new file mode 100644 index 000000000..6c9c08a73 --- /dev/null +++ b/packages/torrent-repository/src/repository/rw_lock_tokio_mutex_tokio.rs @@ -0,0 +1,142 @@ +use std::sync::Arc; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; + +use super::RepositoryAsync; +use crate::entry::peer_list::PeerList; +use crate::entry::{Entry, EntryAsync}; +use crate::{EntryMutexTokio, EntrySingle, TorrentsRwLockTokioMutexTokio}; + +impl 
TorrentsRwLockTokioMutexTokio { + async fn get_torrents<'a>(&'a self) -> tokio::sync::RwLockReadGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.read().await + } + + async fn get_torrents_mut<'a>( + &'a self, + ) -> tokio::sync::RwLockWriteGuard<'a, std::collections::BTreeMap> + where + std::collections::BTreeMap: 'a, + { + self.torrents.write().await + } +} + +impl RepositoryAsync for TorrentsRwLockTokioMutexTokio +where + EntryMutexTokio: EntryAsync, + EntrySingle: Entry, +{ + async fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { + let maybe_entry = self.get_torrents().await.get(info_hash).cloned(); + + let entry = if let Some(entry) = maybe_entry { + entry + } else { + let mut db = self.get_torrents_mut().await; + let entry = db.entry(*info_hash).or_insert(Arc::default()); + entry.clone() + }; + + entry.upsert_peer(peer).await; + } + + async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + match self.get(info_hash).await { + Some(entry) => Some(entry.get_swarm_metadata().await), + None => None, + } + } + + async fn get(&self, key: &InfoHash) -> Option { + let db = self.get_torrents().await; + db.get(key).cloned() + } + + async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexTokio)> { + let db = self.get_torrents().await; + + match pagination { + Some(pagination) => db + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|(a, b)| (*a, b.clone())) + .collect(), + None => db.iter().map(|(a, b)| (*a, b.clone())).collect(), + } + } + + async fn get_metrics(&self) -> TorrentsMetrics { + let mut metrics = TorrentsMetrics::default(); + + for entry in self.get_torrents().await.values() { + let stats = entry.get_swarm_metadata().await; + metrics.complete += u64::from(stats.complete); + metrics.downloaded += u64::from(stats.downloaded); + metrics.incomplete += u64::from(stats.incomplete); + metrics.torrents += 1; + } + 
+ metrics + } + + async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + let mut db = self.get_torrents_mut().await; + + for (info_hash, completed) in persistent_torrents { + // Skip if torrent entry already exists + if db.contains_key(info_hash) { + continue; + } + + let entry = EntryMutexTokio::new( + EntrySingle { + swarm: PeerList::default(), + downloaded: *completed, + } + .into(), + ); + + db.insert(*info_hash, entry); + } + } + + async fn remove(&self, key: &InfoHash) -> Option { + let mut db = self.get_torrents_mut().await; + db.remove(key) + } + + async fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + let db = self.get_torrents().await; + let entries = db.values().cloned(); + + for entry in entries { + entry.remove_inactive_peers(current_cutoff).await; + } + } + + async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + let mut db = self.get_torrents_mut().await; + + let mut not_good = Vec::::default(); + + for (&infohash, torrent) in db.iter() { + if !torrent.clone().meets_retaining_policy(policy).await { + not_good.push(infohash); + } + } + + for remove in not_good { + drop(db.remove(&remove)); + } + } +} diff --git a/packages/torrent-repository/src/repository/skip_map_mutex_std.rs b/packages/torrent-repository/src/repository/skip_map_mutex_std.rs new file mode 100644 index 000000000..dd0d9c1b1 --- /dev/null +++ b/packages/torrent-repository/src/repository/skip_map_mutex_std.rs @@ -0,0 +1,292 @@ +use std::sync::Arc; + +use crossbeam_skiplist::SkipMap; +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; + +use super::Repository; +use crate::entry::peer_list::PeerList; +use 
crate::entry::{Entry, EntrySync}; +use crate::{EntryMutexParkingLot, EntryMutexStd, EntryRwLockParkingLot, EntrySingle}; + +#[derive(Default, Debug)] +pub struct CrossbeamSkipList { + pub torrents: SkipMap, +} + +impl Repository for CrossbeamSkipList +where + EntryMutexStd: EntrySync, + EntrySingle: Entry, +{ + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { + let entry = self.torrents.get_or_insert(*info_hash, Arc::default()); + entry.value().upsert_peer(peer); + } + + fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + self.torrents.get(info_hash).map(|entry| entry.value().get_swarm_metadata()) + } + + fn get(&self, key: &InfoHash) -> Option { + let maybe_entry = self.torrents.get(key); + maybe_entry.map(|entry| entry.value().clone()) + } + + fn get_metrics(&self) -> TorrentsMetrics { + let mut metrics = TorrentsMetrics::default(); + + for entry in &self.torrents { + let stats = entry.value().lock().expect("it should get a lock").get_swarm_metadata(); + metrics.complete += u64::from(stats.complete); + metrics.downloaded += u64::from(stats.downloaded); + metrics.incomplete += u64::from(stats.incomplete); + metrics.torrents += 1; + } + + metrics + } + + fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryMutexStd)> { + match pagination { + Some(pagination) => self + .torrents + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|entry| (*entry.key(), entry.value().clone())) + .collect(), + None => self + .torrents + .iter() + .map(|entry| (*entry.key(), entry.value().clone())) + .collect(), + } + } + + fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + for (info_hash, completed) in persistent_torrents { + if self.torrents.contains_key(info_hash) { + continue; + } + + let entry = EntryMutexStd::new( + EntrySingle { + swarm: PeerList::default(), + downloaded: *completed, + } + .into(), + ); + + // Since SkipMap is lock-free the torrent could have been 
inserted + // after checking if it exists. + self.torrents.get_or_insert(*info_hash, entry); + } + } + + fn remove(&self, key: &InfoHash) -> Option { + self.torrents.remove(key).map(|entry| entry.value().clone()) + } + + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + for entry in &self.torrents { + entry.value().remove_inactive_peers(current_cutoff); + } + } + + fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + for entry in &self.torrents { + if entry.value().meets_retaining_policy(policy) { + continue; + } + + entry.remove(); + } + } +} + +impl Repository for CrossbeamSkipList +where + EntryRwLockParkingLot: EntrySync, + EntrySingle: Entry, +{ + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { + let entry = self.torrents.get_or_insert(*info_hash, Arc::default()); + entry.value().upsert_peer(peer); + } + + fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + self.torrents.get(info_hash).map(|entry| entry.value().get_swarm_metadata()) + } + + fn get(&self, key: &InfoHash) -> Option { + let maybe_entry = self.torrents.get(key); + maybe_entry.map(|entry| entry.value().clone()) + } + + fn get_metrics(&self) -> TorrentsMetrics { + let mut metrics = TorrentsMetrics::default(); + + for entry in &self.torrents { + let stats = entry.value().read().get_swarm_metadata(); + metrics.complete += u64::from(stats.complete); + metrics.downloaded += u64::from(stats.downloaded); + metrics.incomplete += u64::from(stats.incomplete); + metrics.torrents += 1; + } + + metrics + } + + fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntryRwLockParkingLot)> { + match pagination { + Some(pagination) => self + .torrents + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|entry| (*entry.key(), entry.value().clone())) + .collect(), + None => self + .torrents + .iter() + .map(|entry| (*entry.key(), entry.value().clone())) + .collect(), + } + } + + fn 
import_persistent(&self, persistent_torrents: &PersistentTorrents) { + for (info_hash, completed) in persistent_torrents { + if self.torrents.contains_key(info_hash) { + continue; + } + + let entry = EntryRwLockParkingLot::new( + EntrySingle { + swarm: PeerList::default(), + downloaded: *completed, + } + .into(), + ); + + // Since SkipMap is lock-free the torrent could have been inserted + // after checking if it exists. + self.torrents.get_or_insert(*info_hash, entry); + } + } + + fn remove(&self, key: &InfoHash) -> Option { + self.torrents.remove(key).map(|entry| entry.value().clone()) + } + + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + for entry in &self.torrents { + entry.value().remove_inactive_peers(current_cutoff); + } + } + + fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + for entry in &self.torrents { + if entry.value().meets_retaining_policy(policy) { + continue; + } + + entry.remove(); + } + } +} + +impl Repository for CrossbeamSkipList +where + EntryMutexParkingLot: EntrySync, + EntrySingle: Entry, +{ + fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { + let entry = self.torrents.get_or_insert(*info_hash, Arc::default()); + entry.value().upsert_peer(peer); + } + + fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + self.torrents.get(info_hash).map(|entry| entry.value().get_swarm_metadata()) + } + + fn get(&self, key: &InfoHash) -> Option { + let maybe_entry = self.torrents.get(key); + maybe_entry.map(|entry| entry.value().clone()) + } + + fn get_metrics(&self) -> TorrentsMetrics { + let mut metrics = TorrentsMetrics::default(); + + for entry in &self.torrents { + let stats = entry.value().lock().get_swarm_metadata(); + metrics.complete += u64::from(stats.complete); + metrics.downloaded += u64::from(stats.downloaded); + metrics.incomplete += u64::from(stats.incomplete); + metrics.torrents += 1; + } + + metrics + } + + fn get_paginated(&self, pagination: Option<&Pagination>) -> 
Vec<(InfoHash, EntryMutexParkingLot)> { + match pagination { + Some(pagination) => self + .torrents + .iter() + .skip(pagination.offset as usize) + .take(pagination.limit as usize) + .map(|entry| (*entry.key(), entry.value().clone())) + .collect(), + None => self + .torrents + .iter() + .map(|entry| (*entry.key(), entry.value().clone())) + .collect(), + } + } + + fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + for (info_hash, completed) in persistent_torrents { + if self.torrents.contains_key(info_hash) { + continue; + } + + let entry = EntryMutexParkingLot::new( + EntrySingle { + swarm: PeerList::default(), + downloaded: *completed, + } + .into(), + ); + + // Since SkipMap is lock-free the torrent could have been inserted + // after checking if it exists. + self.torrents.get_or_insert(*info_hash, entry); + } + } + + fn remove(&self, key: &InfoHash) -> Option { + self.torrents.remove(key).map(|entry| entry.value().clone()) + } + + fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + for entry in &self.torrents { + entry.value().remove_inactive_peers(current_cutoff); + } + } + + fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + for entry in &self.torrents { + if entry.value().meets_retaining_policy(policy) { + continue; + } + + entry.remove(); + } + } +} diff --git a/packages/torrent-repository/tests/common/mod.rs b/packages/torrent-repository/tests/common/mod.rs new file mode 100644 index 000000000..efdf7f742 --- /dev/null +++ b/packages/torrent-repository/tests/common/mod.rs @@ -0,0 +1,3 @@ +pub mod repo; +pub mod torrent; +pub mod torrent_peer_builder; diff --git a/packages/torrent-repository/tests/common/repo.rs b/packages/torrent-repository/tests/common/repo.rs new file mode 100644 index 000000000..f317d0d17 --- /dev/null +++ b/packages/torrent-repository/tests/common/repo.rs @@ -0,0 +1,238 @@ +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::info_hash::InfoHash; 
+use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, PersistentTorrents}; +use torrust_tracker_torrent_repository::repository::{Repository as _, RepositoryAsync as _}; +use torrust_tracker_torrent_repository::{ + EntrySingle, TorrentsDashMapMutexStd, TorrentsRwLockStd, TorrentsRwLockStdMutexStd, TorrentsRwLockStdMutexTokio, + TorrentsRwLockTokio, TorrentsRwLockTokioMutexStd, TorrentsRwLockTokioMutexTokio, TorrentsSkipMapMutexParkingLot, + TorrentsSkipMapMutexStd, TorrentsSkipMapRwLockParkingLot, +}; + +#[derive(Debug)] +pub(crate) enum Repo { + RwLockStd(TorrentsRwLockStd), + RwLockStdMutexStd(TorrentsRwLockStdMutexStd), + RwLockStdMutexTokio(TorrentsRwLockStdMutexTokio), + RwLockTokio(TorrentsRwLockTokio), + RwLockTokioMutexStd(TorrentsRwLockTokioMutexStd), + RwLockTokioMutexTokio(TorrentsRwLockTokioMutexTokio), + SkipMapMutexStd(TorrentsSkipMapMutexStd), + SkipMapMutexParkingLot(TorrentsSkipMapMutexParkingLot), + SkipMapRwLockParkingLot(TorrentsSkipMapRwLockParkingLot), + DashMapMutexStd(TorrentsDashMapMutexStd), +} + +impl Repo { + pub(crate) async fn upsert_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { + match self { + Repo::RwLockStd(repo) => repo.upsert_peer(info_hash, peer), + Repo::RwLockStdMutexStd(repo) => repo.upsert_peer(info_hash, peer), + Repo::RwLockStdMutexTokio(repo) => repo.upsert_peer(info_hash, peer).await, + Repo::RwLockTokio(repo) => repo.upsert_peer(info_hash, peer).await, + Repo::RwLockTokioMutexStd(repo) => repo.upsert_peer(info_hash, peer).await, + Repo::RwLockTokioMutexTokio(repo) => repo.upsert_peer(info_hash, peer).await, + Repo::SkipMapMutexStd(repo) => repo.upsert_peer(info_hash, peer), + Repo::SkipMapMutexParkingLot(repo) => repo.upsert_peer(info_hash, peer), + Repo::SkipMapRwLockParkingLot(repo) => repo.upsert_peer(info_hash, 
peer), + Repo::DashMapMutexStd(repo) => repo.upsert_peer(info_hash, peer), + } + } + + pub(crate) async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> Option { + match self { + Repo::RwLockStd(repo) => repo.get_swarm_metadata(info_hash), + Repo::RwLockStdMutexStd(repo) => repo.get_swarm_metadata(info_hash), + Repo::RwLockStdMutexTokio(repo) => repo.get_swarm_metadata(info_hash).await, + Repo::RwLockTokio(repo) => repo.get_swarm_metadata(info_hash).await, + Repo::RwLockTokioMutexStd(repo) => repo.get_swarm_metadata(info_hash).await, + Repo::RwLockTokioMutexTokio(repo) => repo.get_swarm_metadata(info_hash).await, + Repo::SkipMapMutexStd(repo) => repo.get_swarm_metadata(info_hash), + Repo::SkipMapMutexParkingLot(repo) => repo.get_swarm_metadata(info_hash), + Repo::SkipMapRwLockParkingLot(repo) => repo.get_swarm_metadata(info_hash), + Repo::DashMapMutexStd(repo) => repo.get_swarm_metadata(info_hash), + } + } + + pub(crate) async fn get(&self, key: &InfoHash) -> Option { + match self { + Repo::RwLockStd(repo) => repo.get(key), + Repo::RwLockStdMutexStd(repo) => Some(repo.get(key)?.lock().unwrap().clone()), + Repo::RwLockStdMutexTokio(repo) => Some(repo.get(key).await?.lock().await.clone()), + Repo::RwLockTokio(repo) => repo.get(key).await, + Repo::RwLockTokioMutexStd(repo) => Some(repo.get(key).await?.lock().unwrap().clone()), + Repo::RwLockTokioMutexTokio(repo) => Some(repo.get(key).await?.lock().await.clone()), + Repo::SkipMapMutexStd(repo) => Some(repo.get(key)?.lock().unwrap().clone()), + Repo::SkipMapMutexParkingLot(repo) => Some(repo.get(key)?.lock().clone()), + Repo::SkipMapRwLockParkingLot(repo) => Some(repo.get(key)?.read().clone()), + Repo::DashMapMutexStd(repo) => Some(repo.get(key)?.lock().unwrap().clone()), + } + } + + pub(crate) async fn get_metrics(&self) -> TorrentsMetrics { + match self { + Repo::RwLockStd(repo) => repo.get_metrics(), + Repo::RwLockStdMutexStd(repo) => repo.get_metrics(), + Repo::RwLockStdMutexTokio(repo) => 
repo.get_metrics().await, + Repo::RwLockTokio(repo) => repo.get_metrics().await, + Repo::RwLockTokioMutexStd(repo) => repo.get_metrics().await, + Repo::RwLockTokioMutexTokio(repo) => repo.get_metrics().await, + Repo::SkipMapMutexStd(repo) => repo.get_metrics(), + Repo::SkipMapMutexParkingLot(repo) => repo.get_metrics(), + Repo::SkipMapRwLockParkingLot(repo) => repo.get_metrics(), + Repo::DashMapMutexStd(repo) => repo.get_metrics(), + } + } + + pub(crate) async fn get_paginated(&self, pagination: Option<&Pagination>) -> Vec<(InfoHash, EntrySingle)> { + match self { + Repo::RwLockStd(repo) => repo.get_paginated(pagination), + Repo::RwLockStdMutexStd(repo) => repo + .get_paginated(pagination) + .iter() + .map(|(i, t)| (*i, t.lock().expect("it should get a lock").clone())) + .collect(), + Repo::RwLockStdMutexTokio(repo) => { + let mut v: Vec<(InfoHash, EntrySingle)> = vec![]; + + for (i, t) in repo.get_paginated(pagination).await { + v.push((i, t.lock().await.clone())); + } + v + } + Repo::RwLockTokio(repo) => repo.get_paginated(pagination).await, + Repo::RwLockTokioMutexStd(repo) => repo + .get_paginated(pagination) + .await + .iter() + .map(|(i, t)| (*i, t.lock().expect("it should get a lock").clone())) + .collect(), + Repo::RwLockTokioMutexTokio(repo) => { + let mut v: Vec<(InfoHash, EntrySingle)> = vec![]; + + for (i, t) in repo.get_paginated(pagination).await { + v.push((i, t.lock().await.clone())); + } + v + } + Repo::SkipMapMutexStd(repo) => repo + .get_paginated(pagination) + .iter() + .map(|(i, t)| (*i, t.lock().expect("it should get a lock").clone())) + .collect(), + Repo::SkipMapMutexParkingLot(repo) => repo + .get_paginated(pagination) + .iter() + .map(|(i, t)| (*i, t.lock().clone())) + .collect(), + Repo::SkipMapRwLockParkingLot(repo) => repo + .get_paginated(pagination) + .iter() + .map(|(i, t)| (*i, t.read().clone())) + .collect(), + Repo::DashMapMutexStd(repo) => repo + .get_paginated(pagination) + .iter() + .map(|(i, t)| (*i, t.lock().expect("it should 
get a lock").clone())) + .collect(), + } + } + + pub(crate) async fn import_persistent(&self, persistent_torrents: &PersistentTorrents) { + match self { + Repo::RwLockStd(repo) => repo.import_persistent(persistent_torrents), + Repo::RwLockStdMutexStd(repo) => repo.import_persistent(persistent_torrents), + Repo::RwLockStdMutexTokio(repo) => repo.import_persistent(persistent_torrents).await, + Repo::RwLockTokio(repo) => repo.import_persistent(persistent_torrents).await, + Repo::RwLockTokioMutexStd(repo) => repo.import_persistent(persistent_torrents).await, + Repo::RwLockTokioMutexTokio(repo) => repo.import_persistent(persistent_torrents).await, + Repo::SkipMapMutexStd(repo) => repo.import_persistent(persistent_torrents), + Repo::SkipMapMutexParkingLot(repo) => repo.import_persistent(persistent_torrents), + Repo::SkipMapRwLockParkingLot(repo) => repo.import_persistent(persistent_torrents), + Repo::DashMapMutexStd(repo) => repo.import_persistent(persistent_torrents), + } + } + + pub(crate) async fn remove(&self, key: &InfoHash) -> Option { + match self { + Repo::RwLockStd(repo) => repo.remove(key), + Repo::RwLockStdMutexStd(repo) => Some(repo.remove(key)?.lock().unwrap().clone()), + Repo::RwLockStdMutexTokio(repo) => Some(repo.remove(key).await?.lock().await.clone()), + Repo::RwLockTokio(repo) => repo.remove(key).await, + Repo::RwLockTokioMutexStd(repo) => Some(repo.remove(key).await?.lock().unwrap().clone()), + Repo::RwLockTokioMutexTokio(repo) => Some(repo.remove(key).await?.lock().await.clone()), + Repo::SkipMapMutexStd(repo) => Some(repo.remove(key)?.lock().unwrap().clone()), + Repo::SkipMapMutexParkingLot(repo) => Some(repo.remove(key)?.lock().clone()), + Repo::SkipMapRwLockParkingLot(repo) => Some(repo.remove(key)?.write().clone()), + Repo::DashMapMutexStd(repo) => Some(repo.remove(key)?.lock().unwrap().clone()), + } + } + + pub(crate) async fn remove_inactive_peers(&self, current_cutoff: DurationSinceUnixEpoch) { + match self { + Repo::RwLockStd(repo) => 
repo.remove_inactive_peers(current_cutoff), + Repo::RwLockStdMutexStd(repo) => repo.remove_inactive_peers(current_cutoff), + Repo::RwLockStdMutexTokio(repo) => repo.remove_inactive_peers(current_cutoff).await, + Repo::RwLockTokio(repo) => repo.remove_inactive_peers(current_cutoff).await, + Repo::RwLockTokioMutexStd(repo) => repo.remove_inactive_peers(current_cutoff).await, + Repo::RwLockTokioMutexTokio(repo) => repo.remove_inactive_peers(current_cutoff).await, + Repo::SkipMapMutexStd(repo) => repo.remove_inactive_peers(current_cutoff), + Repo::SkipMapMutexParkingLot(repo) => repo.remove_inactive_peers(current_cutoff), + Repo::SkipMapRwLockParkingLot(repo) => repo.remove_inactive_peers(current_cutoff), + Repo::DashMapMutexStd(repo) => repo.remove_inactive_peers(current_cutoff), + } + } + + pub(crate) async fn remove_peerless_torrents(&self, policy: &TrackerPolicy) { + match self { + Repo::RwLockStd(repo) => repo.remove_peerless_torrents(policy), + Repo::RwLockStdMutexStd(repo) => repo.remove_peerless_torrents(policy), + Repo::RwLockStdMutexTokio(repo) => repo.remove_peerless_torrents(policy).await, + Repo::RwLockTokio(repo) => repo.remove_peerless_torrents(policy).await, + Repo::RwLockTokioMutexStd(repo) => repo.remove_peerless_torrents(policy).await, + Repo::RwLockTokioMutexTokio(repo) => repo.remove_peerless_torrents(policy).await, + Repo::SkipMapMutexStd(repo) => repo.remove_peerless_torrents(policy), + Repo::SkipMapMutexParkingLot(repo) => repo.remove_peerless_torrents(policy), + Repo::SkipMapRwLockParkingLot(repo) => repo.remove_peerless_torrents(policy), + Repo::DashMapMutexStd(repo) => repo.remove_peerless_torrents(policy), + } + } + + pub(crate) async fn insert(&self, info_hash: &InfoHash, torrent: EntrySingle) -> Option { + match self { + Repo::RwLockStd(repo) => { + repo.write().insert(*info_hash, torrent); + } + Repo::RwLockStdMutexStd(repo) => { + repo.write().insert(*info_hash, torrent.into()); + } + Repo::RwLockStdMutexTokio(repo) => { + 
repo.write().insert(*info_hash, torrent.into()); + } + Repo::RwLockTokio(repo) => { + repo.write().await.insert(*info_hash, torrent); + } + Repo::RwLockTokioMutexStd(repo) => { + repo.write().await.insert(*info_hash, torrent.into()); + } + Repo::RwLockTokioMutexTokio(repo) => { + repo.write().await.insert(*info_hash, torrent.into()); + } + Repo::SkipMapMutexStd(repo) => { + repo.torrents.insert(*info_hash, torrent.into()); + } + Repo::SkipMapMutexParkingLot(repo) => { + repo.torrents.insert(*info_hash, torrent.into()); + } + Repo::SkipMapRwLockParkingLot(repo) => { + repo.torrents.insert(*info_hash, torrent.into()); + } + Repo::DashMapMutexStd(repo) => { + repo.torrents.insert(*info_hash, torrent.into()); + } + }; + self.get(info_hash).await + } +} diff --git a/packages/torrent-repository/tests/common/torrent.rs b/packages/torrent-repository/tests/common/torrent.rs new file mode 100644 index 000000000..927f13169 --- /dev/null +++ b/packages/torrent-repository/tests/common/torrent.rs @@ -0,0 +1,101 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; +use torrust_tracker_torrent_repository::entry::{Entry as _, EntryAsync as _, EntrySync as _}; +use torrust_tracker_torrent_repository::{ + EntryMutexParkingLot, EntryMutexStd, EntryMutexTokio, EntryRwLockParkingLot, EntrySingle, +}; + +#[derive(Debug, Clone)] +pub(crate) enum Torrent { + Single(EntrySingle), + MutexStd(EntryMutexStd), + MutexTokio(EntryMutexTokio), + MutexParkingLot(EntryMutexParkingLot), + RwLockParkingLot(EntryRwLockParkingLot), +} + +impl Torrent { + pub(crate) async fn get_stats(&self) -> SwarmMetadata { + match self { + Torrent::Single(entry) => entry.get_swarm_metadata(), + Torrent::MutexStd(entry) => entry.get_swarm_metadata(), + Torrent::MutexTokio(entry) => entry.clone().get_swarm_metadata().await, + 
Torrent::MutexParkingLot(entry) => entry.clone().get_swarm_metadata(), + Torrent::RwLockParkingLot(entry) => entry.clone().get_swarm_metadata(), + } + } + + pub(crate) async fn meets_retaining_policy(&self, policy: &TrackerPolicy) -> bool { + match self { + Torrent::Single(entry) => entry.meets_retaining_policy(policy), + Torrent::MutexStd(entry) => entry.meets_retaining_policy(policy), + Torrent::MutexTokio(entry) => entry.clone().meets_retaining_policy(policy).await, + Torrent::MutexParkingLot(entry) => entry.meets_retaining_policy(policy), + Torrent::RwLockParkingLot(entry) => entry.meets_retaining_policy(policy), + } + } + + pub(crate) async fn peers_is_empty(&self) -> bool { + match self { + Torrent::Single(entry) => entry.peers_is_empty(), + Torrent::MutexStd(entry) => entry.peers_is_empty(), + Torrent::MutexTokio(entry) => entry.clone().peers_is_empty().await, + Torrent::MutexParkingLot(entry) => entry.peers_is_empty(), + Torrent::RwLockParkingLot(entry) => entry.peers_is_empty(), + } + } + + pub(crate) async fn get_peers_len(&self) -> usize { + match self { + Torrent::Single(entry) => entry.get_peers_len(), + Torrent::MutexStd(entry) => entry.get_peers_len(), + Torrent::MutexTokio(entry) => entry.clone().get_peers_len().await, + Torrent::MutexParkingLot(entry) => entry.get_peers_len(), + Torrent::RwLockParkingLot(entry) => entry.get_peers_len(), + } + } + + pub(crate) async fn get_peers(&self, limit: Option) -> Vec> { + match self { + Torrent::Single(entry) => entry.get_peers(limit), + Torrent::MutexStd(entry) => entry.get_peers(limit), + Torrent::MutexTokio(entry) => entry.clone().get_peers(limit).await, + Torrent::MutexParkingLot(entry) => entry.get_peers(limit), + Torrent::RwLockParkingLot(entry) => entry.get_peers(limit), + } + } + + pub(crate) async fn get_peers_for_client(&self, client: &SocketAddr, limit: Option) -> Vec> { + match self { + Torrent::Single(entry) => entry.get_peers_for_client(client, limit), + Torrent::MutexStd(entry) => 
entry.get_peers_for_client(client, limit), + Torrent::MutexTokio(entry) => entry.clone().get_peers_for_client(client, limit).await, + Torrent::MutexParkingLot(entry) => entry.get_peers_for_client(client, limit), + Torrent::RwLockParkingLot(entry) => entry.get_peers_for_client(client, limit), + } + } + + pub(crate) async fn upsert_peer(&mut self, peer: &peer::Peer) -> bool { + match self { + Torrent::Single(entry) => entry.upsert_peer(peer), + Torrent::MutexStd(entry) => entry.upsert_peer(peer), + Torrent::MutexTokio(entry) => entry.clone().upsert_peer(peer).await, + Torrent::MutexParkingLot(entry) => entry.upsert_peer(peer), + Torrent::RwLockParkingLot(entry) => entry.upsert_peer(peer), + } + } + + pub(crate) async fn remove_inactive_peers(&mut self, current_cutoff: DurationSinceUnixEpoch) { + match self { + Torrent::Single(entry) => entry.remove_inactive_peers(current_cutoff), + Torrent::MutexStd(entry) => entry.remove_inactive_peers(current_cutoff), + Torrent::MutexTokio(entry) => entry.clone().remove_inactive_peers(current_cutoff).await, + Torrent::MutexParkingLot(entry) => entry.remove_inactive_peers(current_cutoff), + Torrent::RwLockParkingLot(entry) => entry.remove_inactive_peers(current_cutoff), + } + } +} diff --git a/packages/torrent-repository/tests/common/torrent_peer_builder.rs b/packages/torrent-repository/tests/common/torrent_peer_builder.rs new file mode 100644 index 000000000..3a4e61ed2 --- /dev/null +++ b/packages/torrent-repository/tests/common/torrent_peer_builder.rs @@ -0,0 +1,88 @@ +use std::net::SocketAddr; + +use torrust_tracker_clock::clock::Time; +use torrust_tracker_primitives::announce_event::AnnounceEvent; +use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfBytes}; + +use crate::CurrentClock; + +#[derive(Debug, Default)] +struct TorrentPeerBuilder { + peer: peer::Peer, +} + +#[allow(dead_code)] +impl TorrentPeerBuilder { + #[must_use] + fn new() -> Self { + Self { + peer: peer::Peer { + updated: CurrentClock::now(), + 
..Default::default() + }, + } + } + + #[must_use] + fn with_event_completed(mut self) -> Self { + self.peer.event = AnnounceEvent::Completed; + self + } + + #[must_use] + fn with_event_started(mut self) -> Self { + self.peer.event = AnnounceEvent::Started; + self + } + + #[must_use] + fn with_peer_address(mut self, peer_addr: SocketAddr) -> Self { + self.peer.peer_addr = peer_addr; + self + } + + #[must_use] + fn with_peer_id(mut self, peer_id: peer::Id) -> Self { + self.peer.peer_id = peer_id; + self + } + + #[must_use] + fn with_number_of_bytes_left(mut self, left: i64) -> Self { + self.peer.left = NumberOfBytes(left); + self + } + + #[must_use] + fn updated_at(mut self, updated: DurationSinceUnixEpoch) -> Self { + self.peer.updated = updated; + self + } + + #[must_use] + fn into(self) -> peer::Peer { + self.peer + } +} + +/// A torrent seeder is a peer with 0 bytes left to download which +/// has not announced it has stopped +#[must_use] +pub fn a_completed_peer(id: i32) -> peer::Peer { + TorrentPeerBuilder::new() + .with_number_of_bytes_left(0) + .with_event_completed() + .with_peer_id(id.into()) + .into() +} + +/// A torrent leecher is a peer that is not a seeder. 
+/// Leecher: left > 0 OR event = Stopped +#[must_use] +pub fn a_started_peer(id: i32) -> peer::Peer { + TorrentPeerBuilder::new() + .with_number_of_bytes_left(1) + .with_event_started() + .with_peer_id(id.into()) + .into() +} diff --git a/packages/torrent-repository/tests/entry/mod.rs b/packages/torrent-repository/tests/entry/mod.rs new file mode 100644 index 000000000..2a7063a4f --- /dev/null +++ b/packages/torrent-repository/tests/entry/mod.rs @@ -0,0 +1,443 @@ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use std::ops::Sub; +use std::time::Duration; + +use rstest::{fixture, rstest}; +use torrust_tracker_clock::clock::stopped::Stopped as _; +use torrust_tracker_clock::clock::{self, Time as _}; +use torrust_tracker_configuration::{TrackerPolicy, TORRENT_PEERS_LIMIT}; +use torrust_tracker_primitives::announce_event::AnnounceEvent; +use torrust_tracker_primitives::peer::Peer; +use torrust_tracker_primitives::{peer, NumberOfBytes}; +use torrust_tracker_torrent_repository::{ + EntryMutexParkingLot, EntryMutexStd, EntryMutexTokio, EntryRwLockParkingLot, EntrySingle, +}; + +use crate::common::torrent::Torrent; +use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; +use crate::CurrentClock; + +#[fixture] +fn single() -> Torrent { + Torrent::Single(EntrySingle::default()) +} +#[fixture] +fn mutex_std() -> Torrent { + Torrent::MutexStd(EntryMutexStd::default()) +} + +#[fixture] +fn mutex_tokio() -> Torrent { + Torrent::MutexTokio(EntryMutexTokio::default()) +} + +#[fixture] +fn mutex_parking_lot() -> Torrent { + Torrent::MutexParkingLot(EntryMutexParkingLot::default()) +} + +#[fixture] +fn rw_lock_parking_lot() -> Torrent { + Torrent::RwLockParkingLot(EntryRwLockParkingLot::default()) +} + +#[fixture] +fn policy_none() -> TrackerPolicy { + TrackerPolicy::new(0, false, false) +} + +#[fixture] +fn policy_persist() -> TrackerPolicy { + TrackerPolicy::new(0, true, false) +} + +#[fixture] +fn policy_remove() -> TrackerPolicy { + TrackerPolicy::new(0, 
false, true) +} + +#[fixture] +fn policy_remove_persist() -> TrackerPolicy { + TrackerPolicy::new(0, true, true) +} + +pub enum Makes { + Empty, + Started, + Completed, + Downloaded, + Three, +} + +async fn make(torrent: &mut Torrent, makes: &Makes) -> Vec { + match makes { + Makes::Empty => vec![], + Makes::Started => { + let peer = a_started_peer(1); + torrent.upsert_peer(&peer).await; + vec![peer] + } + Makes::Completed => { + let peer = a_completed_peer(2); + torrent.upsert_peer(&peer).await; + vec![peer] + } + Makes::Downloaded => { + let mut peer = a_started_peer(3); + torrent.upsert_peer(&peer).await; + peer.event = AnnounceEvent::Completed; + peer.left = NumberOfBytes(0); + torrent.upsert_peer(&peer).await; + vec![peer] + } + Makes::Three => { + let peer_1 = a_started_peer(1); + torrent.upsert_peer(&peer_1).await; + + let peer_2 = a_completed_peer(2); + torrent.upsert_peer(&peer_2).await; + + let mut peer_3 = a_started_peer(3); + torrent.upsert_peer(&peer_3).await; + peer_3.event = AnnounceEvent::Completed; + peer_3.left = NumberOfBytes(0); + torrent.upsert_peer(&peer_3).await; + vec![peer_1, peer_2, peer_3] + } + } +} + +#[rstest] +#[case::empty(&Makes::Empty)] +#[tokio::test] +async fn it_should_be_empty_by_default( + #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, + #[case] makes: &Makes, +) { + make(&mut torrent, makes).await; + + assert_eq!(torrent.get_peers_len().await, 0); +} + +#[rstest] +#[case::empty(&Makes::Empty)] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_check_if_entry_should_be_retained_based_on_the_tracker_policy( + #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, + #[case] makes: &Makes, + #[values(policy_none(), policy_persist(), policy_remove(), policy_remove_persist())] 
policy: TrackerPolicy, +) { + make(&mut torrent, makes).await; + + let has_peers = !torrent.peers_is_empty().await; + let has_downloads = torrent.get_stats().await.downloaded != 0; + + match (policy.remove_peerless_torrents, policy.persistent_torrent_completed_stat) { + // remove torrents without peers, and keep completed download stats + (true, true) => match (has_peers, has_downloads) { + // no peers, but has downloads + // peers, with or without downloads + (false, true) | (true, true | false) => assert!(torrent.meets_retaining_policy(&policy).await), + // no peers and no downloads + (false, false) => assert!(!torrent.meets_retaining_policy(&policy).await), + }, + // remove torrents without peers and drop completed download stats + (true, false) => match (has_peers, has_downloads) { + // peers, with or without downloads + (true, true | false) => assert!(torrent.meets_retaining_policy(&policy).await), + // no peers and with or without downloads + (false, true | false) => assert!(!torrent.meets_retaining_policy(&policy).await), + }, + // keep torrents without peers, but keep or drop completed download stats + (false, true | false) => assert!(torrent.meets_retaining_policy(&policy).await), + } +} + +#[rstest] +#[case::empty(&Makes::Empty)] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_get_peers_for_torrent_entry( + #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, + #[case] makes: &Makes, +) { + let peers = make(&mut torrent, makes).await; + + let torrent_peers = torrent.get_peers(None).await; + + assert_eq!(torrent_peers.len(), peers.len()); + + for peer in torrent_peers { + assert!(peers.contains(&peer)); + } +} + +#[rstest] +#[case::empty(&Makes::Empty)] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] 
+#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_update_a_peer(#[values(single(), mutex_std(), mutex_tokio())] mut torrent: Torrent, #[case] makes: &Makes) { + make(&mut torrent, makes).await; + + // Make and insert a new peer. + let mut peer = a_started_peer(-1); + torrent.upsert_peer(&peer).await; + + // Get the Inserted Peer by Id. + let peers = torrent.get_peers(None).await; + let original = peers + .iter() + .find(|p| peer::ReadInfo::get_id(*p) == peer::ReadInfo::get_id(&peer)) + .expect("it should find peer by id"); + + assert_eq!(original.event, AnnounceEvent::Started, "it should be as created"); + + // Announce "Completed" torrent download event. + peer.event = AnnounceEvent::Completed; + torrent.upsert_peer(&peer).await; + + // Get the Updated Peer by Id. + let peers = torrent.get_peers(None).await; + let updated = peers + .iter() + .find(|p| peer::ReadInfo::get_id(*p) == peer::ReadInfo::get_id(&peer)) + .expect("it should find peer by id"); + + assert_eq!(updated.event, AnnounceEvent::Completed, "it should be updated"); +} + +#[rstest] +#[case::empty(&Makes::Empty)] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_remove_a_peer_upon_stopped_announcement( + #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, + #[case] makes: &Makes, +) { + use torrust_tracker_primitives::peer::ReadInfo as _; + + make(&mut torrent, makes).await; + + let mut peer = a_started_peer(-1); + + torrent.upsert_peer(&peer).await; + + // The started peer should be inserted. + let peers = torrent.get_peers(None).await; + let original = peers + .iter() + .find(|p| p.get_id() == peer.get_id()) + .expect("it should find peer by id"); + + assert_eq!(original.event, AnnounceEvent::Started); + + // Change peer to "Stopped" and insert. 
+ peer.event = AnnounceEvent::Stopped; + torrent.upsert_peer(&peer).await; + + // It should be removed now. + let peers = torrent.get_peers(None).await; + + assert_eq!( + peers.iter().find(|p| p.get_id() == peer.get_id()), + None, + "it should be removed" + ); +} + +#[rstest] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_handle_a_peer_completed_announcement_and_update_the_downloaded_statistic( + #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, + #[case] makes: &Makes, +) { + make(&mut torrent, makes).await; + let downloaded = torrent.get_stats().await.downloaded; + + let peers = torrent.get_peers(None).await; + let mut peer = **peers.first().expect("there should be a peer"); + + let is_already_completed = peer.event == AnnounceEvent::Completed; + + // Announce "Completed" torrent download event. 
+ peer.event = AnnounceEvent::Completed; + + torrent.upsert_peer(&peer).await; + let stats = torrent.get_stats().await; + + if is_already_completed { + assert_eq!(stats.downloaded, downloaded); + } else { + assert_eq!(stats.downloaded, downloaded + 1); + } +} + +#[rstest] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_update_a_peer_as_a_seeder( + #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, + #[case] makes: &Makes, +) { + let peers = make(&mut torrent, makes).await; + let completed = u32::try_from(peers.iter().filter(|p| p.is_seeder()).count()).expect("it_should_not_be_so_many"); + + let peers = torrent.get_peers(None).await; + let mut peer = **peers.first().expect("there should be a peer"); + + let is_already_non_left = peer.left == NumberOfBytes(0); + + // Set Bytes Left to Zero + peer.left = NumberOfBytes(0); + torrent.upsert_peer(&peer).await; + let stats = torrent.get_stats().await; + + if is_already_non_left { + // it was already complete + assert_eq!(stats.complete, completed); + } else { + // now it is complete + assert_eq!(stats.complete, completed + 1); + } +} + +#[rstest] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_update_a_peer_as_incomplete( + #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, + #[case] makes: &Makes, +) { + let peers = make(&mut torrent, makes).await; + let incomplete = u32::try_from(peers.iter().filter(|p| !p.is_seeder()).count()).expect("it should not be so many"); + + let peers = torrent.get_peers(None).await; + let mut peer = **peers.first().expect("there should be a peer"); + + let completed_already = peer.left == NumberOfBytes(0); 
+ + // Set Bytes Left to non-zero + peer.left = NumberOfBytes(1); + torrent.upsert_peer(&peer).await; + let stats = torrent.get_stats().await; + + if completed_already { + // now it is incomplete + assert_eq!(stats.incomplete, incomplete + 1); + } else { + // was already incomplete + assert_eq!(stats.incomplete, incomplete); + } +} + +#[rstest] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_get_peers_excluding_the_client_socket( + #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, + #[case] makes: &Makes, +) { + make(&mut torrent, makes).await; + + let peers = torrent.get_peers(None).await; + let mut peer = **peers.first().expect("there should be a peer"); + + let socket = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8081); + + // for this test, we should not already use this socket. + assert_ne!(peer.peer_addr, socket); + + // it should get the peer as it does not share the socket. + assert!(torrent.get_peers_for_client(&socket, None).await.contains(&peer.into())); + + // set the address to the socket. + peer.peer_addr = socket; + torrent.upsert_peer(&peer).await; // Add peer + + // It should not include the peer that has the same socket.
+ assert!(!torrent.get_peers_for_client(&socket, None).await.contains(&peer.into())); +} + +#[rstest] +#[case::empty(&Makes::Empty)] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_limit_the_number_of_peers_returned( + #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, + #[case] makes: &Makes, +) { + make(&mut torrent, makes).await; + + // We add one more peer than the scrape limit + for peer_number in 1..=74 + 1 { + let mut peer = a_started_peer(1); + peer.peer_id = peer::Id::from(peer_number); + torrent.upsert_peer(&peer).await; + } + + let peers = torrent.get_peers(Some(TORRENT_PEERS_LIMIT)).await; + + assert_eq!(peers.len(), 74); +} + +#[rstest] +#[case::empty(&Makes::Empty)] +#[case::started(&Makes::Started)] +#[case::completed(&Makes::Completed)] +#[case::downloaded(&Makes::Downloaded)] +#[case::three(&Makes::Three)] +#[tokio::test] +async fn it_should_remove_inactive_peers_beyond_cutoff( + #[values(single(), mutex_std(), mutex_tokio(), mutex_parking_lot(), rw_lock_parking_lot())] mut torrent: Torrent, + #[case] makes: &Makes, +) { + const TIMEOUT: Duration = Duration::from_secs(120); + const EXPIRE: Duration = Duration::from_secs(121); + + let peers = make(&mut torrent, makes).await; + + let mut peer = a_completed_peer(-1); + + let now = clock::Working::now(); + clock::Stopped::local_set(&now); + + peer.updated = now.sub(EXPIRE); + + torrent.upsert_peer(&peer).await; + + assert_eq!(torrent.get_peers_len().await, peers.len() + 1); + + let current_cutoff = CurrentClock::now_sub(&TIMEOUT).unwrap_or_default(); + torrent.remove_inactive_peers(current_cutoff).await; + + assert_eq!(torrent.get_peers_len().await, peers.len()); +} diff --git a/packages/torrent-repository/tests/integration.rs b/packages/torrent-repository/tests/integration.rs new file mode 100644 index 
000000000..5aab67b03 --- /dev/null +++ b/packages/torrent-repository/tests/integration.rs @@ -0,0 +1,22 @@ +//! Integration tests. +//! +//! ```text +//! cargo test --test integration +//! ``` + +use torrust_tracker_clock::clock; + +pub mod common; +mod entry; +mod repository; + +/// This code needs to be copied into each crate. +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Working; + +/// Stopped version, for testing. +#[cfg(test)] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Stopped; diff --git a/packages/torrent-repository/tests/repository/mod.rs b/packages/torrent-repository/tests/repository/mod.rs new file mode 100644 index 000000000..b3b742607 --- /dev/null +++ b/packages/torrent-repository/tests/repository/mod.rs @@ -0,0 +1,639 @@ +use std::collections::{BTreeMap, HashSet}; +use std::hash::{DefaultHasher, Hash, Hasher}; + +use rstest::{fixture, rstest}; +use torrust_tracker_configuration::TrackerPolicy; +use torrust_tracker_primitives::announce_event::AnnounceEvent; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::{NumberOfBytes, PersistentTorrents}; +use torrust_tracker_torrent_repository::entry::Entry as _; +use torrust_tracker_torrent_repository::repository::dash_map_mutex_std::XacrimonDashMap; +use torrust_tracker_torrent_repository::repository::rw_lock_std::RwLockStd; +use torrust_tracker_torrent_repository::repository::rw_lock_tokio::RwLockTokio; +use torrust_tracker_torrent_repository::repository::skip_map_mutex_std::CrossbeamSkipList; +use torrust_tracker_torrent_repository::EntrySingle; + +use crate::common::repo::Repo; +use crate::common::torrent_peer_builder::{a_completed_peer, a_started_peer}; + +#[fixture] +fn standard() -> Repo { + Repo::RwLockStd(RwLockStd::default()) +} + +#[fixture] +fn 
standard_mutex() -> Repo { + Repo::RwLockStdMutexStd(RwLockStd::default()) +} + +#[fixture] +fn standard_tokio() -> Repo { + Repo::RwLockStdMutexTokio(RwLockStd::default()) +} + +#[fixture] +fn tokio_std() -> Repo { + Repo::RwLockTokio(RwLockTokio::default()) +} + +#[fixture] +fn tokio_mutex() -> Repo { + Repo::RwLockTokioMutexStd(RwLockTokio::default()) +} + +#[fixture] +fn tokio_tokio() -> Repo { + Repo::RwLockTokioMutexTokio(RwLockTokio::default()) +} + +#[fixture] +fn skip_list_mutex_std() -> Repo { + Repo::SkipMapMutexStd(CrossbeamSkipList::default()) +} + +#[fixture] +fn skip_list_mutex_parking_lot() -> Repo { + Repo::SkipMapMutexParkingLot(CrossbeamSkipList::default()) +} + +#[fixture] +fn skip_list_rw_lock_parking_lot() -> Repo { + Repo::SkipMapRwLockParkingLot(CrossbeamSkipList::default()) +} + +#[fixture] +fn dash_map_std() -> Repo { + Repo::DashMapMutexStd(XacrimonDashMap::default()) +} + +type Entries = Vec<(InfoHash, EntrySingle)>; + +#[fixture] +fn empty() -> Entries { + vec![] +} + +#[fixture] +fn default() -> Entries { + vec![(InfoHash::default(), EntrySingle::default())] +} + +#[fixture] +fn started() -> Entries { + let mut torrent = EntrySingle::default(); + torrent.upsert_peer(&a_started_peer(1)); + vec![(InfoHash::default(), torrent)] +} + +#[fixture] +fn completed() -> Entries { + let mut torrent = EntrySingle::default(); + torrent.upsert_peer(&a_completed_peer(2)); + vec![(InfoHash::default(), torrent)] +} + +#[fixture] +fn downloaded() -> Entries { + let mut torrent = EntrySingle::default(); + let mut peer = a_started_peer(3); + torrent.upsert_peer(&peer); + peer.event = AnnounceEvent::Completed; + peer.left = NumberOfBytes(0); + torrent.upsert_peer(&peer); + vec![(InfoHash::default(), torrent)] +} + +#[fixture] +fn three() -> Entries { + let mut started = EntrySingle::default(); + let started_h = &mut DefaultHasher::default(); + started.upsert_peer(&a_started_peer(1)); + started.hash(started_h); + + let mut completed = 
EntrySingle::default(); + let completed_h = &mut DefaultHasher::default(); + completed.upsert_peer(&a_completed_peer(2)); + completed.hash(completed_h); + + let mut downloaded = EntrySingle::default(); + let downloaded_h = &mut DefaultHasher::default(); + let mut downloaded_peer = a_started_peer(3); + downloaded.upsert_peer(&downloaded_peer); + downloaded_peer.event = AnnounceEvent::Completed; + downloaded_peer.left = NumberOfBytes(0); + downloaded.upsert_peer(&downloaded_peer); + downloaded.hash(downloaded_h); + + vec![ + (InfoHash::from(&started_h.clone()), started), + (InfoHash::from(&completed_h.clone()), completed), + (InfoHash::from(&downloaded_h.clone()), downloaded), + ] +} + +#[fixture] +fn many_out_of_order() -> Entries { + let mut entries: HashSet<(InfoHash, EntrySingle)> = HashSet::default(); + + for i in 0..408 { + let mut entry = EntrySingle::default(); + entry.upsert_peer(&a_started_peer(i)); + + entries.insert((InfoHash::from(&i), entry)); + } + + // we keep the random order from the hashed set for the vector. + entries.iter().map(|(i, e)| (*i, e.clone())).collect() +} + +#[fixture] +fn many_hashed_in_order() -> Entries { + let mut entries: BTreeMap<InfoHash, EntrySingle> = BTreeMap::default(); + + for i in 0..408 { + let mut entry = EntrySingle::default(); + entry.upsert_peer(&a_started_peer(i)); + + let hash: &mut DefaultHasher = &mut DefaultHasher::default(); + hash.write_i32(i); + + entries.insert(InfoHash::from(&hash.clone()), entry); + } + + // We return the entries in-order from the b-tree map.
+ entries.iter().map(|(i, e)| (*i, e.clone())).collect() +} + +#[fixture] +fn persistent_empty() -> PersistentTorrents { + PersistentTorrents::default() +} + +#[fixture] +fn persistent_single() -> PersistentTorrents { + let hash = &mut DefaultHasher::default(); + + hash.write_u8(1); + let t = [(InfoHash::from(&hash.clone()), 0_u32)]; + + t.iter().copied().collect() +} + +#[fixture] +fn persistent_three() -> PersistentTorrents { + let hash = &mut DefaultHasher::default(); + + hash.write_u8(1); + let info_1 = InfoHash::from(&hash.clone()); + hash.write_u8(2); + let info_2 = InfoHash::from(&hash.clone()); + hash.write_u8(3); + let info_3 = InfoHash::from(&hash.clone()); + + let t = [(info_1, 1_u32), (info_2, 2_u32), (info_3, 3_u32)]; + + t.iter().copied().collect() +} + +async fn make(repo: &Repo, entries: &Entries) { + for (info_hash, entry) in entries { + repo.insert(info_hash, entry.clone()).await; + } +} + +#[fixture] +fn paginated_limit_zero() -> Pagination { + Pagination::new(0, 0) +} + +#[fixture] +fn paginated_limit_one() -> Pagination { + Pagination::new(0, 1) +} + +#[fixture] +fn paginated_limit_one_offset_one() -> Pagination { + Pagination::new(1, 1) +} + +#[fixture] +fn policy_none() -> TrackerPolicy { + TrackerPolicy::new(0, false, false) +} + +#[fixture] +fn policy_persist() -> TrackerPolicy { + TrackerPolicy::new(0, true, false) +} + +#[fixture] +fn policy_remove() -> TrackerPolicy { + TrackerPolicy::new(0, false, true) +} + +#[fixture] +fn policy_remove_persist() -> TrackerPolicy { + TrackerPolicy::new(0, true, true) +} + +#[rstest] +#[case::empty(empty())] +#[case::default(default())] +#[case::started(started())] +#[case::completed(completed())] +#[case::downloaded(downloaded())] +#[case::three(three())] +#[case::out_of_order(many_out_of_order())] +#[case::in_order(many_hashed_in_order())] +#[tokio::test] +async fn it_should_get_a_torrent_entry( + #[values( + standard(), + standard_mutex(), + standard_tokio(), + tokio_std(), + tokio_mutex(), + 
tokio_tokio(), + skip_list_mutex_std(), + skip_list_mutex_parking_lot(), + skip_list_rw_lock_parking_lot(), + dash_map_std() + )] + repo: Repo, + #[case] entries: Entries, +) { + make(&repo, &entries).await; + + if let Some((info_hash, torrent)) = entries.first() { + assert_eq!(repo.get(info_hash).await, Some(torrent.clone())); + } else { + assert_eq!(repo.get(&InfoHash::default()).await, None); + } +} + +#[rstest] +#[case::empty(empty())] +#[case::default(default())] +#[case::started(started())] +#[case::completed(completed())] +#[case::downloaded(downloaded())] +#[case::three(three())] +#[case::out_of_order(many_out_of_order())] +#[case::in_order(many_hashed_in_order())] +#[tokio::test] +async fn it_should_get_paginated_entries_in_a_stable_or_sorted_order( + #[values( + standard(), + standard_mutex(), + standard_tokio(), + tokio_std(), + tokio_mutex(), + tokio_tokio(), + skip_list_mutex_std(), + skip_list_mutex_parking_lot(), + skip_list_rw_lock_parking_lot() + )] + repo: Repo, + #[case] entries: Entries, + many_out_of_order: Entries, +) { + make(&repo, &entries).await; + + let entries_a = repo.get_paginated(None).await.iter().map(|(i, _)| *i).collect::<Vec<_>>(); + + make(&repo, &many_out_of_order).await; + + let entries_b = repo.get_paginated(None).await.iter().map(|(i, _)| *i).collect::<Vec<_>>(); + + let is_equal = entries_b.iter().take(entries_a.len()).copied().collect::<Vec<_>>() == entries_a; + + let is_sorted = entries_b.windows(2).all(|w| w[0] <= w[1]); + + assert!( + is_equal || is_sorted, + "The order is unstable: {is_equal}, or is sorted {is_sorted}."
+ ); +} + +#[rstest] +#[case::empty(empty())] +#[case::default(default())] +#[case::started(started())] +#[case::completed(completed())] +#[case::downloaded(downloaded())] +#[case::three(three())] +#[case::out_of_order(many_out_of_order())] +#[case::in_order(many_hashed_in_order())] +#[tokio::test] +async fn it_should_get_paginated( + #[values( + standard(), + standard_mutex(), + standard_tokio(), + tokio_std(), + tokio_mutex(), + tokio_tokio(), + skip_list_mutex_std(), + skip_list_mutex_parking_lot(), + skip_list_rw_lock_parking_lot() + )] + repo: Repo, + #[case] entries: Entries, + #[values(paginated_limit_zero(), paginated_limit_one(), paginated_limit_one_offset_one())] paginated: Pagination, +) { + make(&repo, &entries).await; + + let mut info_hashes = repo.get_paginated(None).await.iter().map(|(i, _)| *i).collect::<Vec<_>>(); + info_hashes.sort(); + + match paginated { + // it should return empty if limit is zero. + Pagination { limit: 0, .. } => assert_eq!(repo.get_paginated(Some(&paginated)).await, vec![]), + + // it should return a single entry if the limit is one. + Pagination { limit: 1, offset: 0 } => { + if info_hashes.is_empty() { + assert_eq!(repo.get_paginated(Some(&paginated)).await.len(), 0); + } else { + let page = repo.get_paginated(Some(&paginated)).await; + assert_eq!(page.len(), 1); + assert_eq!(page.first().map(|(i, _)| i), info_hashes.first()); + } + } + + // it should return only the second entry if both the limit and the offset are one. + Pagination { limit: 1, offset: 1 } => { + if info_hashes.len() > 1 { + let page = repo.get_paginated(Some(&paginated)).await; + assert_eq!(page.len(), 1); + assert_eq!(page[0].0, info_hashes[1]); + } + } + // the other cases are not yet tested.
+ _ => {} + } +} + +#[rstest] +#[case::empty(empty())] +#[case::default(default())] +#[case::started(started())] +#[case::completed(completed())] +#[case::downloaded(downloaded())] +#[case::three(three())] +#[case::out_of_order(many_out_of_order())] +#[case::in_order(many_hashed_in_order())] +#[tokio::test] +async fn it_should_get_metrics( + #[values( + standard(), + standard_mutex(), + standard_tokio(), + tokio_std(), + tokio_mutex(), + tokio_tokio(), + skip_list_mutex_std(), + skip_list_mutex_parking_lot(), + skip_list_rw_lock_parking_lot(), + dash_map_std() + )] + repo: Repo, + #[case] entries: Entries, +) { + use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; + + make(&repo, &entries).await; + + let mut metrics = TorrentsMetrics::default(); + + for (_, torrent) in entries { + let stats = torrent.get_swarm_metadata(); + + metrics.torrents += 1; + metrics.incomplete += u64::from(stats.incomplete); + metrics.complete += u64::from(stats.complete); + metrics.downloaded += u64::from(stats.downloaded); + } + + assert_eq!(repo.get_metrics().await, metrics); +} + +#[rstest] +#[case::empty(empty())] +#[case::default(default())] +#[case::started(started())] +#[case::completed(completed())] +#[case::downloaded(downloaded())] +#[case::three(three())] +#[case::out_of_order(many_out_of_order())] +#[case::in_order(many_hashed_in_order())] +#[tokio::test] +async fn it_should_import_persistent_torrents( + #[values( + standard(), + standard_mutex(), + standard_tokio(), + tokio_std(), + tokio_mutex(), + tokio_tokio(), + skip_list_mutex_std(), + skip_list_mutex_parking_lot(), + skip_list_rw_lock_parking_lot(), + dash_map_std() + )] + repo: Repo, + #[case] entries: Entries, + #[values(persistent_empty(), persistent_single(), persistent_three())] persistent_torrents: PersistentTorrents, +) { + make(&repo, &entries).await; + + let mut downloaded = repo.get_metrics().await.downloaded; + persistent_torrents.iter().for_each(|(_, d)| downloaded += u64::from(*d)); + + 
repo.import_persistent(&persistent_torrents).await; + + assert_eq!(repo.get_metrics().await.downloaded, downloaded); + + for (entry, _) in persistent_torrents { + assert!(repo.get(&entry).await.is_some()); + } +} + +#[rstest] +#[case::empty(empty())] +#[case::default(default())] +#[case::started(started())] +#[case::completed(completed())] +#[case::downloaded(downloaded())] +#[case::three(three())] +#[case::out_of_order(many_out_of_order())] +#[case::in_order(many_hashed_in_order())] +#[tokio::test] +async fn it_should_remove_an_entry( + #[values( + standard(), + standard_mutex(), + standard_tokio(), + tokio_std(), + tokio_mutex(), + tokio_tokio(), + skip_list_mutex_std(), + skip_list_mutex_parking_lot(), + skip_list_rw_lock_parking_lot(), + dash_map_std() + )] + repo: Repo, + #[case] entries: Entries, +) { + make(&repo, &entries).await; + + for (info_hash, torrent) in entries { + assert_eq!(repo.get(&info_hash).await, Some(torrent.clone())); + assert_eq!(repo.remove(&info_hash).await, Some(torrent)); + + assert_eq!(repo.get(&info_hash).await, None); + assert_eq!(repo.remove(&info_hash).await, None); + } + + assert_eq!(repo.get_metrics().await.torrents, 0); +} + +#[rstest] +#[case::empty(empty())] +#[case::default(default())] +#[case::started(started())] +#[case::completed(completed())] +#[case::downloaded(downloaded())] +#[case::three(three())] +#[case::out_of_order(many_out_of_order())] +#[case::in_order(many_hashed_in_order())] +#[tokio::test] +async fn it_should_remove_inactive_peers( + #[values( + standard(), + standard_mutex(), + standard_tokio(), + tokio_std(), + tokio_mutex(), + tokio_tokio(), + skip_list_mutex_std(), + skip_list_mutex_parking_lot(), + skip_list_rw_lock_parking_lot(), + dash_map_std() + )] + repo: Repo, + #[case] entries: Entries, +) { + use std::ops::Sub as _; + use std::time::Duration; + + use torrust_tracker_clock::clock::stopped::Stopped as _; + use torrust_tracker_clock::clock::{self, Time as _}; + use torrust_tracker_primitives::peer; 
+ + use crate::CurrentClock; + + const TIMEOUT: Duration = Duration::from_secs(120); + const EXPIRE: Duration = Duration::from_secs(121); + + make(&repo, &entries).await; + + let info_hash: InfoHash; + let mut peer: peer::Peer; + + // Generate a new infohash and peer. + { + let hash = &mut DefaultHasher::default(); + hash.write_u8(255); + info_hash = InfoHash::from(&hash.clone()); + peer = a_completed_peer(-1); + } + + // Set the last updated time of the peer to be 121 seconds ago. + { + let now = clock::Working::now(); + clock::Stopped::local_set(&now); + + peer.updated = now.sub(EXPIRE); + } + + // Insert the infohash and peer into the repository + // and verify there is an extra torrent entry. + { + repo.upsert_peer(&info_hash, &peer).await; + assert_eq!(repo.get_metrics().await.torrents, entries.len() as u64 + 1); + } + + // Insert the infohash and peer into the repository + // and verify the swarm metadata was updated. + { + repo.upsert_peer(&info_hash, &peer).await; + let stats = repo.get_swarm_metadata(&info_hash).await; + assert_eq!( + stats, + Some(SwarmMetadata { + downloaded: 0, + complete: 1, + incomplete: 0 + }) + ); + } + + // Verify that this new peer was inserted into the repository. + { + let entry = repo.get(&info_hash).await.expect("it_should_get_some"); + assert!(entry.get_peers(None).contains(&peer.into())); + } + + // Remove peers that have not been updated since the timeout (120 seconds ago). + { + repo.remove_inactive_peers(CurrentClock::now_sub(&TIMEOUT).expect("it should get a time passed")) + .await; + } + + // Verify that this peer was removed from the repository.
+ { + let entry = repo.get(&info_hash).await.expect("it_should_get_some"); + assert!(!entry.get_peers(None).contains(&peer.into())); + } +} + +#[rstest] +#[case::empty(empty())] +#[case::default(default())] +#[case::started(started())] +#[case::completed(completed())] +#[case::downloaded(downloaded())] +#[case::three(three())] +#[case::out_of_order(many_out_of_order())] +#[case::in_order(many_hashed_in_order())] +#[tokio::test] +async fn it_should_remove_peerless_torrents( + #[values( + standard(), + standard_mutex(), + standard_tokio(), + tokio_std(), + tokio_mutex(), + tokio_tokio(), + skip_list_mutex_std(), + skip_list_mutex_parking_lot(), + skip_list_rw_lock_parking_lot(), + dash_map_std() + )] + repo: Repo, + #[case] entries: Entries, + #[values(policy_none(), policy_persist(), policy_remove(), policy_remove_persist())] policy: TrackerPolicy, +) { + make(&repo, &entries).await; + + repo.remove_peerless_torrents(&policy).await; + + let torrents = repo.get_paginated(None).await; + + for (_, entry) in torrents { + assert!(entry.meets_retaining_policy(&policy)); + } +} diff --git a/share/container/entry_script_sh b/share/container/entry_script_sh index 94dfa6b81..32cdfe33d 100644 --- a/share/container/entry_script_sh +++ b/share/container/entry_script_sh @@ -26,29 +26,29 @@ chmod -R 2770 /var/lib/torrust /var/log/torrust /etc/torrust # Install the database and config: -if [ -n "$TORRUST_TRACKER_DATABASE_DRIVER" ]; then - if cmp_lc "$TORRUST_TRACKER_DATABASE_DRIVER" "sqlite3"; then +if [ -n "$TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER" ]; then + if cmp_lc "$TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER" "sqlite3"; then - # Select sqlite3 empty database + # Select Sqlite3 empty database default_database="/usr/share/torrust/default/database/tracker.sqlite3.db" - # Select sqlite3 default configuration + # Select Sqlite3 default configuration default_config="/usr/share/torrust/default/config/tracker.container.sqlite3.toml" - elif cmp_lc 
"$TORRUST_TRACKER_DATABASE_DRIVER" "mysql"; then + elif cmp_lc "$TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER" "mysql"; then - # (no database file needed for mysql) + # (no database file needed for MySQL) - # Select default mysql configuration + # Select default MySQL configuration default_config="/usr/share/torrust/default/config/tracker.container.mysql.toml" else - echo "Error: Unsupported Database Type: \"$TORRUST_TRACKER_DATABASE_DRIVER\"." + echo "Error: Unsupported Database Type: \"$TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER\"." echo "Please Note: Supported Database Types: \"sqlite3\", \"mysql\"." exit 1 fi else - echo "Error: \"\$TORRUST_TRACKER_DATABASE_DRIVER\" was not set!"; exit 1; + echo "Error: \"\$TORRUST_TRACKER_CONFIG_OVERRIDE_CORE__DATABASE__DRIVER\" was not set!"; exit 1; fi install_config="/etc/torrust/tracker/tracker.toml" @@ -73,6 +73,7 @@ if [ -e "/usr/share/torrust/container/message" ]; then fi # Load message of the day from Profile +# shellcheck disable=SC2016 echo '[ ! 
-z "$TERM" -a -r /etc/motd ] && cat /etc/motd' >> /etc/profile cd /home/torrust || exit 1 diff --git a/share/default/config/tracker.container.mysql.toml b/share/default/config/tracker.container.mysql.toml index fb9cbf789..865ea224e 100644 --- a/share/default/config/tracker.container.mysql.toml +++ b/share/default/config/tracker.container.mysql.toml @@ -1,38 +1,29 @@ -announce_interval = 120 -db_driver = "MySQL" -db_path = "mysql://db_user:db_user_secret_password@mysql:3306/torrust_tracker" -external_ip = "0.0.0.0" -inactive_peer_cleanup_interval = 600 -log_level = "info" -max_peer_timeout = 900 -min_announce_interval = 120 -mode = "public" -on_reverse_proxy = false -persistent_torrent_completed_stat = false -remove_peerless_torrents = true -tracker_usage_statistics = true - -[[udp_trackers]] -bind_address = "0.0.0.0:6969" -enabled = false - -[[http_trackers]] -bind_address = "0.0.0.0:7070" -enabled = false -ssl_cert_path = "/var/lib/torrust/tracker/tls/localhost.crt" -ssl_enabled = false -ssl_key_path = "/var/lib/torrust/tracker/tls/localhost.key" - -[http_api] -bind_address = "0.0.0.0:1212" -enabled = true -ssl_cert_path = "/var/lib/torrust/tracker/tls/localhost.crt" -ssl_enabled = false -ssl_key_path = "/var/lib/torrust/tracker/tls/localhost.key" - -# Please override the admin token setting the -# `TORRUST_TRACKER_API_ADMIN_TOKEN` -# environmental variable! 
- -[http_api.access_tokens] -admin = "MyAccessToken" +[metadata] +app = "torrust-tracker" +purpose = "configuration" +schema_version = "2.0.0" + +[logging] +threshold = "info" + +[core] +listed = false +private = false + +[core.database] +driver = "mysql" +path = "mysql://db_user:db_user_secret_password@mysql:3306/torrust_tracker" + +# Uncomment to enable services + +#[[udp_trackers]] +#bind_address = "0.0.0.0:6969" + +#[[http_trackers]] +#bind_address = "0.0.0.0:7070" + +#[http_api] +#bind_address = "0.0.0.0:1212" + +#[http_api.access_tokens] +#admin = "MyAccessToken" diff --git a/share/default/config/tracker.container.sqlite3.toml b/share/default/config/tracker.container.sqlite3.toml index 54cfd4023..6c73cf54a 100644 --- a/share/default/config/tracker.container.sqlite3.toml +++ b/share/default/config/tracker.container.sqlite3.toml @@ -1,38 +1,28 @@ -announce_interval = 120 -db_driver = "Sqlite3" -db_path = "/var/lib/torrust/tracker/database/sqlite3.db" -external_ip = "0.0.0.0" -inactive_peer_cleanup_interval = 600 -log_level = "info" -max_peer_timeout = 900 -min_announce_interval = 120 -mode = "public" -on_reverse_proxy = false -persistent_torrent_completed_stat = false -remove_peerless_torrents = true -tracker_usage_statistics = true - -[[udp_trackers]] -bind_address = "0.0.0.0:6969" -enabled = false - -[[http_trackers]] -bind_address = "0.0.0.0:7070" -enabled = false -ssl_cert_path = "/var/lib/torrust/tracker/tls/localhost.crt" -ssl_enabled = false -ssl_key_path = "/var/lib/torrust/tracker/tls/localhost.key" - -[http_api] -bind_address = "0.0.0.0:1212" -enabled = true -ssl_cert_path = "/var/lib/torrust/tracker/tls/localhost.crt" -ssl_enabled = false -ssl_key_path = "/var/lib/torrust/tracker/tls/localhost.key" - -# Please override the admin token setting the -# `TORRUST_TRACKER_API_ADMIN_TOKEN` -# environmental variable! 
- -[http_api.access_tokens] -admin = "MyAccessToken" +[metadata] +app = "torrust-tracker" +purpose = "configuration" +schema_version = "2.0.0" + +[logging] +threshold = "info" + +[core] +listed = false +private = false + +[core.database] +path = "/var/lib/torrust/tracker/database/sqlite3.db" + +# Uncomment to enable services + +#[[udp_trackers]] +#bind_address = "0.0.0.0:6969" + +#[[http_trackers]] +#bind_address = "0.0.0.0:7070" + +#[http_api] +#bind_address = "0.0.0.0:1212" + +#[http_api.access_tokens] +#admin = "MyAccessToken" diff --git a/share/default/config/tracker.development.sqlite3.toml b/share/default/config/tracker.development.sqlite3.toml index 20f95ac5d..96addaf87 100644 --- a/share/default/config/tracker.development.sqlite3.toml +++ b/share/default/config/tracker.development.sqlite3.toml @@ -1,34 +1,23 @@ -announce_interval = 120 -db_driver = "Sqlite3" -db_path = "./storage/tracker/lib/database/sqlite3.db" -external_ip = "0.0.0.0" -inactive_peer_cleanup_interval = 600 -log_level = "info" -max_peer_timeout = 900 -min_announce_interval = 120 -mode = "public" -on_reverse_proxy = false -persistent_torrent_completed_stat = false -remove_peerless_torrents = true -tracker_usage_statistics = true +[metadata] +app = "torrust-tracker" +purpose = "configuration" +schema_version = "2.0.0" + +[logging] +threshold = "info" + +[core] +listed = false +private = false [[udp_trackers]] bind_address = "0.0.0.0:6969" -enabled = false [[http_trackers]] bind_address = "0.0.0.0:7070" -enabled = false -ssl_cert_path = "" -ssl_enabled = false -ssl_key_path = "" [http_api] -bind_address = "127.0.0.1:1212" -enabled = true -ssl_cert_path = "" -ssl_enabled = false -ssl_key_path = "" +bind_address = "0.0.0.0:1212" [http_api.access_tokens] admin = "MyAccessToken" diff --git a/share/default/config/tracker.e2e.container.sqlite3.toml b/share/default/config/tracker.e2e.container.sqlite3.toml new file mode 100644 index 000000000..73c6df219 --- /dev/null +++ 
b/share/default/config/tracker.e2e.container.sqlite3.toml @@ -0,0 +1,30 @@ +[metadata] +app = "torrust-tracker" +purpose = "configuration" +schema_version = "2.0.0" + +[logging] +threshold = "info" + +[core] +listed = false +private = false + +[core.database] +path = "/var/lib/torrust/tracker/database/sqlite3.db" + +[[udp_trackers]] +bind_address = "0.0.0.0:6969" + +[[http_trackers]] +bind_address = "0.0.0.0:7070" + +[http_api] +bind_address = "0.0.0.0:1212" + +[http_api.access_tokens] +admin = "MyAccessToken" + +[health_check_api] +# Must be bound to wildcard IP to be accessible from outside the container +bind_address = "0.0.0.0:1313" diff --git a/share/default/config/tracker.udp.benchmarking.toml b/share/default/config/tracker.udp.benchmarking.toml new file mode 100644 index 000000000..c6644d8dc --- /dev/null +++ b/share/default/config/tracker.udp.benchmarking.toml @@ -0,0 +1,21 @@ +[metadata] +schema_version = "2.0.0" + +[logging] +threshold = "error" + +[core] +listed = false +private = false +tracker_usage_statistics = false + +[core.database] +driver = "sqlite3" +path = "./sqlite3.db" + +[core.tracker_policy] +persistent_torrent_completed_stat = false +remove_peerless_torrents = false + +[[udp_trackers]] +bind_address = "0.0.0.0:6969" diff --git a/share/default/config/tracker_checker.json b/share/default/config/tracker_checker.json new file mode 100644 index 000000000..7d1453bfd --- /dev/null +++ b/share/default/config/tracker_checker.json @@ -0,0 +1,11 @@ +{ + "udp_trackers": [ + "127.0.0.1:6969" + ], + "http_trackers": [ + "http://127.0.0.1:7070" + ], + "health_checks": [ + "http://127.0.0.1:1313/health_check" + ] +} \ No newline at end of file diff --git a/src/app.rs b/src/app.rs index 3fc790a23..b2447a9ef 100644 --- a/src/app.rs +++ b/src/app.rs @@ -11,7 +11,11 @@ //! - Loading data from the database when it's needed. //! - Starting some jobs depending on the configuration. //! -//! The started jobs may be: +//! Jobs executed always: +//! +//! 
- Health Check API +//! +//! Optional jobs: //! //! - Torrent cleaner: it removes inactive peers and (optionally) peerless torrents. //! - UDP trackers: the user can enable multiple UDP tracker on several ports. @@ -19,20 +23,32 @@ //! - Tracker REST API: the tracker API can be enabled/disabled. use std::sync::Arc; -use log::warn; use tokio::task::JoinHandle; use torrust_tracker_configuration::Configuration; +use tracing::{info, warn}; -use crate::bootstrap::jobs::{http_tracker, torrent_cleanup, tracker_apis, udp_tracker}; -use crate::servers::http::Version; -use crate::tracker; +use crate::bootstrap::jobs::{health_check_api, http_tracker, torrent_cleanup, tracker_apis, udp_tracker}; +use crate::servers::registar::Registar; +use crate::{core, servers}; /// # Panics /// -/// Will panic if the socket address for API can't be parsed. -pub async fn start(config: Arc, tracker: Arc) -> Vec> { +/// Will panic if: +/// +/// - Can't retrieve tracker keys from database. +/// - Can't load whitelist from database. 
+pub async fn start(config: &Configuration, tracker: Arc) -> Vec> { + if config.http_api.is_none() + && (config.udp_trackers.is_none() || config.udp_trackers.as_ref().map_or(true, std::vec::Vec::is_empty)) + && (config.http_trackers.is_none() || config.http_trackers.as_ref().map_or(true, std::vec::Vec::is_empty)) + { + warn!("No services enabled in configuration"); + } + let mut jobs: Vec> = Vec::new(); + let registar = Registar::default(); + // Load peer keys if tracker.is_private() { tracker @@ -42,7 +58,7 @@ pub async fn start(config: Arc, tracker: Arc) - } // Load whitelisted torrents - if tracker.is_whitelisted() { + if tracker.is_listed() { tracker .load_whitelist_from_database() .await @@ -50,38 +66,62 @@ pub async fn start(config: Arc, tracker: Arc) - } // Start the UDP blocks - for udp_tracker_config in &config.udp_trackers { - if !udp_tracker_config.enabled { - continue; - } - - if tracker.is_private() { - warn!( - "Could not start UDP tracker on: {} while in {:?}. UDP is not safe for private trackers!", - udp_tracker_config.bind_address, config.mode - ); - } else { - jobs.push(udp_tracker::start_job(udp_tracker_config, tracker.clone())); + if let Some(udp_trackers) = &config.udp_trackers { + for udp_tracker_config in udp_trackers { + if tracker.is_private() { + warn!( + "Could not start UDP tracker on: {} while in private mode. 
UDP is not safe for private trackers!", + udp_tracker_config.bind_address + ); + } else { + jobs.push(udp_tracker::start_job(udp_tracker_config, tracker.clone(), registar.give_form()).await); + } } + } else { + info!("No UDP blocks in configuration"); } // Start the HTTP blocks - for http_tracker_config in &config.http_trackers { - if !http_tracker_config.enabled { - continue; + if let Some(http_trackers) = &config.http_trackers { + for http_tracker_config in http_trackers { + if let Some(job) = http_tracker::start_job( + http_tracker_config, + tracker.clone(), + registar.give_form(), + servers::http::Version::V1, + ) + .await + { + jobs.push(job); + }; } - jobs.push(http_tracker::start_job(http_tracker_config, tracker.clone(), Version::V1).await); + } else { + info!("No HTTP blocks in configuration"); } // Start HTTP API - if config.http_api.enabled { - jobs.push(tracker_apis::start_job(&config.http_api, tracker.clone()).await); + if let Some(http_api_config) = &config.http_api { + if let Some(job) = tracker_apis::start_job( + http_api_config, + tracker.clone(), + registar.give_form(), + servers::apis::Version::V1, + ) + .await + { + jobs.push(job); + }; + } else { + info!("No API block in configuration"); } - // Remove torrents without peers, every interval - if config.inactive_peer_cleanup_interval > 0 { - jobs.push(torrent_cleanup::start_job(&config, &tracker)); + // Start runners to remove torrents without peers, every interval + if config.core.inactive_peer_cleanup_interval > 0 { + jobs.push(torrent_cleanup::start_job(&config.core, &tracker)); } + // Start Health Check API + jobs.push(health_check_api::start_job(&config.health_check_api, registar.entries()).await); + jobs } diff --git a/src/bin/e2e_tests_runner.rs b/src/bin/e2e_tests_runner.rs new file mode 100644 index 000000000..eb91c0d86 --- /dev/null +++ b/src/bin/e2e_tests_runner.rs @@ -0,0 +1,6 @@ +//! Program to run E2E tests. 
+use torrust_tracker::console::ci::e2e; + +fn main() -> anyhow::Result<()> { + e2e::runner::run() +} diff --git a/src/bin/http_health_check.rs b/src/bin/http_health_check.rs new file mode 100644 index 000000000..b7c6dfa41 --- /dev/null +++ b/src/bin/http_health_check.rs @@ -0,0 +1,42 @@ +//! Minimal `curl` or `wget` to be used for container health checks. +//! +//! It's convenient to avoid using third-party libraries because: +//! +//! - They are harder to maintain. +//! - They introduce new attack vectors. +use std::time::Duration; +use std::{env, process}; + +use reqwest::Client; + +#[tokio::main] +async fn main() { + let args: Vec = env::args().collect(); + if args.len() != 2 { + eprintln!("Usage: cargo run --bin http_health_check "); + eprintln!("Example: cargo run --bin http_health_check http://127.0.0.1:1313/health_check"); + std::process::exit(1); + } + + println!("Health check ..."); + + let url = &args[1].clone(); + + let client = Client::builder().timeout(Duration::from_secs(5)).build().unwrap(); + + match client.get(url).send().await { + Ok(response) => { + if response.status().is_success() { + println!("STATUS: {}", response.status()); + process::exit(0); + } else { + println!("Non-success status received."); + process::exit(1); + } + } + Err(err) => { + println!("ERROR: {err}"); + process::exit(1); + } + } +} diff --git a/src/bin/http_tracker_client.rs b/src/bin/http_tracker_client.rs new file mode 100644 index 000000000..0de040549 --- /dev/null +++ b/src/bin/http_tracker_client.rs @@ -0,0 +1,7 @@ +//! Program to make request to HTTP trackers. +use torrust_tracker::console::clients::http::app; + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + app::run().await +} diff --git a/src/bin/profiling.rs b/src/bin/profiling.rs new file mode 100644 index 000000000..bc1ac6526 --- /dev/null +++ b/src/bin/profiling.rs @@ -0,0 +1,8 @@ +//! This binary is used for profiling with [valgrind](https://valgrind.org/) +//! 
and [kcachegrind](https://kcachegrind.github.io/). +use torrust_tracker::console::profiling::run; + +#[tokio::main] +async fn main() { + run().await; +} diff --git a/src/bin/tracker_checker.rs b/src/bin/tracker_checker.rs new file mode 100644 index 000000000..87aeedeac --- /dev/null +++ b/src/bin/tracker_checker.rs @@ -0,0 +1,7 @@ +//! Program to check running trackers. +use torrust_tracker::console::clients::checker::app; + +#[tokio::main] +async fn main() { + app::run().await.expect("Some checks fail"); +} diff --git a/src/bin/udp_tracker_client.rs b/src/bin/udp_tracker_client.rs new file mode 100644 index 000000000..909b296ca --- /dev/null +++ b/src/bin/udp_tracker_client.rs @@ -0,0 +1,7 @@ +//! Program to make request to UDP trackers. +use torrust_tracker::console::clients::udp::app; + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + app::run().await +} diff --git a/src/bootstrap/app.rs b/src/bootstrap/app.rs index 6961e15f0..b79f4dc86 100644 --- a/src/bootstrap/app.rs +++ b/src/bootstrap/app.rs @@ -1,6 +1,6 @@ //! Setup for the main tracker application. //! -//! The [`setup`](bootstrap::app::setup) only builds the application and its dependencies but it does not start the application. +//! The [`setup`] only builds the application and its dependencies but it does not start the application. //! In fact, there is no such thing as the main application process. When the application starts, the only thing it does is //! starting a bunch of independent jobs. If you are looking for how things are started you should read [`app::start`](crate::app::start) //! function documentation. @@ -13,21 +13,34 @@ //! 4. Initialize the domain tracker. 
use std::sync::Arc; +use torrust_tracker_clock::static_time; +use torrust_tracker_configuration::validator::Validator; use torrust_tracker_configuration::Configuration; +use tracing::info; use super::config::initialize_configuration; use crate::bootstrap; -use crate::shared::clock::static_time; +use crate::core::services::tracker_factory; +use crate::core::Tracker; use crate::shared::crypto::ephemeral_instance_keys; -use crate::tracker::services::tracker_factory; -use crate::tracker::Tracker; -/// It loads the configuration from the environment and builds the main domain [`tracker`](crate::tracker::Tracker) struct. +/// It loads the configuration from the environment and builds the main domain [`Tracker`] struct. +/// +/// # Panics +/// +/// Setup can fail if the configuration is invalid. #[must_use] -pub fn setup() -> (Arc, Arc) { - let configuration = Arc::new(initialize_configuration()); +pub fn setup() -> (Configuration, Arc) { + let configuration = initialize_configuration(); + + if let Err(e) = configuration.validate() { + panic!("Configuration error: {e}"); + } + let tracker = initialize_with_configuration(&configuration); + info!("Configuration:\n{}", configuration.clone().mask_secrets().to_json()); + (configuration, tracker) } @@ -35,7 +48,7 @@ pub fn setup() -> (Arc, Arc) { /// /// The configuration may be obtained from the environment (via config file or env vars). #[must_use] -pub fn initialize_with_configuration(configuration: &Arc) -> Arc { +pub fn initialize_with_configuration(configuration: &Configuration) -> Arc { initialize_static(); initialize_logging(configuration); Arc::new(initialize_tracker(configuration)) } @@ -60,13 +73,13 @@ pub fn initialize_static() { /// The tracker is the domain layer service. It's the entrypoint to make requests to the domain layer. /// It's used by other higher-level components like the UDP and HTTP trackers or the tracker API. 
#[must_use] -pub fn initialize_tracker(config: &Arc) -> Tracker { - tracker_factory(config.clone()) +pub fn initialize_tracker(config: &Configuration) -> Tracker { + tracker_factory(config) } -/// It initializes the log level, format and channel. +/// It initializes the log threshold, format and channel. /// /// See [the logging setup](crate::bootstrap::logging::setup) for more info about logging. -pub fn initialize_logging(config: &Arc) { +pub fn initialize_logging(config: &Configuration) { bootstrap::logging::setup(config); } diff --git a/src/bootstrap/config.rs b/src/bootstrap/config.rs index 858fd59fc..fb5afe403 100644 --- a/src/bootstrap/config.rs +++ b/src/bootstrap/config.rs @@ -1,20 +1,9 @@ //! Initialize configuration from file or env var. //! -//! All environment variables are prefixed with `TORRUST_TRACKER_BACK_`. +//! All environment variables are prefixed with `TORRUST_TRACKER_`. use torrust_tracker_configuration::{Configuration, Info}; -// Environment variables - -/// The whole `tracker.toml` file content. It has priority over the config file. -/// Even if the file is not on the default path. -const ENV_VAR_CONFIG: &str = "TORRUST_TRACKER_CONFIG"; -const ENV_VAR_API_ADMIN_TOKEN: &str = "TORRUST_TRACKER_API_ADMIN_TOKEN"; - -/// The `tracker.toml` file location. -pub const ENV_VAR_PATH_CONFIG: &str = "TORRUST_TRACKER_PATH_CONFIG"; - -// Default values pub const DEFAULT_PATH_CONFIG: &str = "./share/default/config/tracker.development.sqlite3.toml"; /// It loads the application configuration from the environment. @@ -22,7 +11,7 @@ pub const DEFAULT_PATH_CONFIG: &str = "./share/default/config/tracker.developmen /// There are two methods to inject the configuration: /// /// 1. By using a config file: `tracker.toml`. -/// 2. Environment variable: `TORRUST_TRACKER_CONFIG`. The variable contains the same contents as the `tracker.toml` file. +/// 2. Environment variable: `TORRUST_TRACKER_CONFIG_TOML`. 
The variable contains the same contents as the `tracker.toml` file. /// /// Environment variable has priority over the config file. /// @@ -31,18 +20,11 @@ pub const DEFAULT_PATH_CONFIG: &str = "./share/default/config/tracker.developmen /// # Panics /// /// Will panic if it can't load the configuration from either -/// `./tracker.toml` file or the env var `TORRUST_TRACKER_CONFIG`. +/// `./tracker.toml` file or the env var `TORRUST_TRACKER_CONFIG_TOML`. #[must_use] pub fn initialize_configuration() -> Configuration { - let info = Info::new( - ENV_VAR_CONFIG.to_string(), - ENV_VAR_PATH_CONFIG.to_string(), - DEFAULT_PATH_CONFIG.to_string(), - ENV_VAR_API_ADMIN_TOKEN.to_string(), - ) - .unwrap(); - - Configuration::load(&info).unwrap() + let info = Info::new(DEFAULT_PATH_CONFIG.to_string()).expect("info to load configuration is not valid"); + Configuration::load(&info).expect("error loading configuration from sources") } #[cfg(test)] diff --git a/src/bootstrap/jobs/health_check_api.rs b/src/bootstrap/jobs/health_check_api.rs new file mode 100644 index 000000000..b4d4862ee --- /dev/null +++ b/src/bootstrap/jobs/health_check_api.rs @@ -0,0 +1,71 @@ +//! Health Check API job starter. +//! +//! The [`health_check_api::start_job`](crate::bootstrap::jobs::health_check_api::start_job) +//! function starts the Health Check REST API. +//! +//! The [`health_check_api::start_job`](crate::bootstrap::jobs::health_check_api::start_job) +//! function spawns a new asynchronous task, that tasks is the "**launcher**". +//! The "**launcher**" starts the actual server and sends a message back +//! to the main application. +//! +//! The "**launcher**" is an intermediary thread that decouples the Health Check +//! API server from the process that handles it. +//! +//! Refer to the [configuration documentation](https://docs.rs/torrust-tracker-configuration) +//! for the API configuration options. 
+ +use tokio::sync::oneshot; +use tokio::task::JoinHandle; +use torrust_tracker_configuration::HealthCheckApi; +use tracing::info; + +use super::Started; +use crate::servers::health_check_api::{server, HEALTH_CHECK_API_LOG_TARGET}; +use crate::servers::logging::STARTED_ON; +use crate::servers::registar::ServiceRegistry; +use crate::servers::signals::Halted; + +/// This function starts a new Health Check API server with the provided +/// configuration. +/// +/// The function starts a new concurrent task that will run the API server. +/// This task will send a message to the main application process to notify +/// that the API server was successfully started. +/// +/// # Panics +/// +/// It would panic if unable to send the `ApiServerJobStarted` notice. +pub async fn start_job(config: &HealthCheckApi, register: ServiceRegistry) -> JoinHandle<()> { + let bind_addr = config.bind_address; + + let (tx_start, rx_start) = oneshot::channel::(); + let (tx_halt, rx_halt) = tokio::sync::oneshot::channel::(); + + let protocol = "http"; + + // Run the API server + let join_handle = tokio::spawn(async move { + info!(target: HEALTH_CHECK_API_LOG_TARGET, "Starting on: {protocol}://{}", bind_addr); + + let handle = server::start(bind_addr, tx_start, rx_halt, register); + + if let Ok(()) = handle.await { + info!(target: HEALTH_CHECK_API_LOG_TARGET, "Stopped server running on: {protocol}://{}", bind_addr); + } + }); + + // Wait until the server sends the started message + match rx_start.await { + Ok(msg) => info!(target: HEALTH_CHECK_API_LOG_TARGET, "{STARTED_ON}: {protocol}://{}", msg.address), + Err(e) => panic!("the Health Check API server was dropped: {e}"), + } + + // Wait until the server finishes + tokio::spawn(async move { + assert!(!tx_halt.is_closed(), "Halt channel for Health Check API should be open"); + + join_handle + .await + .expect("it should be able to join to the Health Check API server task"); + }) +} diff --git a/src/bootstrap/jobs/http_tracker.rs 
b/src/bootstrap/jobs/http_tracker.rs index ac0161640..745f564b1 100644 --- a/src/bootstrap/jobs/http_tracker.rs +++ b/src/bootstrap/jobs/http_tracker.rs @@ -3,92 +3,97 @@ //! The function [`http_tracker::start_job`](crate::bootstrap::jobs::http_tracker::start_job) starts a new HTTP tracker server. //! //! > **NOTICE**: the application can launch more than one HTTP tracker on different ports. -//! Refer to the [configuration documentation](https://docs.rs/torrust-tracker-configuration) for the configuration options. +//! > Refer to the [configuration documentation](https://docs.rs/torrust-tracker-configuration) for the configuration options. //! //! The [`http_tracker::start_job`](crate::bootstrap::jobs::http_tracker::start_job) function spawns a new asynchronous task, //! that tasks is the "**launcher**". The "**launcher**" starts the actual server and sends a message back to the main application. -//! The main application waits until receives the message [`ServerJobStarted`](crate::bootstrap::jobs::http_tracker::ServerJobStarted) from the "**launcher**". //! //! The "**launcher**" is an intermediary thread that decouples the HTTP servers from the process that handles it. The HTTP could be used independently in the future. //! In that case it would not need to notify a parent process. +use std::net::SocketAddr; use std::sync::Arc; use axum_server::tls_rustls::RustlsConfig; -use log::info; -use tokio::sync::oneshot; use tokio::task::JoinHandle; use torrust_tracker_configuration::HttpTracker; -use crate::servers::http::v1::launcher; +use super::make_rust_tls; +use crate::core; +use crate::servers::http::server::{HttpServer, Launcher}; use crate::servers::http::Version; -use crate::tracker; - -/// This is the message that the "**launcher**" spawned task sends to the main application process to notify that the HTTP server was successfully started. -/// -/// > **NOTICE**: it does not mean the HTTP server is ready to receive requests. 
It only means the new server started. It might take some time to the server to be ready to accept request. -#[derive(Debug)] -pub struct ServerJobStarted(); +use crate::servers::registar::ServiceRegistrationForm; /// It starts a new HTTP server with the provided configuration and version. /// /// Right now there is only one version but in the future we could support more than one HTTP tracker version at the same time. /// This feature allows supporting breaking changes on `BitTorrent` BEPs. -pub async fn start_job(config: &HttpTracker, tracker: Arc, version: Version) -> JoinHandle<()> { - match version { - Version::V1 => start_v1(config, tracker.clone()).await, - } -} - +/// /// # Panics /// /// It would panic if the `config::HttpTracker` struct would contain inappropriate values. -async fn start_v1(config: &HttpTracker, tracker: Arc) -> JoinHandle<()> { - let bind_addr = config - .bind_address - .parse::() - .expect("Tracker API bind_address invalid."); - let ssl_enabled = config.ssl_enabled; - let ssl_cert_path = config.ssl_cert_path.clone(); - let ssl_key_path = config.ssl_key_path.clone(); - - let (tx, rx) = oneshot::channel::(); - - // Run the API server - let join_handle = tokio::spawn(async move { - if !ssl_enabled { - info!("Starting Torrust HTTP tracker server on: http://{}", bind_addr); - - let handle = launcher::start(bind_addr, tracker); +/// +pub async fn start_job( + config: &HttpTracker, + tracker: Arc, + form: ServiceRegistrationForm, + version: Version, +) -> Option> { + let socket = config.bind_address; + + let tls = make_rust_tls(&config.tsl_config) + .await + .map(|tls| tls.expect("it should have a valid http tracker tls configuration")); - tx.send(ServerJobStarted()) - .expect("the HTTP tracker server should not be dropped"); + match version { + Version::V1 => Some(start_v1(socket, tls, tracker.clone(), form).await), + } +} - if let Ok(()) = handle.await { - info!("Torrust HTTP tracker server on http://{} stopped", bind_addr); - } - } else if 
ssl_enabled && ssl_cert_path.is_some() && ssl_key_path.is_some() { - info!("Starting Torrust HTTP tracker server on: https://{}", bind_addr); +async fn start_v1( + socket: SocketAddr, + tls: Option, + tracker: Arc, + form: ServiceRegistrationForm, +) -> JoinHandle<()> { + let server = HttpServer::new(Launcher::new(socket, tls)) + .start(tracker, form) + .await + .expect("it should be able to start to the http tracker"); + + tokio::spawn(async move { + assert!( + !server.state.halt_task.is_closed(), + "Halt channel for HTTP tracker should be open" + ); + server + .state + .task + .await + .expect("it should be able to join to the http tracker task"); + }) +} - let ssl_config = RustlsConfig::from_pem_file(ssl_cert_path.unwrap(), ssl_key_path.unwrap()) - .await - .unwrap(); +#[cfg(test)] +mod tests { + use std::sync::Arc; - let handle = launcher::start_tls(bind_addr, ssl_config, tracker); + use torrust_tracker_test_helpers::configuration::ephemeral_public; - tx.send(ServerJobStarted()) - .expect("the HTTP tracker server should not be dropped"); + use crate::bootstrap::app::initialize_with_configuration; + use crate::bootstrap::jobs::http_tracker::start_job; + use crate::servers::http::Version; + use crate::servers::registar::Registar; - if let Ok(()) = handle.await { - info!("Torrust HTTP tracker server on https://{} stopped", bind_addr); - } - } - }); + #[tokio::test] + async fn it_should_start_http_tracker() { + let cfg = Arc::new(ephemeral_public()); + let http_tracker = cfg.http_trackers.clone().expect("missing HTTP tracker configuration"); + let config = &http_tracker[0]; + let tracker = initialize_with_configuration(&cfg); + let version = Version::V1; - // Wait until the HTTP tracker server job is running - match rx.await { - Ok(_msg) => info!("Torrust HTTP tracker server started"), - Err(e) => panic!("the HTTP tracker server was dropped: {e}"), + start_job(config, tracker, Registar::default().give_form(), version) + .await + .expect("it should be able to join 
to the http tracker start-job"); } - - join_handle } diff --git a/src/bootstrap/jobs/mod.rs b/src/bootstrap/jobs/mod.rs index c519a9f4b..79a4347ef 100644 --- a/src/bootstrap/jobs/mod.rs +++ b/src/bootstrap/jobs/mod.rs @@ -6,7 +6,101 @@ //! 2. Launch all the application services as concurrent jobs. //! //! This modules contains all the functions needed to start those jobs. +pub mod health_check_api; pub mod http_tracker; pub mod torrent_cleanup; pub mod tracker_apis; pub mod udp_tracker; + +/// This is the message that the "launcher" spawned task sends to the main +/// application process to notify the service was successfully started. +/// +#[derive(Debug)] +pub struct Started { + pub address: std::net::SocketAddr, +} + +pub async fn make_rust_tls(opt_tsl_config: &Option) -> Option> { + match opt_tsl_config { + Some(tsl_config) => { + let cert = tsl_config.ssl_cert_path.clone(); + let key = tsl_config.ssl_key_path.clone(); + + if !cert.exists() || !key.exists() { + return Some(Err(Error::MissingTlsConfig { + location: Location::caller(), + })); + } + + info!("Using https: cert path: {cert}."); + info!("Using https: key path: {key}."); + + Some( + RustlsConfig::from_pem_file(cert, key) + .await + .map_err(|err| Error::BadTlsConfig { + source: (Arc::new(err) as DynError).into(), + }), + ) + } + None => None, + } +} + +#[cfg(test)] +mod tests { + + use camino::Utf8PathBuf; + use torrust_tracker_configuration::TslConfig; + + use super::{make_rust_tls, Error}; + + #[tokio::test] + async fn it_should_error_on_bad_tls_config() { + let err = make_rust_tls(&Some(TslConfig { + ssl_cert_path: Utf8PathBuf::from("bad cert path"), + ssl_key_path: Utf8PathBuf::from("bad key path"), + })) + .await + .expect("tls_was_enabled") + .expect_err("bad_cert_and_key_files"); + + assert!(matches!(err, Error::MissingTlsConfig { location: _ })); + } + + #[tokio::test] + async fn it_should_error_on_missing_cert_or_key_paths() { + let err = make_rust_tls(&Some(TslConfig { + ssl_cert_path: 
Utf8PathBuf::from(""), + ssl_key_path: Utf8PathBuf::from(""), + })) + .await + .expect("tls_was_enabled") + .expect_err("missing_config"); + + assert!(matches!(err, Error::MissingTlsConfig { location: _ })); + } +} + +use std::panic::Location; +use std::sync::Arc; + +use axum_server::tls_rustls::RustlsConfig; +use thiserror::Error; +use torrust_tracker_configuration::TslConfig; +use torrust_tracker_located_error::{DynError, LocatedError}; +use tracing::info; + +/// Error returned by the Bootstrap Process. +#[derive(Error, Debug)] +pub enum Error { + /// Enabled tls but missing config. + #[error("tls config missing")] + MissingTlsConfig { location: &'static Location<'static> }, + + /// Unable to parse tls Config. + #[error("bad tls config: {source}")] + BadTlsConfig { + source: LocatedError<'static, dyn std::error::Error + Send + Sync>, + }, +} diff --git a/src/bootstrap/jobs/torrent_cleanup.rs b/src/bootstrap/jobs/torrent_cleanup.rs index d48769139..6f057fb53 100644 --- a/src/bootstrap/jobs/torrent_cleanup.rs +++ b/src/bootstrap/jobs/torrent_cleanup.rs @@ -13,11 +13,11 @@ use std::sync::Arc; use chrono::Utc; -use log::info; use tokio::task::JoinHandle; -use torrust_tracker_configuration::Configuration; +use torrust_tracker_configuration::Core; +use tracing::info; -use crate::tracker; +use crate::core; /// It starts a jobs for cleaning up the torrent data in the tracker. /// @@ -25,7 +25,7 @@ use crate::tracker; /// /// Refer to [`torrust-tracker-configuration documentation`](https://docs.rs/torrust-tracker-configuration) for more info about that option. 
#[must_use] -pub fn start_job(config: &Arc, tracker: &Arc) -> JoinHandle<()> { +pub fn start_job(config: &Core, tracker: &Arc) -> JoinHandle<()> { let weak_tracker = std::sync::Arc::downgrade(tracker); let interval = config.inactive_peer_cleanup_interval; @@ -44,7 +44,7 @@ pub fn start_job(config: &Arc, tracker: &Arc) - if let Some(tracker) = weak_tracker.upgrade() { let start_time = Utc::now().time(); info!("Cleaning up torrents.."); - tracker.cleanup_torrents().await; + tracker.cleanup_torrents(); info!("Cleaned up torrents in: {}ms", (Utc::now().time() - start_time).num_milliseconds()); } else { break; diff --git a/src/bootstrap/jobs/tracker_apis.rs b/src/bootstrap/jobs/tracker_apis.rs index 9afe4ab24..ca91fbc83 100644 --- a/src/bootstrap/jobs/tracker_apis.rs +++ b/src/bootstrap/jobs/tracker_apis.rs @@ -4,14 +4,14 @@ //! function starts a the HTTP tracker REST API. //! //! > **NOTICE**: that even thought there is only one job the API has different -//! versions. API consumers can choose which version to use. The API version is -//! part of the URL, for example: `http://localhost:1212/api/v1/stats`. +//! > versions. API consumers can choose which version to use. The API version is +//! > part of the URL, for example: `http://localhost:1212/api/v1/stats`. //! //! The [`tracker_apis::start_job`](crate::bootstrap::jobs::tracker_apis::start_job) //! function spawns a new asynchronous task, that tasks is the "**launcher**". //! The "**launcher**" starts the actual server and sends a message back //! to the main application. The main application waits until receives -//! the message [`ApiServerJobStarted`](crate::bootstrap::jobs::tracker_apis::ApiServerJobStarted) +//! the message [`ApiServerJobStarted`] //! from the "**launcher**". //! //! The "**launcher**" is an intermediary thread that decouples the API server @@ -20,23 +20,25 @@ //! //! Refer to the [configuration documentation](https://docs.rs/torrust-tracker-configuration) //! for the API configuration options. 
+use std::net::SocketAddr; use std::sync::Arc; use axum_server::tls_rustls::RustlsConfig; -use log::info; -use tokio::sync::oneshot; use tokio::task::JoinHandle; -use torrust_tracker_configuration::HttpApi; +use torrust_tracker_configuration::{AccessTokens, HttpApi}; -use crate::servers::apis::server; -use crate::tracker; +use super::make_rust_tls; +use crate::core; +use crate::servers::apis::server::{ApiServer, Launcher}; +use crate::servers::apis::Version; +use crate::servers::registar::ServiceRegistrationForm; /// This is the message that the "launcher" spawned task sends to the main /// application process to notify the API server was successfully started. /// /// > **NOTICE**: it does not mean the API server is ready to receive requests. -/// It only means the new server started. It might take some time to the server -/// to be ready to accept request. +/// > It only means the new server started. It might take some time to the server +/// > to be ready to accept request. #[derive(Debug)] pub struct ApiServerJobStarted(); @@ -49,51 +51,65 @@ pub struct ApiServerJobStarted(); /// # Panics /// /// It would panic if unable to send the `ApiServerJobStarted` notice. 
-pub async fn start_job(config: &HttpApi, tracker: Arc) -> JoinHandle<()> { - let bind_addr = config - .bind_address - .parse::() - .expect("Tracker API bind_address invalid."); - let ssl_enabled = config.ssl_enabled; - let ssl_cert_path = config.ssl_cert_path.clone(); - let ssl_key_path = config.ssl_key_path.clone(); +/// +/// +pub async fn start_job( + config: &HttpApi, + tracker: Arc, + form: ServiceRegistrationForm, + version: Version, +) -> Option> { + let bind_to = config.bind_address; - let (tx, rx) = oneshot::channel::(); + let tls = make_rust_tls(&config.tsl_config) + .await + .map(|tls| tls.expect("it should have a valid tracker api tls configuration")); - // Run the API server - let join_handle = tokio::spawn(async move { - if !ssl_enabled { - info!("Starting Torrust APIs server on: http://{}", bind_addr); + let access_tokens = Arc::new(config.access_tokens.clone()); - let handle = server::start(bind_addr, tracker); + match version { + Version::V1 => Some(start_v1(bind_to, tls, tracker.clone(), form, access_tokens).await), + } +} - tx.send(ApiServerJobStarted()).expect("the API server should not be dropped"); +async fn start_v1( + socket: SocketAddr, + tls: Option, + tracker: Arc, + form: ServiceRegistrationForm, + access_tokens: Arc, +) -> JoinHandle<()> { + let server = ApiServer::new(Launcher::new(socket, tls)) + .start(tracker, form, access_tokens) + .await + .expect("it should be able to start to the tracker api"); - if let Ok(()) = handle.await { - info!("Torrust APIs server on http://{} stopped", bind_addr); - } - } else if ssl_enabled && ssl_cert_path.is_some() && ssl_key_path.is_some() { - info!("Starting Torrust APIs server on: https://{}", bind_addr); + tokio::spawn(async move { + assert!(!server.state.halt_task.is_closed(), "Halt channel should be open"); + server.state.task.await.expect("failed to close service"); + }) +} - let ssl_config = RustlsConfig::from_pem_file(ssl_cert_path.unwrap(), ssl_key_path.unwrap()) - .await - .unwrap(); 
+#[cfg(test)] +mod tests { + use std::sync::Arc; - let handle = server::start_tls(bind_addr, ssl_config, tracker); + use torrust_tracker_test_helpers::configuration::ephemeral_public; - tx.send(ApiServerJobStarted()).expect("the API server should not be dropped"); + use crate::bootstrap::app::initialize_with_configuration; + use crate::bootstrap::jobs::tracker_apis::start_job; + use crate::servers::apis::Version; + use crate::servers::registar::Registar; - if let Ok(()) = handle.await { - info!("Torrust APIs server on https://{} stopped", bind_addr); - } - } - }); + #[tokio::test] + async fn it_should_start_http_tracker() { + let cfg = Arc::new(ephemeral_public()); + let config = &cfg.http_api.clone().unwrap(); + let tracker = initialize_with_configuration(&cfg); + let version = Version::V1; - // Wait until the APIs server job is running - match rx.await { - Ok(_msg) => info!("Torrust APIs server started"), - Err(e) => panic!("the API server was dropped: {e}"), + start_job(config, tracker, Registar::default().give_form(), version) + .await + .expect("it should be able to join to the tracker api start-job"); } - - join_handle } diff --git a/src/bootstrap/jobs/udp_tracker.rs b/src/bootstrap/jobs/udp_tracker.rs index 76c465a8d..647461bfc 100644 --- a/src/bootstrap/jobs/udp_tracker.rs +++ b/src/bootstrap/jobs/udp_tracker.rs @@ -4,34 +4,53 @@ //! function starts a new UDP tracker server. //! //! > **NOTICE**: that the application can launch more than one UDP tracker -//! on different ports. Refer to the [configuration documentation](https://docs.rs/torrust-tracker-configuration) -//! for the configuration options. +//! > on different ports. Refer to the [configuration documentation](https://docs.rs/torrust-tracker-configuration) +//! > for the configuration options. 
use std::sync::Arc; -use log::{error, info, warn}; use tokio::task::JoinHandle; use torrust_tracker_configuration::UdpTracker; +use tracing::debug; -use crate::servers::udp::server::Udp; -use crate::tracker; +use crate::core; +use crate::servers::registar::ServiceRegistrationForm; +use crate::servers::udp::server::spawner::Spawner; +use crate::servers::udp::server::Server; +use crate::servers::udp::UDP_TRACKER_LOG_TARGET; /// It starts a new UDP server with the provided configuration. /// /// It spawns a new asynchronous task for the new UDP server. +/// +/// # Panics +/// +/// It will panic if the API binding address is not a valid socket. +/// It will panic if it is unable to start the UDP service. +/// It will panic if the task did not finish successfully. #[must_use] -pub fn start_job(config: &UdpTracker, tracker: Arc) -> JoinHandle<()> { - let bind_addr = config.bind_address.clone(); +pub async fn start_job(config: &UdpTracker, tracker: Arc, form: ServiceRegistrationForm) -> JoinHandle<()> { + let bind_to = config.bind_address; + + let server = Server::new(Spawner::new(bind_to)) + .start(tracker, form) + .await + .expect("it should be able to start the udp tracker"); tokio::spawn(async move { - match Udp::new(&bind_addr).await { - Ok(udp_server) => { - info!("Starting UDP server on: udp://{}", bind_addr); - udp_server.start(tracker).await; - } - Err(e) => { - warn!("Could not start UDP tracker on: udp://{}", bind_addr); - error!("{}", e); - } - } + debug!(target: UDP_TRACKER_LOG_TARGET, "Wait for launcher (UDP service) to finish ..."); + debug!(target: UDP_TRACKER_LOG_TARGET, "Is halt channel closed before waiting?: {}", server.state.halt_task.is_closed()); + + assert!( + !server.state.halt_task.is_closed(), + "Halt channel for UDP tracker should be open" + ); + + server + .state + .task + .await + .expect("it should be able to join to the udp tracker task"); + + debug!(target: UDP_TRACKER_LOG_TARGET, "Is halt channel closed after finishing the server?: {}", 
server.state.halt_task.is_closed()); }) } diff --git a/src/bootstrap/logging.rs b/src/bootstrap/logging.rs index 97e26919d..496b3ea45 100644 --- a/src/bootstrap/logging.rs +++ b/src/bootstrap/logging.rs @@ -1,6 +1,7 @@ //! Setup for the application logging. //! -//! It redirects the log info to the standard output with the log level defined in the configuration. +//! It redirects the log info to the standard output with the log threshold +//! defined in the configuration. //! //! - `Off` //! - `Error` @@ -10,51 +11,72 @@ //! - `Trace` //! //! Refer to the [configuration crate documentation](https://docs.rs/torrust-tracker-configuration) to know how to change log settings. -use std::str::FromStr; use std::sync::Once; -use log::{info, LevelFilter}; -use torrust_tracker_configuration::Configuration; +use torrust_tracker_configuration::{Configuration, Threshold}; +use tracing::info; +use tracing::level_filters::LevelFilter; static INIT: Once = Once::new(); -/// It redirects the log info to the standard output with the log level defined in the configuration +/// It redirects the log info to the standard output with the log threshold +/// defined in the configuration. 
pub fn setup(cfg: &Configuration) { - let level = config_level_or_default(&cfg.log_level); + let tracing_level = map_to_tracing_level_filter(&cfg.logging.threshold); - if level == log::LevelFilter::Off { + if tracing_level == LevelFilter::OFF { return; } INIT.call_once(|| { - stdout_config(level); + tracing_stdout_init(tracing_level, &TraceStyle::Default); }); } -fn config_level_or_default(log_level: &Option) -> LevelFilter { - match log_level { - None => log::LevelFilter::Info, - Some(level) => LevelFilter::from_str(level).unwrap(), +fn map_to_tracing_level_filter(threshold: &Threshold) -> LevelFilter { + match threshold { + Threshold::Off => LevelFilter::OFF, + Threshold::Error => LevelFilter::ERROR, + Threshold::Warn => LevelFilter::WARN, + Threshold::Info => LevelFilter::INFO, + Threshold::Debug => LevelFilter::DEBUG, + Threshold::Trace => LevelFilter::TRACE, } } -fn stdout_config(level: LevelFilter) { - if let Err(_err) = fern::Dispatch::new() - .format(|out, message, record| { - out.finish(format_args!( - "{} [{}][{}] {}", - chrono::Local::now().format("%+"), - record.target(), - record.level(), - message - )); - }) - .level(level) - .chain(std::io::stdout()) - .apply() - { - panic!("Failed to initialize logging.") - } +fn tracing_stdout_init(filter: LevelFilter, style: &TraceStyle) { + let builder = tracing_subscriber::fmt().with_max_level(filter).with_ansi(true); + + let () = match style { + TraceStyle::Default => builder.init(), + TraceStyle::Pretty(display_filename) => builder.pretty().with_file(*display_filename).init(), + TraceStyle::Compact => builder.compact().init(), + TraceStyle::Json => builder.json().init(), + }; + + info!("Logging initialized"); +} - info!("logging initialized."); +#[derive(Debug)] +pub enum TraceStyle { + Default, + Pretty(bool), + Compact, + Json, +} + +impl std::fmt::Display for TraceStyle { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let style = match self { + TraceStyle::Default => "Default 
Style", + TraceStyle::Pretty(path) => match path { + true => "Pretty Style with File Paths", + false => "Pretty Style without File Paths", + }, + TraceStyle::Compact => "Compact Style", + TraceStyle::Json => "Json Format", + }; + + f.write_str(style) + } } diff --git a/src/console/ci/e2e/docker.rs b/src/console/ci/e2e/docker.rs new file mode 100644 index 000000000..32a0c3e56 --- /dev/null +++ b/src/console/ci/e2e/docker.rs @@ -0,0 +1,236 @@ +//! Docker command wrapper. +use std::io; +use std::process::{Command, Output}; +use std::thread::sleep; +use std::time::{Duration, Instant}; + +use tracing::{debug, info}; + +/// Docker command wrapper. +pub struct Docker {} + +#[derive(Clone, Debug)] +pub struct RunningContainer { + pub image: String, + pub name: String, + pub output: Output, +} + +impl Drop for RunningContainer { + /// Ensures that the temporary container is stopped when the struct goes out + /// of scope. + fn drop(&mut self) { + info!("Dropping running container: {}", self.name); + if Docker::is_container_running(&self.name) { + let _unused = Docker::stop(self); + } + } +} + +/// `docker run` command options. +pub struct RunOptions { + pub env_vars: Vec<(String, String)>, + pub ports: Vec, +} + +impl Docker { + /// Builds a Docker image from a given Dockerfile. + /// + /// # Errors + /// + /// Will fail if the docker build command fails. + pub fn build(dockerfile: &str, tag: &str) -> io::Result<()> { + let status = Command::new("docker") + .args(["build", "-f", dockerfile, "-t", tag, "."]) + .status()?; + + if status.success() { + Ok(()) + } else { + Err(io::Error::new( + io::ErrorKind::Other, + format!("Failed to build Docker image from dockerfile {dockerfile}"), + )) + } + } + + /// Runs a Docker container from a given image with multiple environment variables. + /// + /// # Arguments + /// + /// * `image` - The Docker image to run. + /// * `container` - The name for the Docker container. 
+ /// * `env_vars` - A slice of tuples, each representing an environment variable as ("KEY", "value"). + /// + /// # Errors + /// + /// Will fail if the docker run command fails. + pub fn run(image: &str, container: &str, options: &RunOptions) -> io::Result { + let initial_args = vec![ + "run".to_string(), + "--detach".to_string(), + "--name".to_string(), + container.to_string(), + ]; + + // Add environment variables + let mut env_var_args: Vec = vec![]; + for (key, value) in &options.env_vars { + env_var_args.push("--env".to_string()); + env_var_args.push(format!("{key}={value}")); + } + + // Add port mappings + let mut port_args: Vec = vec![]; + for port in &options.ports { + port_args.push("--publish".to_string()); + port_args.push(port.to_string()); + } + + let args = [initial_args, env_var_args, port_args, [image.to_string()].to_vec()].concat(); + + debug!("Docker run args: {:?}", args); + + let output = Command::new("docker").args(args).output()?; + + if output.status.success() { + Ok(RunningContainer { + image: image.to_owned(), + name: container.to_owned(), + output, + }) + } else { + Err(io::Error::new( + io::ErrorKind::Other, + format!("Failed to run Docker image {image}"), + )) + } + } + + /// Stops a Docker container. + /// + /// # Errors + /// + /// Will fail if the docker stop command fails. + pub fn stop(container: &RunningContainer) -> io::Result<()> { + let status = Command::new("docker").args(["stop", &container.name]).status()?; + + if status.success() { + Ok(()) + } else { + Err(io::Error::new( + io::ErrorKind::Other, + format!("Failed to stop Docker container {}", container.name), + )) + } + } + + /// Removes a Docker container. + /// + /// # Errors + /// + /// Will fail if the docker rm command fails. 
+ pub fn remove(container: &str) -> io::Result<()> { + let status = Command::new("docker").args(["rm", "-f", container]).status()?; + + if status.success() { + Ok(()) + } else { + Err(io::Error::new( + io::ErrorKind::Other, + format!("Failed to remove Docker container {container}"), + )) + } + } + + /// Fetches logs from a Docker container. + /// + /// # Errors + /// + /// Will fail if the docker logs command fails. + pub fn logs(container: &str) -> io::Result { + let output = Command::new("docker").args(["logs", container]).output()?; + + if output.status.success() { + Ok(String::from_utf8_lossy(&output.stdout).to_string()) + } else { + Err(io::Error::new( + io::ErrorKind::Other, + format!("Failed to fetch logs from Docker container {container}"), + )) + } + } + + /// Checks if a Docker container is healthy. + #[must_use] + pub fn wait_until_is_healthy(name: &str, timeout: Duration) -> bool { + let start = Instant::now(); + + while start.elapsed() < timeout { + let Ok(output) = Command::new("docker") + .args(["ps", "-f", &format!("name={name}"), "--format", "{{.Status}}"]) + .output() + else { + return false; + }; + + let output_str = String::from_utf8_lossy(&output.stdout); + + info!("Waiting until container is healthy: {:?}", output_str); + + if output_str.contains("(healthy)") { + return true; + } + + sleep(Duration::from_secs(1)); + } + + false + } + + /// Checks if a Docker container is running. + /// + /// # Arguments + /// + /// * `container` - The name of the Docker container. + /// + /// # Returns + /// + /// `true` if the container is running, `false` otherwise. + #[must_use] + pub fn is_container_running(container: &str) -> bool { + match Command::new("docker") + .args(["ps", "-f", &format!("name={container}"), "--format", "{{.Names}}"]) + .output() + { + Ok(output) => { + let output_str = String::from_utf8_lossy(&output.stdout); + output_str.contains(container) + } + Err(_) => false, + } + } + + /// Checks if a Docker container exists. 
+ /// + /// # Arguments + /// + /// * `container` - The name of the Docker container. + /// + /// # Returns + /// + /// `true` if the container exists, `false` otherwise. + #[must_use] + pub fn container_exist(container: &str) -> bool { + match Command::new("docker") + .args(["ps", "-a", "-f", &format!("name={container}"), "--format", "{{.Names}}"]) + .output() + { + Ok(output) => { + let output_str = String::from_utf8_lossy(&output.stdout); + output_str.contains(container) + } + Err(_) => false, + } + } +} diff --git a/src/console/ci/e2e/logs_parser.rs b/src/console/ci/e2e/logs_parser.rs new file mode 100644 index 000000000..95648a2b5 --- /dev/null +++ b/src/console/ci/e2e/logs_parser.rs @@ -0,0 +1,179 @@ +//! Utilities to parse Torrust Tracker logs. +use regex::Regex; +use serde::{Deserialize, Serialize}; + +use crate::servers::health_check_api::HEALTH_CHECK_API_LOG_TARGET; +use crate::servers::http::HTTP_TRACKER_LOG_TARGET; +use crate::servers::logging::STARTED_ON; +use crate::servers::udp::UDP_TRACKER_LOG_TARGET; + +const INFO_THRESHOLD: &str = "INFO"; + +#[derive(Serialize, Deserialize, Debug, Default)] +pub struct RunningServices { + pub udp_trackers: Vec, + pub http_trackers: Vec, + pub health_checks: Vec, +} + +impl RunningServices { + /// It parses the tracker logs to extract the running services. + /// + /// For example, from this logs: + /// + /// ```text + /// Loading configuration from default configuration file: `./share/default/config/tracker.development.sqlite3.toml` ... 
+ /// 2024-06-10T16:07:39.989540Z INFO torrust_tracker::bootstrap::logging: Logging initialized + /// 2024-06-10T16:07:39.990205Z INFO UDP TRACKER: Starting on: udp://0.0.0.0:6868 + /// 2024-06-10T16:07:39.990215Z INFO UDP TRACKER: Started on: udp://0.0.0.0:6868 + /// 2024-06-10T16:07:39.990244Z INFO UDP TRACKER: Starting on: udp://0.0.0.0:6969 + /// 2024-06-10T16:07:39.990255Z INFO UDP TRACKER: Started on: udp://0.0.0.0:6969 + /// 2024-06-10T16:07:39.990261Z INFO torrust_tracker::bootstrap::jobs: TLS not enabled + /// 2024-06-10T16:07:39.990303Z INFO HTTP TRACKER: Starting on: http://0.0.0.0:7070 + /// 2024-06-10T16:07:39.990439Z INFO HTTP TRACKER: Started on: http://0.0.0.0:7070 + /// 2024-06-10T16:07:39.990448Z INFO torrust_tracker::bootstrap::jobs: TLS not enabled + /// 2024-06-10T16:07:39.990563Z INFO API: Starting on http://127.0.0.1:1212 + /// 2024-06-10T16:07:39.990565Z INFO API: Started on http://127.0.0.1:1212 + /// 2024-06-10T16:07:39.990577Z INFO HEALTH CHECK API: Starting on: http://127.0.0.1:1313 + /// 2024-06-10T16:07:39.990638Z INFO HEALTH CHECK API: Started on: http://127.0.0.1:1313 + /// ``` + /// + /// It would extract these services: + /// + /// ```json + /// { + /// "udp_trackers": [ + /// "127.0.0.1:6969" + /// ], + /// "http_trackers": [ + /// "http://127.0.0.1:7070" + /// ], + /// "health_checks": [ + /// "http://127.0.0.1:1313/health_check" + /// ] + /// } + /// ``` + /// + /// NOTICE: Using colors in the console output could affect this method + /// due to the hidden control chars. + /// + /// # Panics + /// + /// Will panic is the regular expression to parse the services can't be compiled. 
+ #[must_use] + pub fn parse_from_logs(logs: &str) -> Self { + let mut udp_trackers: Vec = Vec::new(); + let mut http_trackers: Vec = Vec::new(); + let mut health_checks: Vec = Vec::new(); + + let udp_re = Regex::new(&format!("{STARTED_ON}: {}", r"udp://([0-9.]+:[0-9]+)")).unwrap(); + let http_re = Regex::new(&format!("{STARTED_ON}: {}", r"(https?://[0-9.]+:[0-9]+)")).unwrap(); // DevSkim: ignore DS137138 + let health_re = Regex::new(&format!("{STARTED_ON}: {}", r"(https?://[0-9.]+:[0-9]+)")).unwrap(); // DevSkim: ignore DS137138 + let ansi_escape_re = Regex::new(r"\x1b\[[0-9;]*m").unwrap(); + + for line in logs.lines() { + let clean_line = ansi_escape_re.replace_all(line, ""); + + if !line.contains(INFO_THRESHOLD) { + continue; + }; + + if line.contains(UDP_TRACKER_LOG_TARGET) { + if let Some(captures) = udp_re.captures(&clean_line) { + let address = Self::replace_wildcard_ip_with_localhost(&captures[1]); + udp_trackers.push(address); + } + } else if line.contains(HTTP_TRACKER_LOG_TARGET) { + if let Some(captures) = http_re.captures(&clean_line) { + let address = Self::replace_wildcard_ip_with_localhost(&captures[1]); + http_trackers.push(address); + } + } else if line.contains(HEALTH_CHECK_API_LOG_TARGET) { + if let Some(captures) = health_re.captures(&clean_line) { + let address = format!("{}/health_check", Self::replace_wildcard_ip_with_localhost(&captures[1])); + health_checks.push(address); + } + } + } + + Self { + udp_trackers, + http_trackers, + health_checks, + } + } + + fn replace_wildcard_ip_with_localhost(address: &str) -> String { + address.replace("0.0.0.0", "127.0.0.1") + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn it_should_parse_from_logs_with_valid_logs() { + let logs = r" + Loading configuration from default configuration file: `./share/default/config/tracker.development.sqlite3.toml` ... 
+ 2024-06-10T16:07:39.989540Z INFO torrust_tracker::bootstrap::logging: Logging initialized + 2024-06-10T16:07:39.990244Z INFO UDP TRACKER: Starting on: udp://0.0.0.0:6969 + 2024-06-10T16:07:39.990255Z INFO UDP TRACKER: Started on: udp://0.0.0.0:6969 + 2024-06-10T16:07:39.990261Z INFO torrust_tracker::bootstrap::jobs: TLS not enabled + 2024-06-10T16:07:39.990303Z INFO HTTP TRACKER: Starting on: http://0.0.0.0:7070 + 2024-06-10T16:07:39.990439Z INFO HTTP TRACKER: Started on: http://0.0.0.0:7070 + 2024-06-10T16:07:39.990448Z INFO torrust_tracker::bootstrap::jobs: TLS not enabled + 2024-06-10T16:07:39.990563Z INFO API: Starting on http://127.0.0.1:1212 + 2024-06-10T16:07:39.990565Z INFO API: Started on http://127.0.0.1:1212 + 2024-06-10T16:07:39.990577Z INFO HEALTH CHECK API: Starting on: http://127.0.0.1:1313 + 2024-06-10T16:07:39.990638Z INFO HEALTH CHECK API: Started on: http://127.0.0.1:1313 + "; + + let running_services = RunningServices::parse_from_logs(logs); + + assert_eq!(running_services.udp_trackers, vec!["127.0.0.1:6969"]); + assert_eq!(running_services.http_trackers, vec!["http://127.0.0.1:7070"]); + assert_eq!(running_services.health_checks, vec!["http://127.0.0.1:1313/health_check"]); + } + + #[test] + fn it_should_support_colored_output() { + let logs = "\x1b[2m2024-06-14T14:40:13.028824Z\x1b[0m \x1b[33mINFO\x1b[0m \x1b[2mUDP TRACKER\x1b[0m: \x1b[37mStarted on: udp://0.0.0.0:6969\x1b[0m"; + + let running_services = RunningServices::parse_from_logs(logs); + + assert_eq!(running_services.udp_trackers, vec!["127.0.0.1:6969"]); + } + + #[test] + fn it_should_ignore_logs_with_no_matching_lines() { + let logs = "[Other Service][INFO] Started on: 0.0.0.0:7070"; + + let running_services = RunningServices::parse_from_logs(logs); + + assert!(running_services.udp_trackers.is_empty()); + assert!(running_services.http_trackers.is_empty()); + assert!(running_services.health_checks.is_empty()); + } + + #[test] + fn it_should_parse_multiple_services() { + let logs = " 
+ 2024-06-10T16:07:39.990205Z INFO UDP TRACKER: Starting on: udp://0.0.0.0:6868 + 2024-06-10T16:07:39.990215Z INFO UDP TRACKER: Started on: udp://0.0.0.0:6868 + + 2024-06-10T16:07:39.990244Z INFO UDP TRACKER: Starting on: udp://0.0.0.0:6969 + 2024-06-10T16:07:39.990255Z INFO UDP TRACKER: Started on: udp://0.0.0.0:6969 + "; + + let running_services = RunningServices::parse_from_logs(logs); + + assert_eq!(running_services.udp_trackers, vec!["127.0.0.1:6868", "127.0.0.1:6969"]); + } + + #[test] + fn it_should_replace_wildcard_ip_with_localhost() { + let address = "0.0.0.0:8080"; + assert_eq!(RunningServices::replace_wildcard_ip_with_localhost(address), "127.0.0.1:8080"); + } +} diff --git a/src/console/ci/e2e/mod.rs b/src/console/ci/e2e/mod.rs new file mode 100644 index 000000000..58a876cbe --- /dev/null +++ b/src/console/ci/e2e/mod.rs @@ -0,0 +1,6 @@ +//! E2E tests scripts. +pub mod docker; +pub mod logs_parser; +pub mod runner; +pub mod tracker_checker; +pub mod tracker_container; diff --git a/src/console/ci/e2e/runner.rs b/src/console/ci/e2e/runner.rs new file mode 100644 index 000000000..f2285938b --- /dev/null +++ b/src/console/ci/e2e/runner.rs @@ -0,0 +1,159 @@ +//! Program to run E2E tests. +//! +//! You can execute it with (passing a TOML config file path): +//! +//! ```text +//! cargo run --bin e2e_tests_runner -- --config-toml-path "./share/default/config/tracker.e2e.container.sqlite3.toml" +//! ``` +//! +//! Or: +//! +//! ```text +//! TORRUST_TRACKER_CONFIG_TOML_PATH="./share/default/config/tracker.e2e.container.sqlite3.toml" cargo run --bin e2e_tests_runner" +//! ``` +//! +//! You can execute it with (directly passing TOML config): +//! +//! ```text +//! TORRUST_TRACKER_CONFIG_TOML=$(cat "./share/default/config/tracker.e2e.container.sqlite3.toml") cargo run --bin e2e_tests_runner +//! 
``` +use std::path::PathBuf; + +use anyhow::Context; +use clap::Parser; +use tracing::info; +use tracing::level_filters::LevelFilter; + +use super::tracker_container::TrackerContainer; +use crate::console::ci::e2e::docker::RunOptions; +use crate::console::ci::e2e::logs_parser::RunningServices; +use crate::console::ci::e2e::tracker_checker::{self}; + +/* code-review: + - We use always the same docker image name. Should we use a random image name (tag)? + - We use the name image name we use in other workflows `torrust-tracker:local`. + Should we use a different one like `torrust-tracker:e2e`? + - We remove the container after running tests but not the container image. + Should we remove the image too? +*/ + +const CONTAINER_IMAGE: &str = "torrust-tracker:local"; +const CONTAINER_NAME_PREFIX: &str = "tracker_"; + +#[derive(Parser, Debug)] +#[clap(author, version, about, long_about = None)] +struct Args { + /// Path to the JSON configuration file. + #[clap(short, long, env = "TORRUST_TRACKER_CONFIG_TOML_PATH")] + config_toml_path: Option, + + /// Direct configuration content in JSON. + #[clap(env = "TORRUST_TRACKER_CONFIG_TOML", hide_env_values = true)] + config_toml: Option, +} + +/// Script to run E2E tests. +/// +/// # Errors +/// +/// Will return an error if it can't load the tracker configuration from arguments. +/// +/// # Panics +/// +/// Will panic if it can't not perform any of the operations. +pub fn run() -> anyhow::Result<()> { + tracing_stdout_init(LevelFilter::INFO); + + let args = Args::parse(); + + let tracker_config = load_tracker_configuration(&args)?; + + info!("tracker config:\n{tracker_config}"); + + let mut tracker_container = TrackerContainer::new(CONTAINER_IMAGE, CONTAINER_NAME_PREFIX); + + tracker_container.build_image(); + + // code-review: if we want to use port 0 we don't know which ports we have to open. + // Besides, if we don't use port 0 we should get the port numbers from the tracker configuration. 
+ // We could not use docker, but the intention was to create E2E tests including containerization. + let options = RunOptions { + env_vars: vec![("TORRUST_TRACKER_CONFIG_TOML".to_string(), tracker_config.to_string())], + ports: vec![ + "6969:6969/udp".to_string(), + "7070:7070/tcp".to_string(), + "1212:1212/tcp".to_string(), + "1313:1313/tcp".to_string(), + ], + }; + + tracker_container.run(&options); + + let running_services = tracker_container.running_services(); + + info!( + "Running services:\n {}", + serde_json::to_string_pretty(&running_services).expect("running services to be serializable to JSON") + ); + + assert_there_is_at_least_one_service_per_type(&running_services); + + let tracker_checker_config = + serde_json::to_string_pretty(&running_services).expect("Running services should be serialized into JSON"); + + tracker_checker::run(&tracker_checker_config).expect("All tracker services should be running correctly"); + + // More E2E tests could be added here in the future. + // For example: `cargo test ...` for only E2E tests, using this shared test env. 
+ + tracker_container.stop(); + + tracker_container.remove(); + + info!("Tracker container final state:\n{:#?}", tracker_container); + + Ok(()) +} + +fn tracing_stdout_init(filter: LevelFilter) { + tracing_subscriber::fmt().with_max_level(filter).init(); + info!("Logging initialized"); +} + +fn load_tracker_configuration(args: &Args) -> anyhow::Result { + match (args.config_toml_path.clone(), args.config_toml.clone()) { + (Some(config_path), _) => { + info!( + "Reading tracker configuration from file: {} ...", + config_path.to_string_lossy() + ); + load_config_from_file(&config_path) + } + (_, Some(config_content)) => { + info!("Reading tracker configuration from env var ..."); + Ok(config_content) + } + _ => Err(anyhow::anyhow!("No configuration provided")), + } +} + +fn load_config_from_file(path: &PathBuf) -> anyhow::Result { + let config = std::fs::read_to_string(path).with_context(|| format!("CSan't read config file {path:?}"))?; + + Ok(config) +} + +fn assert_there_is_at_least_one_service_per_type(running_services: &RunningServices) { + assert!( + !running_services.udp_trackers.is_empty(), + "At least one UDP tracker should be enabled in E2E tests configuration" + ); + assert!( + !running_services.http_trackers.is_empty(), + "At least one HTTP tracker should be enabled in E2E tests configuration" + ); + assert!( + !running_services.health_checks.is_empty(), + "At least one Health Check should be enabled in E2E tests configuration" + ); +} diff --git a/src/console/ci/e2e/tracker_checker.rs b/src/console/ci/e2e/tracker_checker.rs new file mode 100644 index 000000000..b2fd7df2e --- /dev/null +++ b/src/console/ci/e2e/tracker_checker.rs @@ -0,0 +1,25 @@ +use std::io; +use std::process::Command; + +use tracing::info; + +/// Runs the Tracker Checker. +/// +/// # Errors +/// +/// Will return an error if the Tracker Checker fails. 
+pub fn run(config_content: &str) -> io::Result<()> { + info!("Running Tracker Checker: TORRUST_CHECKER_CONFIG=[config] cargo run --bin tracker_checker"); + info!("Tracker Checker config:\n{config_content}"); + + let status = Command::new("cargo") + .env("TORRUST_CHECKER_CONFIG", config_content) + .args(["run", "--bin", "tracker_checker"]) + .status()?; + + if status.success() { + Ok(()) + } else { + Err(io::Error::new(io::ErrorKind::Other, "Failed to run Tracker Checker")) + } +} diff --git a/src/console/ci/e2e/tracker_container.rs b/src/console/ci/e2e/tracker_container.rs new file mode 100644 index 000000000..528fd3c62 --- /dev/null +++ b/src/console/ci/e2e/tracker_container.rs @@ -0,0 +1,135 @@ +use std::time::Duration; + +use rand::distributions::Alphanumeric; +use rand::Rng; +use tracing::{error, info}; + +use super::docker::{RunOptions, RunningContainer}; +use super::logs_parser::RunningServices; +use crate::console::ci::e2e::docker::Docker; + +#[derive(Debug)] +pub struct TrackerContainer { + pub image: String, + pub name: String, + pub running: Option, +} + +impl Drop for TrackerContainer { + /// Ensures that the temporary container is removed when the + /// struct goes out of scope. + fn drop(&mut self) { + info!("Dropping tracker container: {}", self.name); + if Docker::container_exist(&self.name) { + let _unused = Docker::remove(&self.name); + } + } +} + +impl TrackerContainer { + #[must_use] + pub fn new(tag: &str, container_name_prefix: &str) -> Self { + Self { + image: tag.to_owned(), + name: Self::generate_random_container_name(container_name_prefix), + running: None, + } + } + + /// # Panics + /// + /// Will panic if it can't build the docker image. + pub fn build_image(&self) { + info!("Building tracker container image with tag: {} ...", self.image); + Docker::build("./Containerfile", &self.image).expect("A tracker local docker image should be built"); + } + + /// # Panics + /// + /// Will panic if it can't run the container. 
+ pub fn run(&mut self, options: &RunOptions) { + info!("Running docker tracker image: {} ...", self.name); + + let container = Docker::run(&self.image, &self.name, options).expect("A tracker local docker image should be running"); + + info!("Waiting for the container {} to be healthy ...", self.name); + + let is_healthy = Docker::wait_until_is_healthy(&self.name, Duration::from_secs(10)); + + assert!(is_healthy, "Unhealthy tracker container: {}", &self.name); + + info!("Container {} is healthy ...", &self.name); + + self.running = Some(container); + + self.assert_there_are_no_panics_in_logs(); + } + + /// # Panics + /// + /// Will panic if it can't get the logs from the running container. + #[must_use] + pub fn running_services(&self) -> RunningServices { + let logs = Docker::logs(&self.name).expect("Logs should be captured from running container"); + + info!("Parsing running services from logs. Logs :\n{logs}"); + + RunningServices::parse_from_logs(&logs) + } + + /// # Panics + /// + /// Will panic if it can't stop the container. + pub fn stop(&mut self) { + match &self.running { + Some(container) => { + info!("Stopping docker tracker container: {} ...", self.name); + + Docker::stop(container).expect("Container should be stopped"); + + self.assert_there_are_no_panics_in_logs(); + } + None => { + if Docker::is_container_running(&self.name) { + error!("Tracker container {} was started manually", self.name); + } else { + info!("Docker tracker container is not running: {} ...", self.name); + } + } + } + + self.running = None; + } + + /// # Panics + /// + /// Will panic if it can't remove the container. 
+ pub fn remove(&self) { + if let Some(_running_container) = &self.running { + error!("Can't remove running container: {} ...", self.name); + } else { + info!("Removing docker tracker container: {} ...", self.name); + Docker::remove(&self.name).expect("Container should be removed"); + } + } + + fn generate_random_container_name(prefix: &str) -> String { + let rand_string: String = rand::thread_rng() + .sample_iter(&Alphanumeric) + .take(20) + .map(char::from) + .collect(); + + format!("{prefix}{rand_string}") + } + + fn assert_there_are_no_panics_in_logs(&self) { + let logs = Docker::logs(&self.name).expect("Logs should be captured from running container"); + + assert!( + !(logs.contains(" panicked at ") || logs.contains("RUST_BACKTRACE=1")), + "{}", + format!("Panics found is logs:\n{logs}") + ); + } +} diff --git a/src/console/ci/mod.rs b/src/console/ci/mod.rs new file mode 100644 index 000000000..6eac3e120 --- /dev/null +++ b/src/console/ci/mod.rs @@ -0,0 +1,2 @@ +//! Continuos integration scripts. +pub mod e2e; diff --git a/src/console/clients/checker/app.rs b/src/console/clients/checker/app.rs new file mode 100644 index 000000000..3bafc2661 --- /dev/null +++ b/src/console/clients/checker/app.rs @@ -0,0 +1,121 @@ +//! Program to run checks against running trackers. +//! +//! Run providing a config file path: +//! +//! ```text +//! cargo run --bin tracker_checker -- --config-path "./share/default/config/tracker_checker.json" +//! TORRUST_CHECKER_CONFIG_PATH="./share/default/config/tracker_checker.json" cargo run --bin tracker_checker +//! ``` +//! +//! Run providing the configuration: +//! +//! ```text +//! TORRUST_CHECKER_CONFIG=$(cat "./share/default/config/tracker_checker.json") cargo run --bin tracker_checker +//! ``` +//! +//! Another real example to test the Torrust demo tracker: +//! +//! ```text +//! TORRUST_CHECKER_CONFIG='{ +//! "udp_trackers": ["144.126.245.19:6969"], +//! "http_trackers": ["https://tracker.torrust-demo.com"], +//! 
"health_checks": ["https://tracker.torrust-demo.com/api/health_check"] +//! }' cargo run --bin tracker_checker +//! ``` +//! +//! The output should be something like the following: +//! +//! ```json +//! { +//! "udp_trackers": [ +//! { +//! "url": "144.126.245.19:6969", +//! "status": { +//! "code": "ok", +//! "message": "" +//! } +//! } +//! ], +//! "http_trackers": [ +//! { +//! "url": "https://tracker.torrust-demo.com/", +//! "status": { +//! "code": "ok", +//! "message": "" +//! } +//! } +//! ], +//! "health_checks": [ +//! { +//! "url": "https://tracker.torrust-demo.com/api/health_check", +//! "status": { +//! "code": "ok", +//! "message": "" +//! } +//! } +//! ] +//! } +//! ``` +use std::path::PathBuf; +use std::sync::Arc; + +use anyhow::{Context, Result}; +use clap::Parser; +use tracing::debug; +use tracing::level_filters::LevelFilter; + +use super::config::Configuration; +use super::console::Console; +use super::service::{CheckResult, Service}; +use crate::console::clients::checker::config::parse_from_json; + +#[derive(Parser, Debug)] +#[clap(author, version, about, long_about = None)] +struct Args { + /// Path to the JSON configuration file. + #[clap(short, long, env = "TORRUST_CHECKER_CONFIG_PATH")] + config_path: Option, + + /// Direct configuration content in JSON. + #[clap(env = "TORRUST_CHECKER_CONFIG", hide_env_values = true)] + config_content: Option, +} + +/// # Errors +/// +/// Will return an error if the configuration was not provided. 
+pub async fn run() -> Result> { + tracing_stdout_init(LevelFilter::INFO); + + let args = Args::parse(); + + let config = setup_config(args)?; + + let console_printer = Console {}; + + let service = Service { + config: Arc::new(config), + console: console_printer, + }; + + service.run_checks().await.context("it should run the check tasks") +} + +fn tracing_stdout_init(filter: LevelFilter) { + tracing_subscriber::fmt().with_max_level(filter).init(); + debug!("Logging initialized"); +} + +fn setup_config(args: Args) -> Result { + match (args.config_path, args.config_content) { + (Some(config_path), _) => load_config_from_file(&config_path), + (_, Some(config_content)) => parse_from_json(&config_content).context("invalid config format"), + _ => Err(anyhow::anyhow!("no configuration provided")), + } +} + +fn load_config_from_file(path: &PathBuf) -> Result { + let file_content = std::fs::read_to_string(path).with_context(|| format!("can't read config file {path:?}"))?; + + parse_from_json(&file_content).context("invalid config format") +} diff --git a/src/console/clients/checker/checks/health.rs b/src/console/clients/checker/checks/health.rs new file mode 100644 index 000000000..b1fb79148 --- /dev/null +++ b/src/console/clients/checker/checks/health.rs @@ -0,0 +1,77 @@ +use std::sync::Arc; +use std::time::Duration; + +use anyhow::Result; +use hyper::StatusCode; +use reqwest::{Client as HttpClient, Response}; +use serde::Serialize; +use thiserror::Error; +use url::Url; + +#[derive(Debug, Clone, Error, Serialize)] +#[serde(into = "String")] +pub enum Error { + #[error("Failed to Build a Http Client: {err:?}")] + ClientBuildingError { err: Arc }, + #[error("Heath check failed to get a response: {err:?}")] + ResponseError { err: Arc }, + #[error("Http check returned a non-success code: \"{code}\" with the response: \"{response:?}\"")] + UnsuccessfulResponse { code: StatusCode, response: Arc }, +} + +impl From for String { + fn from(value: Error) -> Self { + 
value.to_string() + } +} + +#[derive(Debug, Clone, Serialize)] +pub struct Checks { + url: Url, + result: Result, +} + +pub async fn run(health_checks: Vec, timeout: Duration) -> Vec> { + let mut results = Vec::default(); + + tracing::debug!("Health checks ..."); + + for url in health_checks { + let result = match run_health_check(url.clone(), timeout).await { + Ok(response) => Ok(response.status().to_string()), + Err(err) => Err(err), + }; + + let check = Checks { url, result }; + + if check.result.is_err() { + results.push(Err(check)); + } else { + results.push(Ok(check)); + } + } + + results +} + +async fn run_health_check(url: Url, timeout: Duration) -> Result { + let client = HttpClient::builder() + .timeout(timeout) + .build() + .map_err(|e| Error::ClientBuildingError { err: e.into() })?; + + let response = client + .get(url.clone()) + .send() + .await + .map_err(|e| Error::ResponseError { err: e.into() })?; + + if response.status().is_success() { + Ok(response) + } else { + Err(Error::UnsuccessfulResponse { + code: response.status(), + response: response.into(), + }) + } +} diff --git a/src/console/clients/checker/checks/http.rs b/src/console/clients/checker/checks/http.rs new file mode 100644 index 000000000..8abbeb669 --- /dev/null +++ b/src/console/clients/checker/checks/http.rs @@ -0,0 +1,101 @@ +use std::str::FromStr as _; +use std::time::Duration; + +use serde::Serialize; +use torrust_tracker_primitives::info_hash::InfoHash; +use url::Url; + +use crate::console::clients::http::Error; +use crate::shared::bit_torrent::tracker::http::client::responses::announce::Announce; +use crate::shared::bit_torrent::tracker::http::client::responses::scrape; +use crate::shared::bit_torrent::tracker::http::client::{requests, Client}; + +#[derive(Debug, Clone, Serialize)] +pub struct Checks { + url: Url, + results: Vec<(Check, Result<(), Error>)>, +} + +#[derive(Debug, Clone, Serialize)] +pub enum Check { + Announce, + Scrape, +} + +pub async fn run(http_trackers: Vec, 
timeout: Duration) -> Vec> { + let mut results = Vec::default(); + + tracing::debug!("HTTP trackers ..."); + + for ref url in http_trackers { + let mut checks = Checks { + url: url.clone(), + results: Vec::default(), + }; + + // Announce + { + let check = check_http_announce(url, timeout).await.map(|_| ()); + + checks.results.push((Check::Announce, check)); + } + + // Scrape + { + let check = check_http_scrape(url, timeout).await.map(|_| ()); + + checks.results.push((Check::Scrape, check)); + } + + if checks.results.iter().any(|f| f.1.is_err()) { + results.push(Err(checks)); + } else { + results.push(Ok(checks)); + } + } + + results +} + +async fn check_http_announce(url: &Url, timeout: Duration) -> Result { + let info_hash_str = "9c38422213e30bff212b30c360d26f9a02136422".to_string(); // # DevSkim: ignore DS173237 + let info_hash = InfoHash::from_str(&info_hash_str).expect("a valid info-hash is required"); + + let client = Client::new(url.clone(), timeout).map_err(|err| Error::HttpClientError { err })?; + + let response = client + .announce( + &requests::announce::QueryBuilder::with_default_values() + .with_info_hash(&info_hash) + .query(), + ) + .await + .map_err(|err| Error::HttpClientError { err })?; + + let response = response.bytes().await.map_err(|e| Error::ResponseError { err: e.into() })?; + + let response = serde_bencode::from_bytes::(&response).map_err(|e| Error::ParseBencodeError { + data: response, + err: e.into(), + })?; + + Ok(response) +} + +async fn check_http_scrape(url: &Url, timeout: Duration) -> Result { + let info_hashes: Vec = vec!["9c38422213e30bff212b30c360d26f9a02136422".to_string()]; // # DevSkim: ignore DS173237 + let query = requests::scrape::Query::try_from(info_hashes).expect("a valid array of info-hashes is required"); + + let client = Client::new(url.clone(), timeout).map_err(|err| Error::HttpClientError { err })?; + + let response = client.scrape(&query).await.map_err(|err| Error::HttpClientError { err })?; + + let response = 
response.bytes().await.map_err(|e| Error::ResponseError { err: e.into() })?; + + let response = scrape::Response::try_from_bencoded(&response).map_err(|e| Error::BencodeParseError { + data: response, + err: e.into(), + })?; + + Ok(response) +} diff --git a/src/console/clients/checker/checks/mod.rs b/src/console/clients/checker/checks/mod.rs new file mode 100644 index 000000000..f8b03f749 --- /dev/null +++ b/src/console/clients/checker/checks/mod.rs @@ -0,0 +1,4 @@ +pub mod health; +pub mod http; +pub mod structs; +pub mod udp; diff --git a/src/console/clients/checker/checks/structs.rs b/src/console/clients/checker/checks/structs.rs new file mode 100644 index 000000000..d28e20c04 --- /dev/null +++ b/src/console/clients/checker/checks/structs.rs @@ -0,0 +1,12 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize)] +pub struct Status { + pub code: String, + pub message: String, +} +#[derive(Serialize, Deserialize)] +pub struct CheckerOutput { + pub url: String, + pub status: Status, +} diff --git a/src/console/clients/checker/checks/udp.rs b/src/console/clients/checker/checks/udp.rs new file mode 100644 index 000000000..dd4d5e639 --- /dev/null +++ b/src/console/clients/checker/checks/udp.rs @@ -0,0 +1,98 @@ +use std::net::SocketAddr; +use std::time::Duration; + +use aquatic_udp_protocol::TransactionId; +use hex_literal::hex; +use serde::Serialize; +use torrust_tracker_primitives::info_hash::InfoHash; + +use crate::console::clients::udp::checker::Client; +use crate::console::clients::udp::Error; + +#[derive(Debug, Clone, Serialize)] +pub struct Checks { + remote_addr: SocketAddr, + results: Vec<(Check, Result<(), Error>)>, +} + +#[derive(Debug, Clone, Serialize)] +pub enum Check { + Setup, + Connect, + Announce, + Scrape, +} + +#[allow(clippy::missing_panics_doc)] +pub async fn run(udp_trackers: Vec, timeout: Duration) -> Vec> { + let mut results = Vec::default(); + + tracing::debug!("UDP trackers ..."); + + let info_hash = 
InfoHash(hex!("9c38422213e30bff212b30c360d26f9a02136422")); // # DevSkim: ignore DS173237 + + for remote_addr in udp_trackers { + let mut checks = Checks { + remote_addr, + results: Vec::default(), + }; + + tracing::debug!("UDP tracker: {:?}", remote_addr); + + // Setup + let client = match Client::new(remote_addr, timeout).await { + Ok(client) => { + checks.results.push((Check::Setup, Ok(()))); + client + } + Err(err) => { + checks.results.push((Check::Setup, Err(err))); + results.push(Err(checks)); + break; + } + }; + + let transaction_id = TransactionId::new(1); + + // Connect Remote + let connection_id = match client.send_connection_request(transaction_id).await { + Ok(connection_id) => { + checks.results.push((Check::Connect, Ok(()))); + connection_id + } + Err(err) => { + checks.results.push((Check::Connect, Err(err))); + results.push(Err(checks)); + break; + } + }; + + // Announce + { + let check = client + .send_announce_request(transaction_id, connection_id, info_hash) + .await + .map(|_| ()); + + checks.results.push((Check::Announce, check)); + } + + // Scrape + { + let check = client + .send_scrape_request(connection_id, transaction_id, &[info_hash]) + .await + .map(|_| ()); + + checks.results.push((Check::Announce, check)); + } + + if checks.results.iter().any(|f| f.1.is_err()) { + results.push(Err(checks)); + } else { + results.push(Ok(checks)); + } + } + + results +} diff --git a/src/console/clients/checker/config.rs b/src/console/clients/checker/config.rs new file mode 100644 index 000000000..6e44d889b --- /dev/null +++ b/src/console/clients/checker/config.rs @@ -0,0 +1,154 @@ +use std::error::Error; +use std::fmt; +use std::net::SocketAddr; + +use reqwest::Url as ServiceUrl; +use serde::Deserialize; + +/// It parses the configuration from a JSON format. +/// +/// # Errors +/// +/// Will return an error if the configuration is not valid. +/// +/// # Panics +/// +/// Will panic if unable to read the configuration file. 
+pub fn parse_from_json(json: &str) -> Result { + let plain_config: PlainConfiguration = serde_json::from_str(json).map_err(ConfigurationError::JsonParseError)?; + Configuration::try_from(plain_config) +} + +/// DTO for the configuration to serialize/deserialize configuration. +/// +/// Configuration does not need to be valid. +#[derive(Deserialize)] +struct PlainConfiguration { + pub udp_trackers: Vec, + pub http_trackers: Vec, + pub health_checks: Vec, +} + +/// Validated configuration +pub struct Configuration { + pub udp_trackers: Vec, + pub http_trackers: Vec, + pub health_checks: Vec, +} + +#[derive(Debug)] +pub enum ConfigurationError { + JsonParseError(serde_json::Error), + InvalidUdpAddress(std::net::AddrParseError), + InvalidUrl(url::ParseError), +} + +impl Error for ConfigurationError {} + +impl fmt::Display for ConfigurationError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + ConfigurationError::JsonParseError(e) => write!(f, "JSON parse error: {e}"), + ConfigurationError::InvalidUdpAddress(e) => write!(f, "Invalid UDP address: {e}"), + ConfigurationError::InvalidUrl(e) => write!(f, "Invalid URL: {e}"), + } + } +} + +impl TryFrom for Configuration { + type Error = ConfigurationError; + + fn try_from(plain_config: PlainConfiguration) -> Result { + let udp_trackers = plain_config + .udp_trackers + .into_iter() + .map(|s| s.parse::().map_err(ConfigurationError::InvalidUdpAddress)) + .collect::, _>>()?; + + let http_trackers = plain_config + .http_trackers + .into_iter() + .map(|s| s.parse::().map_err(ConfigurationError::InvalidUrl)) + .collect::, _>>()?; + + let health_checks = plain_config + .health_checks + .into_iter() + .map(|s| s.parse::().map_err(ConfigurationError::InvalidUrl)) + .collect::, _>>()?; + + Ok(Configuration { + udp_trackers, + http_trackers, + health_checks, + }) + } +} + +#[cfg(test)] +mod tests { + use std::net::{IpAddr, Ipv4Addr}; + + use super::*; + + #[test] + fn 
configuration_should_be_build_from_plain_serializable_configuration() { + let dto = PlainConfiguration { + udp_trackers: vec!["127.0.0.1:8080".to_string()], + http_trackers: vec!["http://127.0.0.1:8080".to_string()], + health_checks: vec!["http://127.0.0.1:8080/health".to_string()], + }; + + let config = Configuration::try_from(dto).expect("A valid configuration"); + + assert_eq!( + config.udp_trackers, + vec![SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080)] + ); + assert_eq!( + config.http_trackers, + vec![ServiceUrl::parse("http://127.0.0.1:8080").unwrap()] + ); + assert_eq!( + config.health_checks, + vec![ServiceUrl::parse("http://127.0.0.1:8080/health").unwrap()] + ); + } + + mod building_configuration_from_plan_configuration { + use crate::console::clients::checker::config::{Configuration, PlainConfiguration}; + + #[test] + fn it_should_fail_when_a_tracker_udp_address_is_invalid() { + let plain_config = PlainConfiguration { + udp_trackers: vec!["invalid_address".to_string()], + http_trackers: vec![], + health_checks: vec![], + }; + + assert!(Configuration::try_from(plain_config).is_err()); + } + + #[test] + fn it_should_fail_when_a_tracker_http_address_is_invalid() { + let plain_config = PlainConfiguration { + udp_trackers: vec![], + http_trackers: vec!["not_a_url".to_string()], + health_checks: vec![], + }; + + assert!(Configuration::try_from(plain_config).is_err()); + } + + #[test] + fn it_should_fail_when_a_health_check_http_address_is_invalid() { + let plain_config = PlainConfiguration { + udp_trackers: vec![], + http_trackers: vec![], + health_checks: vec!["not_a_url".to_string()], + }; + + assert!(Configuration::try_from(plain_config).is_err()); + } + } +} diff --git a/src/console/clients/checker/console.rs b/src/console/clients/checker/console.rs new file mode 100644 index 000000000..b55c559fc --- /dev/null +++ b/src/console/clients/checker/console.rs @@ -0,0 +1,38 @@ +use super::printer::{Printer, CLEAR_SCREEN}; + +pub struct Console {} 
+ +impl Default for Console { + fn default() -> Self { + Self::new() + } +} + +impl Console { + #[must_use] + pub fn new() -> Self { + Self {} + } +} + +impl Printer for Console { + fn clear(&self) { + self.print(CLEAR_SCREEN); + } + + fn print(&self, output: &str) { + print!("{}", &output); + } + + fn eprint(&self, output: &str) { + eprint!("{}", &output); + } + + fn println(&self, output: &str) { + println!("{}", &output); + } + + fn eprintln(&self, output: &str) { + eprintln!("{}", &output); + } +} diff --git a/src/console/clients/checker/logger.rs b/src/console/clients/checker/logger.rs new file mode 100644 index 000000000..50e97189f --- /dev/null +++ b/src/console/clients/checker/logger.rs @@ -0,0 +1,72 @@ +use std::cell::RefCell; + +use super::printer::{Printer, CLEAR_SCREEN}; + +pub struct Logger { + output: RefCell, +} + +impl Default for Logger { + fn default() -> Self { + Self::new() + } +} + +impl Logger { + #[must_use] + pub fn new() -> Self { + Self { + output: RefCell::new(String::new()), + } + } + + pub fn log(&self) -> String { + self.output.borrow().clone() + } +} + +impl Printer for Logger { + fn clear(&self) { + self.print(CLEAR_SCREEN); + } + + fn print(&self, output: &str) { + *self.output.borrow_mut() = format!("{}{}", self.output.borrow(), &output); + } + + fn eprint(&self, output: &str) { + *self.output.borrow_mut() = format!("{}{}", self.output.borrow(), &output); + } + + fn println(&self, output: &str) { + self.print(&format!("{}/n", &output)); + } + + fn eprintln(&self, output: &str) { + self.eprint(&format!("{}/n", &output)); + } +} + +#[cfg(test)] +mod tests { + use crate::console::clients::checker::logger::Logger; + use crate::console::clients::checker::printer::{Printer, CLEAR_SCREEN}; + + #[test] + fn should_capture_the_clear_screen_command() { + let console_logger = Logger::new(); + + console_logger.clear(); + + assert_eq!(CLEAR_SCREEN, console_logger.log()); + } + + #[test] + fn should_capture_the_print_command_output() { + let 
console_logger = Logger::new(); + + console_logger.print("OUTPUT"); + + assert_eq!("OUTPUT", console_logger.log()); + } +} diff --git a/src/console/clients/checker/mod.rs b/src/console/clients/checker/mod.rs new file mode 100644 index 000000000..d26a4a686 --- /dev/null +++ b/src/console/clients/checker/mod.rs @@ -0,0 +1,7 @@ +pub mod app; +pub mod checks; +pub mod config; +pub mod console; +pub mod logger; +pub mod printer; +pub mod service; diff --git a/src/console/clients/checker/printer.rs b/src/console/clients/checker/printer.rs new file mode 100644 index 000000000..d590dfedb --- /dev/null +++ b/src/console/clients/checker/printer.rs @@ -0,0 +1,9 @@ +pub const CLEAR_SCREEN: &str = "\x1B[2J\x1B[1;1H"; + +pub trait Printer { + fn clear(&self); + fn print(&self, output: &str); + fn eprint(&self, output: &str); + fn println(&self, output: &str); + fn eprintln(&self, output: &str); +} diff --git a/src/console/clients/checker/service.rs b/src/console/clients/checker/service.rs new file mode 100644 index 000000000..acd312d8c --- /dev/null +++ b/src/console/clients/checker/service.rs @@ -0,0 +1,62 @@ +use std::sync::Arc; + +use futures::FutureExt as _; +use serde::Serialize; +use tokio::task::{JoinError, JoinSet}; +use torrust_tracker_configuration::DEFAULT_TIMEOUT; + +use super::checks::{health, http, udp}; +use super::config::Configuration; +use super::console::Console; +use crate::console::clients::checker::printer::Printer; + +pub struct Service { + pub(crate) config: Arc, + pub(crate) console: Console, +} + +#[derive(Debug, Clone, Serialize)] +pub enum CheckResult { + Udp(Result), + Http(Result), + Health(Result), +} + +impl Service { + /// # Errors + /// + /// It will return an error if some of the tests panic or otherwise fail to run. + /// On success it will return a vector of `Ok(())` of [`CheckResult`]. + /// + /// # Panics + /// + /// It would panic if `serde_json` produces invalid json for the `to_string_pretty` function. 
+ pub async fn run_checks(self) -> Result, JoinError> { + tracing::info!("Running checks for trackers ..."); + + let mut check_results = Vec::default(); + + let mut checks = JoinSet::new(); + checks.spawn( + udp::run(self.config.udp_trackers.clone(), DEFAULT_TIMEOUT).map(|mut f| f.drain(..).map(CheckResult::Udp).collect()), + ); + checks.spawn( + http::run(self.config.http_trackers.clone(), DEFAULT_TIMEOUT) + .map(|mut f| f.drain(..).map(CheckResult::Http).collect()), + ); + checks.spawn( + health::run(self.config.health_checks.clone(), DEFAULT_TIMEOUT) + .map(|mut f| f.drain(..).map(CheckResult::Health).collect()), + ); + + while let Some(results) = checks.join_next().await { + check_results.append(&mut results?); + } + + let json_output = serde_json::json!(check_results); + self.console + .println(&serde_json::to_string_pretty(&json_output).expect("it should consume valid json")); + + Ok(check_results) + } +} diff --git a/src/console/clients/http/app.rs b/src/console/clients/http/app.rs new file mode 100644 index 000000000..a54db5f8b --- /dev/null +++ b/src/console/clients/http/app.rs @@ -0,0 +1,102 @@ +//! HTTP Tracker client: +//! +//! Examples: +//! +//! `Announce` request: +//! +//! ```text +//! cargo run --bin http_tracker_client announce http://127.0.0.1:7070 9c38422213e30bff212b30c360d26f9a02136422 | jq +//! ``` +//! +//! `Scrape` request: +//! +//! ```text +//! cargo run --bin http_tracker_client scrape http://127.0.0.1:7070 9c38422213e30bff212b30c360d26f9a02136422 | jq +//! 
``` +use std::str::FromStr; +use std::time::Duration; + +use anyhow::Context; +use clap::{Parser, Subcommand}; +use reqwest::Url; +use torrust_tracker_configuration::DEFAULT_TIMEOUT; +use torrust_tracker_primitives::info_hash::InfoHash; + +use crate::shared::bit_torrent::tracker::http::client::requests::announce::QueryBuilder; +use crate::shared::bit_torrent::tracker::http::client::responses::announce::Announce; +use crate::shared::bit_torrent::tracker::http::client::responses::scrape; +use crate::shared::bit_torrent::tracker::http::client::{requests, Client}; + +#[derive(Parser, Debug)] +#[command(author, version, about, long_about = None)] +struct Args { + #[command(subcommand)] + command: Command, +} + +#[derive(Subcommand, Debug)] +enum Command { + Announce { tracker_url: String, info_hash: String }, + Scrape { tracker_url: String, info_hashes: Vec }, +} + +/// # Errors +/// +/// Will return an error if the command fails. +pub async fn run() -> anyhow::Result<()> { + let args = Args::parse(); + + match args.command { + Command::Announce { tracker_url, info_hash } => { + announce_command(tracker_url, info_hash, DEFAULT_TIMEOUT).await?; + } + Command::Scrape { + tracker_url, + info_hashes, + } => { + scrape_command(&tracker_url, &info_hashes, DEFAULT_TIMEOUT).await?; + } + } + + Ok(()) +} + +async fn announce_command(tracker_url: String, info_hash: String, timeout: Duration) -> anyhow::Result<()> { + let base_url = Url::parse(&tracker_url).context("failed to parse HTTP tracker base URL")?; + let info_hash = + InfoHash::from_str(&info_hash).expect("Invalid infohash. Example infohash: `9c38422213e30bff212b30c360d26f9a02136422`"); + + let response = Client::new(base_url, timeout)? 
+ .announce(&QueryBuilder::with_default_values().with_info_hash(&info_hash).query()) + .await?; + + let body = response.bytes().await?; + + let announce_response: Announce = serde_bencode::from_bytes(&body) + .unwrap_or_else(|_| panic!("response body should be a valid announce response, got: \"{:#?}\"", &body)); + + let json = serde_json::to_string(&announce_response).context("failed to serialize scrape response into JSON")?; + + println!("{json}"); + + Ok(()) +} + +async fn scrape_command(tracker_url: &str, info_hashes: &[String], timeout: Duration) -> anyhow::Result<()> { + let base_url = Url::parse(tracker_url).context("failed to parse HTTP tracker base URL")?; + + let query = requests::scrape::Query::try_from(info_hashes).context("failed to parse infohashes")?; + + let response = Client::new(base_url, timeout)?.scrape(&query).await?; + + let body = response.bytes().await?; + + let scrape_response = scrape::Response::try_from_bencoded(&body) + .unwrap_or_else(|_| panic!("response body should be a valid scrape response, got: \"{:#?}\"", &body)); + + let json = serde_json::to_string(&scrape_response).context("failed to serialize scrape response into JSON")?; + + println!("{json}"); + + Ok(()) +} diff --git a/src/console/clients/http/mod.rs b/src/console/clients/http/mod.rs new file mode 100644 index 000000000..eaa71957f --- /dev/null +++ b/src/console/clients/http/mod.rs @@ -0,0 +1,36 @@ +use std::sync::Arc; + +use serde::Serialize; +use thiserror::Error; + +use crate::shared::bit_torrent::tracker::http::client::responses::scrape::BencodeParseError; + +pub mod app; + +#[derive(Debug, Clone, Error, Serialize)] +#[serde(into = "String")] +pub enum Error { + #[error("Http request did not receive a response within the timeout: {err:?}")] + HttpClientError { + err: crate::shared::bit_torrent::tracker::http::client::Error, + }, + #[error("Http failed to get a response at all: {err:?}")] + ResponseError { err: Arc }, + #[error("Failed to deserialize the bencoded response 
data with the error: \"{err:?}\"")] + ParseBencodeError { + data: hyper::body::Bytes, + err: Arc, + }, + + #[error("Failed to deserialize the bencoded response data with the error: \"{err:?}\"")] + BencodeParseError { + data: hyper::body::Bytes, + err: Arc, + }, +} + +impl From for String { + fn from(value: Error) -> Self { + value.to_string() + } +} diff --git a/src/console/clients/mod.rs b/src/console/clients/mod.rs new file mode 100644 index 000000000..8492f8ba5 --- /dev/null +++ b/src/console/clients/mod.rs @@ -0,0 +1,4 @@ +//! Console clients. +pub mod checker; +pub mod http; +pub mod udp; diff --git a/src/console/clients/udp/app.rs b/src/console/clients/udp/app.rs new file mode 100644 index 000000000..af6f10611 --- /dev/null +++ b/src/console/clients/udp/app.rs @@ -0,0 +1,209 @@ +//! UDP Tracker client: +//! +//! Examples: +//! +//! Announce request: +//! +//! ```text +//! cargo run --bin udp_tracker_client announce 127.0.0.1:6969 9c38422213e30bff212b30c360d26f9a02136422 | jq +//! ``` +//! +//! Announce response: +//! +//! ```json +//! { +//! "transaction_id": -888840697 +//! "announce_interval": 120, +//! "leechers": 0, +//! "seeders": 1, +//! "peers": [ +//! "123.123.123.123:51289" +//! ], +//! } +//! ``` +//! +//! Scrape request: +//! +//! ```text +//! cargo run --bin udp_tracker_client scrape 127.0.0.1:6969 9c38422213e30bff212b30c360d26f9a02136422 | jq +//! ``` +//! +//! Scrape response: +//! +//! ```json +//! { +//! "transaction_id": -888840697, +//! "torrent_stats": [ +//! { +//! "completed": 0, +//! "leechers": 0, +//! "seeders": 0 +//! }, +//! { +//! "completed": 0, +//! "leechers": 0, +//! "seeders": 0 +//! } +//! ] +//! } +//! ``` +//! +//! You can use an URL with instead of the socket address. For example: +//! +//! ```text +//! cargo run --bin udp_tracker_client scrape udp://localhost:6969 9c38422213e30bff212b30c360d26f9a02136422 | jq +//! 
cargo run --bin udp_tracker_client scrape udp://localhost:6969/scrape 9c38422213e30bff212b30c360d26f9a02136422 | jq +//! ``` +//! +//! The protocol (`udp://`) in the URL is mandatory. The path (`\scrape`) is optional. It always uses `\scrape`. +use std::net::{SocketAddr, ToSocketAddrs}; +use std::str::FromStr; + +use anyhow::Context; +use aquatic_udp_protocol::{Response, TransactionId}; +use clap::{Parser, Subcommand}; +use torrust_tracker_configuration::DEFAULT_TIMEOUT; +use torrust_tracker_primitives::info_hash::InfoHash as TorrustInfoHash; +use tracing::debug; +use tracing::level_filters::LevelFilter; +use url::Url; + +use super::Error; +use crate::console::clients::udp::checker; +use crate::console::clients::udp::responses::dto::SerializableResponse; +use crate::console::clients::udp::responses::json::ToJson; + +const RANDOM_TRANSACTION_ID: i32 = -888_840_697; + +#[derive(Parser, Debug)] +#[command(author, version, about, long_about = None)] +struct Args { + #[command(subcommand)] + command: Command, +} + +#[derive(Subcommand, Debug)] +enum Command { + Announce { + #[arg(value_parser = parse_socket_addr)] + tracker_socket_addr: SocketAddr, + #[arg(value_parser = parse_info_hash)] + info_hash: TorrustInfoHash, + }, + Scrape { + #[arg(value_parser = parse_socket_addr)] + tracker_socket_addr: SocketAddr, + #[arg(value_parser = parse_info_hash, num_args = 1..=74, value_delimiter = ' ')] + info_hashes: Vec, + }, +} + +/// # Errors +/// +/// Will return an error if the command fails. 
+/// +/// +pub async fn run() -> anyhow::Result<()> { + tracing_stdout_init(LevelFilter::INFO); + + let args = Args::parse(); + + let response = match args.command { + Command::Announce { + tracker_socket_addr: remote_addr, + info_hash, + } => handle_announce(remote_addr, &info_hash).await?, + Command::Scrape { + tracker_socket_addr: remote_addr, + info_hashes, + } => handle_scrape(remote_addr, &info_hashes).await?, + }; + + let response: SerializableResponse = response.into(); + let response_json = response.to_json_string()?; + + print!("{response_json}"); + + Ok(()) +} + +fn tracing_stdout_init(filter: LevelFilter) { + tracing_subscriber::fmt().with_max_level(filter).init(); + debug!("Logging initialized"); +} + +async fn handle_announce(remote_addr: SocketAddr, info_hash: &TorrustInfoHash) -> Result { + let transaction_id = TransactionId::new(RANDOM_TRANSACTION_ID); + + let client = checker::Client::new(remote_addr, DEFAULT_TIMEOUT).await?; + + let connection_id = client.send_connection_request(transaction_id).await?; + + client.send_announce_request(transaction_id, connection_id, *info_hash).await +} + +async fn handle_scrape(remote_addr: SocketAddr, info_hashes: &[TorrustInfoHash]) -> Result { + let transaction_id = TransactionId::new(RANDOM_TRANSACTION_ID); + + let client = checker::Client::new(remote_addr, DEFAULT_TIMEOUT).await?; + + let connection_id = client.send_connection_request(transaction_id).await?; + + client.send_scrape_request(connection_id, transaction_id, info_hashes).await +} + +fn parse_socket_addr(tracker_socket_addr_str: &str) -> anyhow::Result { + debug!("Tracker socket address: {tracker_socket_addr_str:#?}"); + + // Check if the address is a valid URL. If so, extract the host and port. + let resolved_addr = if let Ok(url) = Url::parse(tracker_socket_addr_str) { + debug!("Tracker socket address URL: {url:?}"); + + let host = url + .host_str() + .with_context(|| format!("invalid host in URL: `{tracker_socket_addr_str}`"))? 
+ .to_owned(); + + let port = url + .port() + .with_context(|| format!("port not found in URL: `{tracker_socket_addr_str}`"))? + .to_owned(); + + (host, port) + } else { + // If not a URL, assume it's a host:port pair. + + let parts: Vec<&str> = tracker_socket_addr_str.split(':').collect(); + + if parts.len() != 2 { + return Err(anyhow::anyhow!( + "invalid address format: `{}`. Expected format is host:port", + tracker_socket_addr_str + )); + } + + let host = parts[0].to_owned(); + + let port = parts[1] + .parse::() + .with_context(|| format!("invalid port: `{}`", parts[1]))? + .to_owned(); + + (host, port) + }; + + debug!("Resolved address: {resolved_addr:#?}"); + + // Perform DNS resolution. + let socket_addrs: Vec<_> = resolved_addr.to_socket_addrs()?.collect(); + if socket_addrs.is_empty() { + Err(anyhow::anyhow!("DNS resolution failed for `{}`", tracker_socket_addr_str)) + } else { + Ok(socket_addrs[0]) + } +} + +fn parse_info_hash(info_hash_str: &str) -> anyhow::Result { + TorrustInfoHash::from_str(info_hash_str) + .map_err(|e| anyhow::Error::msg(format!("failed to parse info-hash `{info_hash_str}`: {e:?}"))) +} diff --git a/src/console/clients/udp/checker.rs b/src/console/clients/udp/checker.rs new file mode 100644 index 000000000..49f0ac41f --- /dev/null +++ b/src/console/clients/udp/checker.rs @@ -0,0 +1,178 @@ +use std::net::{Ipv4Addr, SocketAddr}; +use std::num::NonZeroU16; +use std::time::Duration; + +use aquatic_udp_protocol::common::InfoHash; +use aquatic_udp_protocol::{ + AnnounceActionPlaceholder, AnnounceEvent, AnnounceRequest, ConnectRequest, ConnectionId, NumberOfBytes, NumberOfPeers, + PeerId, PeerKey, Port, Response, ScrapeRequest, TransactionId, +}; +use torrust_tracker_primitives::info_hash::InfoHash as TorrustInfoHash; +use tracing::debug; + +use super::Error; +use crate::shared::bit_torrent::tracker::udp::client::UdpTrackerClient; + +/// A UDP Tracker client to make test requests (checks). 
+#[derive(Debug)] +pub struct Client { + client: UdpTrackerClient, +} + +impl Client { + /// Creates a new `[Client]` for checking a UDP Tracker Service + /// + /// # Errors + /// + /// It will error if unable to bind and connect to the udp remote address. + /// + pub async fn new(remote_addr: SocketAddr, timeout: Duration) -> Result { + let client = UdpTrackerClient::new(remote_addr, timeout) + .await + .map_err(|err| Error::UnableToBindAndConnect { remote_addr, err })?; + + Ok(Self { client }) + } + + /// Returns the local addr of this [`Client`]. + /// + /// # Errors + /// + /// This function will return an error if the socket is somehow not bound. + pub fn local_addr(&self) -> std::io::Result { + self.client.client.socket.local_addr() + } + + /// Sends a connection request to the UDP Tracker server. + /// + /// # Errors + /// + /// Will return and error if + /// + /// - It can't connect to the remote UDP socket. + /// - It can't make a connection request successfully to the remote UDP + /// server (after successfully connecting to the remote UDP socket). + /// + /// # Panics + /// + /// Will panic if it receives an unexpected response. + pub async fn send_connection_request(&self, transaction_id: TransactionId) -> Result { + debug!("Sending connection request with transaction id: {transaction_id:#?}"); + + let connect_request = ConnectRequest { transaction_id }; + + let _ = self + .client + .send(connect_request.into()) + .await + .map_err(|err| Error::UnableToSendConnectionRequest { err })?; + + let response = self + .client + .receive() + .await + .map_err(|err| Error::UnableToReceiveConnectResponse { err })?; + + match response { + Response::Connect(connect_response) => Ok(connect_response.connection_id), + _ => Err(Error::UnexpectedConnectionResponse { response }), + } + } + + /// Sends an announce request to the UDP Tracker server. + /// + /// # Errors + /// + /// Will return and error if the client is not connected. 
You have to connect + /// before calling this function. + /// + /// # Panics + /// + /// It will panic if the `local_address` has a zero port. + pub async fn send_announce_request( + &self, + transaction_id: TransactionId, + connection_id: ConnectionId, + info_hash: TorrustInfoHash, + ) -> Result { + debug!("Sending announce request with transaction id: {transaction_id:#?}"); + + let port = NonZeroU16::new( + self.client + .client + .socket + .local_addr() + .expect("it should get the local address") + .port(), + ) + .expect("it should no be zero"); + + let announce_request = AnnounceRequest { + connection_id, + action_placeholder: AnnounceActionPlaceholder::default(), + transaction_id, + info_hash: InfoHash(info_hash.bytes()), + peer_id: PeerId(*b"-qB00000000000000001"), + bytes_downloaded: NumberOfBytes(0i64.into()), + bytes_uploaded: NumberOfBytes(0i64.into()), + bytes_left: NumberOfBytes(0i64.into()), + event: AnnounceEvent::Started.into(), + ip_address: Ipv4Addr::new(0, 0, 0, 0).into(), + key: PeerKey::new(0i32), + peers_wanted: NumberOfPeers(1i32.into()), + port: Port::new(port), + }; + + let _ = self + .client + .send(announce_request.into()) + .await + .map_err(|err| Error::UnableToSendAnnounceRequest { err })?; + + let response = self + .client + .receive() + .await + .map_err(|err| Error::UnableToReceiveAnnounceResponse { err })?; + + Ok(response) + } + + /// Sends a scrape request to the UDP Tracker server. + /// + /// # Errors + /// + /// Will return and error if the client is not connected. You have to connect + /// before calling this function. 
+ pub async fn send_scrape_request( + &self, + connection_id: ConnectionId, + transaction_id: TransactionId, + info_hashes: &[TorrustInfoHash], + ) -> Result { + debug!("Sending scrape request with transaction id: {transaction_id:#?}"); + + let scrape_request = ScrapeRequest { + connection_id, + transaction_id, + info_hashes: info_hashes + .iter() + .map(|torrust_info_hash| InfoHash(torrust_info_hash.bytes())) + .collect(), + }; + + let _ = self + .client + .send(scrape_request.into()) + .await + .map_err(|err| Error::UnableToSendScrapeRequest { err })?; + + let response = self + .client + .receive() + .await + .map_err(|err| Error::UnableToReceiveScrapeResponse { err })?; + + Ok(response) + } +} diff --git a/src/console/clients/udp/mod.rs b/src/console/clients/udp/mod.rs new file mode 100644 index 000000000..b92bed096 --- /dev/null +++ b/src/console/clients/udp/mod.rs @@ -0,0 +1,51 @@ +use std::net::SocketAddr; + +use aquatic_udp_protocol::Response; +use serde::Serialize; +use thiserror::Error; + +use crate::shared::bit_torrent::tracker::udp; + +pub mod app; +pub mod checker; +pub mod responses; + +#[derive(Error, Debug, Clone, Serialize)] +#[serde(into = "String")] +pub enum Error { + #[error("Failed to Connect to: {remote_addr}, with error: {err}")] + UnableToBindAndConnect { remote_addr: SocketAddr, err: udp::Error }, + + #[error("Failed to send a connection request, with error: {err}")] + UnableToSendConnectionRequest { err: udp::Error }, + + #[error("Failed to receive a connect response, with error: {err}")] + UnableToReceiveConnectResponse { err: udp::Error }, + + #[error("Failed to send a announce request, with error: {err}")] + UnableToSendAnnounceRequest { err: udp::Error }, + + #[error("Failed to receive a announce response, with error: {err}")] + UnableToReceiveAnnounceResponse { err: udp::Error }, + + #[error("Failed to send a scrape request, with error: {err}")] + UnableToSendScrapeRequest { err: udp::Error }, + + #[error("Failed to receive a scrape 
response, with error: {err}")] + UnableToReceiveScrapeResponse { err: udp::Error }, + + #[error("Failed to receive a response, with error: {err}")] + UnableToReceiveResponse { err: udp::Error }, + + #[error("Failed to get local address for connection: {err}")] + UnableToGetLocalAddr { err: udp::Error }, + + #[error("Failed to get a connection response: {response:?}")] + UnexpectedConnectionResponse { response: Response }, +} + +impl From for String { + fn from(value: Error) -> Self { + value.to_string() + } +} diff --git a/src/console/clients/udp/responses/dto.rs b/src/console/clients/udp/responses/dto.rs new file mode 100644 index 000000000..93320b0f7 --- /dev/null +++ b/src/console/clients/udp/responses/dto.rs @@ -0,0 +1,128 @@ +//! Aquatic responses are not serializable. These are the serializable wrappers. +use std::net::{Ipv4Addr, Ipv6Addr}; + +use aquatic_udp_protocol::Response::{self}; +use aquatic_udp_protocol::{AnnounceResponse, ConnectResponse, ErrorResponse, Ipv4AddrBytes, Ipv6AddrBytes, ScrapeResponse}; +use serde::Serialize; + +#[derive(Serialize)] +pub enum SerializableResponse { + Connect(ConnectSerializableResponse), + AnnounceIpv4(AnnounceSerializableResponse), + AnnounceIpv6(AnnounceSerializableResponse), + Scrape(ScrapeSerializableResponse), + Error(ErrorSerializableResponse), +} + +impl From for SerializableResponse { + fn from(response: Response) -> Self { + match response { + Response::Connect(response) => SerializableResponse::Connect(ConnectSerializableResponse::from(response)), + Response::AnnounceIpv4(response) => SerializableResponse::AnnounceIpv4(AnnounceSerializableResponse::from(response)), + Response::AnnounceIpv6(response) => SerializableResponse::AnnounceIpv6(AnnounceSerializableResponse::from(response)), + Response::Scrape(response) => SerializableResponse::Scrape(ScrapeSerializableResponse::from(response)), + Response::Error(response) => SerializableResponse::Error(ErrorSerializableResponse::from(response)), + } + } +} + 
+#[derive(Serialize)] +pub struct ConnectSerializableResponse { + transaction_id: i32, + connection_id: i64, +} + +impl From for ConnectSerializableResponse { + fn from(connect: ConnectResponse) -> Self { + Self { + transaction_id: connect.transaction_id.0.into(), + connection_id: connect.connection_id.0.into(), + } + } +} + +#[derive(Serialize)] +pub struct AnnounceSerializableResponse { + transaction_id: i32, + announce_interval: i32, + leechers: i32, + seeders: i32, + peers: Vec, +} + +impl From> for AnnounceSerializableResponse { + fn from(announce: AnnounceResponse) -> Self { + Self { + transaction_id: announce.fixed.transaction_id.0.into(), + announce_interval: announce.fixed.announce_interval.0.into(), + leechers: announce.fixed.leechers.0.into(), + seeders: announce.fixed.seeders.0.into(), + peers: announce + .peers + .iter() + .map(|peer| format!("{}:{}", Ipv4Addr::from(peer.ip_address), peer.port.0)) + .collect::>(), + } + } +} + +impl From> for AnnounceSerializableResponse { + fn from(announce: AnnounceResponse) -> Self { + Self { + transaction_id: announce.fixed.transaction_id.0.into(), + announce_interval: announce.fixed.announce_interval.0.into(), + leechers: announce.fixed.leechers.0.into(), + seeders: announce.fixed.seeders.0.into(), + peers: announce + .peers + .iter() + .map(|peer| format!("{}:{}", Ipv6Addr::from(peer.ip_address), peer.port.0)) + .collect::>(), + } + } +} + +#[derive(Serialize)] +pub struct ScrapeSerializableResponse { + transaction_id: i32, + torrent_stats: Vec, +} + +impl From for ScrapeSerializableResponse { + fn from(scrape: ScrapeResponse) -> Self { + Self { + transaction_id: scrape.transaction_id.0.into(), + torrent_stats: scrape + .torrent_stats + .iter() + .map(|torrent_scrape_statistics| TorrentStats { + seeders: torrent_scrape_statistics.seeders.0.into(), + completed: torrent_scrape_statistics.completed.0.into(), + leechers: torrent_scrape_statistics.leechers.0.into(), + }) + .collect::>(), + } + } +} + 
+#[derive(Serialize)] +pub struct ErrorSerializableResponse { + transaction_id: i32, + message: String, +} + +impl From for ErrorSerializableResponse { + fn from(error: ErrorResponse) -> Self { + Self { + transaction_id: error.transaction_id.0.into(), + message: error.message.to_string(), + } + } +} + +#[derive(Serialize)] +struct TorrentStats { + seeders: i32, + completed: i32, + leechers: i32, +} diff --git a/src/console/clients/udp/responses/json.rs b/src/console/clients/udp/responses/json.rs new file mode 100644 index 000000000..5d2bd6b89 --- /dev/null +++ b/src/console/clients/udp/responses/json.rs @@ -0,0 +1,25 @@ +use anyhow::Context; +use serde::Serialize; + +use super::dto::SerializableResponse; + +#[allow(clippy::module_name_repetitions)] +pub trait ToJson { + /// + /// Returns a string with the JSON serialized version of the response + /// + /// # Errors + /// + /// Will return an error if serialization fails. + /// + fn to_json_string(&self) -> anyhow::Result + where + Self: Serialize, + { + let pretty_json = serde_json::to_string_pretty(self).context("response JSON serialization")?; + + Ok(pretty_json) + } +} + +impl ToJson for SerializableResponse {} diff --git a/src/console/clients/udp/responses/mod.rs b/src/console/clients/udp/responses/mod.rs new file mode 100644 index 000000000..e6d2e5e51 --- /dev/null +++ b/src/console/clients/udp/responses/mod.rs @@ -0,0 +1,2 @@ +pub mod dto; +pub mod json; diff --git a/src/console/mod.rs b/src/console/mod.rs new file mode 100644 index 000000000..dab338e4b --- /dev/null +++ b/src/console/mod.rs @@ -0,0 +1,4 @@ +//! Console apps. +pub mod ci; +pub mod clients; +pub mod profiling; diff --git a/src/console/profiling.rs b/src/console/profiling.rs new file mode 100644 index 000000000..3e2925d9c --- /dev/null +++ b/src/console/profiling.rs @@ -0,0 +1,202 @@ +//! This binary is used for profiling with [valgrind](https://valgrind.org/) +//! and [kcachegrind](https://kcachegrind.github.io/). +//! +//! # Requirements +//! 
+//! [valgrind](https://valgrind.org/) and [kcachegrind](https://kcachegrind.github.io/). +//! +//! On Ubuntu you can install them with: +//! +//! ```text +//! sudo apt install valgrind kcachegrind +//! ``` +//! +//! > NOTICE: valgrind executes the program you want to profile and waits until +//! > it ends. Since the tracker is a service and does not end, the profiling +//! > binary accepts an argument with the duration you want to run the tracker, +//! > so that it terminates automatically after that period of time. +//! +//! # Run profiling +//! +//! To run the profiling you have to: +//! +//! 1. Build and run the tracker for profiling. +//! 2. Run the aquatic UDP load test tool to start collecting data in the tracker. +//! +//! Build and run the tracker for profiling: +//! +//! ```text +//! RUSTFLAGS='-g' cargo build --release --bin profiling \ +//! && export TORRUST_TRACKER_CONFIG_TOML_PATH="./share/default/config/tracker.udp.benchmarking.toml" \ +//! && valgrind \ +//! --tool=callgrind \ +//! --callgrind-out-file=callgrind.out \ +//! --collect-jumps=yes \ +//! --simulate-cache=yes \ +//! ./target/release/profiling 60 +//! ``` +//! +//! The output should be something like: +//! +//! ```text +//! RUSTFLAGS='-g' cargo build --release --bin profiling \ +//! && export TORRUST_TRACKER_CONFIG_TOML_PATH="./share/default/config/tracker.udp.benchmarking.toml" \ +//! && valgrind \ +//! --tool=callgrind \ +//! --callgrind-out-file=callgrind.out \ +//! --collect-jumps=yes \ +//! --simulate-cache=yes \ +//! ./target/release/profiling 60 +//! +//! Compiling torrust-tracker v3.0.0-alpha.12-develop (/home/developer/Documents/git/committer/me/github/torrust/torrust-tracker) +//! Finished `release` profile [optimized + debuginfo] target(s) in 1m 15s +//! ==122801== Callgrind, a call-graph generating cache profiler +//! ==122801== Copyright (C) 2002-2017, and GNU GPL'd, by Josef Weidendorfer et al. +//! 
==122801== Using Valgrind-3.19.0 and LibVEX; rerun with -h for copyright info +//! ==122801== Command: ./target/release/profiling 60 +//! ==122801== +//! --122801-- warning: L3 cache found, using its data for the LL simulation. +//! ==122801== For interactive control, run 'callgrind_control -h'. +//! Loading configuration file: `./share/default/config/tracker.udp.benchmarking.toml` ... +//! Torrust successfully shutdown. +//! ==122801== +//! ==122801== Events : Ir Dr Dw I1mr D1mr D1mw ILmr DLmr DLmw +//! ==122801== Collected : 1160654816 278135882 247755311 24453652 12650490 16315690 10932 2481624 4832145 +//! ==122801== +//! ==122801== I refs: 1,160,654,816 +//! ==122801== I1 misses: 24,453,652 +//! ==122801== LLi misses: 10,932 +//! ==122801== I1 miss rate: 2.11% +//! ==122801== LLi miss rate: 0.00% +//! ==122801== +//! ==122801== D refs: 525,891,193 (278,135,882 rd + 247,755,311 wr) +//! ==122801== D1 misses: 28,966,180 ( 12,650,490 rd + 16,315,690 wr) +//! ==122801== LLd misses: 7,313,769 ( 2,481,624 rd + 4,832,145 wr) +//! ==122801== D1 miss rate: 5.5% ( 4.5% + 6.6% ) +//! ==122801== LLd miss rate: 1.4% ( 0.9% + 2.0% ) +//! ==122801== +//! ==122801== LL refs: 53,419,832 ( 37,104,142 rd + 16,315,690 wr) +//! ==122801== LL misses: 7,324,701 ( 2,492,556 rd + 4,832,145 wr) +//! ==122801== LL miss rate: 0.4% ( 0.2% + 2.0% ) +//! ``` +//! +//! > NOTICE: We are using an specific tracker configuration for profiling that +//! > removes all features except the UDP tracker and sets the logging level to `error`. +//! +//! Build the aquatic UDP load test command: +//! +//! ```text +//! cd /tmp +//! git clone git@github.com:greatest-ape/aquatic.git +//! cd aquatic +//! cargo build --profile=release-debug -p aquatic_udp_load_test +//! ./target/release-debug/aquatic_udp_load_test -p > "load-test-config.toml" +//! ``` +//! +//! Modify the "load-test-config.toml" file to change the UDP tracker port from +//! `3000` to `6969`. +//! +//! 
Running the aquatic UDP load test command: +//! +//! ```text +//! ./target/release-debug/aquatic_udp_load_test -c "load-test-config.toml" +//! ``` +//! +//! The output should be something like this: +//! +//! ```text +//! Starting client with config: Config { +//! server_address: 127.0.0.1:6969, +//! log_level: Error, +//! workers: 1, +//! duration: 0, +//! summarize_last: 0, +//! extra_statistics: true, +//! network: NetworkConfig { +//! multiple_client_ipv4s: true, +//! sockets_per_worker: 4, +//! recv_buffer: 8000000, +//! }, +//! requests: RequestConfig { +//! number_of_torrents: 1000000, +//! number_of_peers: 2000000, +//! scrape_max_torrents: 10, +//! announce_peers_wanted: 30, +//! weight_connect: 50, +//! weight_announce: 50, +//! weight_scrape: 1, +//! peer_seeder_probability: 0.75, +//! }, +//! } +//! +//! Requests out: 45097.51/second +//! Responses in: 4212.70/second +//! - Connect responses: 2098.15 +//! - Announce responses: 2074.95 +//! - Scrape responses: 39.59 +//! - Error responses: 0.00 +//! Peers per announce response: 0.00 +//! Announce responses per info hash: +//! - p10: 1 +//! - p25: 1 +//! - p50: 1 +//! - p75: 2 +//! - p90: 3 +//! - p95: 4 +//! - p99: 6 +//! - p99.9: 8 +//! - p100: 10 +//! ``` +//! +//! After running the tracker for some seconds, the tracker will automatically stop +//! and `valgrind` will write the file `callgrind.out` with the data. +//! +//! You can now analyze the collected data with: +//! +//! ```text +//! kcachegrind callgrind.out +//! 
``` +use std::env; +use std::time::Duration; + +use tokio::time::sleep; +use tracing::info; + +use crate::{app, bootstrap}; + +pub async fn run() { + // Parse command line arguments + let args: Vec = env::args().collect(); + + // Ensure an argument for duration is provided + if args.len() != 2 { + eprintln!("Usage: {} ", args[0]); + return; + } + + // Parse duration argument + let Ok(duration_secs) = args[1].parse::() else { + eprintln!("Invalid duration provided"); + return; + }; + + let (config, tracker) = bootstrap::app::setup(); + + let jobs = app::start(&config, tracker).await; + + // Run the tracker for a fixed duration + let run_duration = sleep(Duration::from_secs(duration_secs)); + + tokio::select! { + () = run_duration => { + info!("Torrust timed shutdown.."); + }, + _ = tokio::signal::ctrl_c() => { + info!("Torrust shutting down via Ctrl+C ..."); + // Await for all jobs to shutdown + futures::future::join_all(jobs).await; + } + } + + println!("Torrust successfully shutdown."); +} diff --git a/src/core/auth.rs b/src/core/auth.rs new file mode 100644 index 000000000..61ccbdb52 --- /dev/null +++ b/src/core/auth.rs @@ -0,0 +1,347 @@ +//! Tracker authentication services and structs. +//! +//! This module contains functions to handle tracker keys. +//! Tracker keys are tokens used to authenticate the tracker clients when the tracker runs +//! in `private` or `private_listed` modes. +//! +//! There are services to [`generate_key`] and [`verify_key_expiration`] authentication keys. +//! +//! Authentication keys are used only by [`HTTP`](crate::servers::http) trackers. All keys have an expiration time, that means +//! they are only valid during a period of time. After that time the expiring key will no longer be valid. +//! +//! Keys are stored in this struct: +//! +//! ```rust,no_run +//! use torrust_tracker::core::auth::Key; +//! use torrust_tracker_primitives::DurationSinceUnixEpoch; +//! +//! pub struct ExpiringKey { +//! /// Random 32-char string. 
For example: `YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ` +//! pub key: Key, +//! /// Timestamp, the key will be no longer valid after this timestamp +//! pub valid_until: Option, +//! } +//! ``` +//! +//! You can generate a new key valid for `9999` seconds and `0` nanoseconds from the current time with the following: +//! +//! ```rust,no_run +//! use torrust_tracker::core::auth; +//! use std::time::Duration; +//! +//! let expiring_key = auth::generate_key(Some(Duration::new(9999, 0))); +//! +//! // And you can later verify it with: +//! +//! assert!(auth::verify_key_expiration(&expiring_key).is_ok()); +//! ``` + +use std::panic::Location; +use std::str::FromStr; +use std::sync::Arc; +use std::time::Duration; + +use derive_more::Display; +use rand::distributions::Alphanumeric; +use rand::{thread_rng, Rng}; +use serde::{Deserialize, Serialize}; +use thiserror::Error; +use torrust_tracker_clock::clock::Time; +use torrust_tracker_clock::conv::convert_from_timestamp_to_datetime_utc; +use torrust_tracker_located_error::{DynError, LocatedError}; +use torrust_tracker_primitives::DurationSinceUnixEpoch; +use tracing::debug; + +use crate::shared::bit_torrent::common::AUTH_KEY_LENGTH; +use crate::CurrentClock; + +/// It generates a new permanent random key [`PeerKey`]. +#[must_use] +pub fn generate_permanent_key() -> PeerKey { + generate_key(None) +} + +/// It generates a new random 32-char authentication [`PeerKey`]. +/// +/// It can be an expiring or permanent key. +/// +/// # Panics +/// +/// It would panic if the `lifetime: Duration` + Duration is more than `Duration::MAX`. +/// +/// # Arguments +/// +/// * `lifetime`: if `None` the key will be permanent. 
+#[must_use] +pub fn generate_key(lifetime: Option) -> PeerKey { + let random_id: String = thread_rng() + .sample_iter(&Alphanumeric) + .take(AUTH_KEY_LENGTH) + .map(char::from) + .collect(); + + if let Some(lifetime) = lifetime { + debug!("Generated key: {}, valid for: {:?} seconds", random_id, lifetime); + + PeerKey { + key: random_id.parse::().unwrap(), + valid_until: Some(CurrentClock::now_add(&lifetime).unwrap()), + } + } else { + debug!("Generated key: {}, permanent", random_id); + + PeerKey { + key: random_id.parse::().unwrap(), + valid_until: None, + } + } +} + +/// It verifies a [`PeerKey`]. It checks if the expiration date has passed. +/// Permanent keys without duration (`None`) do not expire. +/// +/// # Errors +/// +/// Will return: +/// +/// - `Error::KeyExpired` if `auth_key.valid_until` is past the `current_time`. +/// - No error for permanent keys: a `None` `auth_key.valid_until` never expires. +pub fn verify_key_expiration(auth_key: &PeerKey) -> Result<(), Error> { + let current_time: DurationSinceUnixEpoch = CurrentClock::now(); + + match auth_key.valid_until { + Some(valid_until) => { + if valid_until < current_time { + Err(Error::KeyExpired { + location: Location::caller(), + }) + } else { + Ok(()) + } + } + None => Ok(()), // Permanent key + } +} + +/// An authentication key which can potentially have an expiration time. +/// After that time it will automatically become invalid. +#[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone)] +pub struct PeerKey { + /// Random 32-char string. For example: `YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ` + pub key: Key, + + /// Timestamp, the key will be no longer valid after this timestamp. + /// If `None` the keys will not expire (permanent key). 
+ pub valid_until: Option, +} + +impl std::fmt::Display for PeerKey { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self.expiry_time() { + Some(expire_time) => write!(f, "key: `{}`, valid until `{}`", self.key, expire_time), + None => write!(f, "key: `{}`, permanent", self.key), + } + } +} + +impl PeerKey { + #[must_use] + pub fn key(&self) -> Key { + self.key.clone() + } + + /// It returns the expiry time. For example, for the starting time for Unix Epoch + /// (timestamp 0) it will return a `DateTime` whose string representation is + /// `1970-01-01 00:00:00 UTC`. + /// + /// # Panics + /// + /// Will panic when the key timestamp overflows the internal i64 type. + /// (this will naturally happen in 292.5 billion years) + #[must_use] + pub fn expiry_time(&self) -> Option> { + self.valid_until.map(convert_from_timestamp_to_datetime_utc) + } +} + +/// A token used for authentication. +/// +/// - It contains only ascii alphanumeric chars: lower and uppercase letters and +/// numbers. +/// - It's a 32-char string. +#[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone, Display, Hash)] +pub struct Key(String); + +impl Key { + /// # Errors + /// + /// Will return an error if the string represents an invalid key. + /// Valid keys can only contain 32 chars including 0-9, a-z and A-Z. + pub fn new(value: &str) -> Result { + if value.len() != AUTH_KEY_LENGTH { + return Err(ParseKeyError::InvalidKeyLength); + } + + if !value.chars().all(|c| c.is_ascii_alphanumeric()) { + return Err(ParseKeyError::InvalidChars); + } + + Ok(Self(value.to_owned())) + } + + #[must_use] + pub fn value(&self) -> &str { + &self.0 + } +} + +/// Error returned when a key cannot be parsed from a string. 
+/// +/// ```rust,no_run +/// use torrust_tracker::core::auth::Key; +/// use std::str::FromStr; +/// +/// let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; +/// let key = Key::from_str(key_string); +/// +/// assert!(key.is_ok()); +/// assert_eq!(key.unwrap().to_string(), key_string); +/// ``` +/// +/// If the string does not contains a valid key, the parser function will return +/// this error. +#[derive(Debug, Error)] +pub enum ParseKeyError { + #[error("Invalid key length. Key must be have 32 chars")] + InvalidKeyLength, + #[error("Invalid chars for key. Key can only alphanumeric chars (0-9, a-z, A-Z)")] + InvalidChars, +} + +impl FromStr for Key { + type Err = ParseKeyError; + + fn from_str(s: &str) -> Result { + Key::new(s)?; + Ok(Self(s.to_string())) + } +} + +/// Verification error. Error returned when an [`PeerKey`] cannot be +/// verified with the (`crate::core::auth::verify_key`) function. +#[derive(Debug, Error)] +#[allow(dead_code)] +pub enum Error { + #[error("Key could not be verified: {source}")] + KeyVerificationError { + source: LocatedError<'static, dyn std::error::Error + Send + Sync>, + }, + #[error("Failed to read key: {key}, {location}")] + UnableToReadKey { + location: &'static Location<'static>, + key: Box, + }, + #[error("Key has expired, {location}")] + KeyExpired { location: &'static Location<'static> }, +} + +impl From for Error { + fn from(e: r2d2_sqlite::rusqlite::Error) -> Self { + Error::KeyVerificationError { + source: (Arc::new(e) as DynError).into(), + } + } +} + +#[cfg(test)] +mod tests { + + mod key { + use std::str::FromStr; + + use crate::core::auth::Key; + + #[test] + fn should_be_parsed_from_an_string() { + let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; + let key = Key::from_str(key_string); + + assert!(key.is_ok()); + assert_eq!(key.unwrap().to_string(), key_string); + } + + #[test] + fn length_should_be_32() { + let key = Key::new(""); + assert!(key.is_err()); + + let string_longer_than_32 = 
"012345678901234567890123456789012"; // DevSkim: ignore DS173237 + let key = Key::new(string_longer_than_32); + assert!(key.is_err()); + } + + #[test] + fn should_only_include_alphanumeric_chars() { + let key = Key::new("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%"); + assert!(key.is_err()); + } + } + + mod expiring_auth_key { + use std::str::FromStr; + use std::time::Duration; + + use torrust_tracker_clock::clock; + use torrust_tracker_clock::clock::stopped::Stopped as _; + + use crate::core::auth; + + #[test] + fn should_be_parsed_from_an_string() { + let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; + let auth_key = auth::Key::from_str(key_string); + + assert!(auth_key.is_ok()); + assert_eq!(auth_key.unwrap().to_string(), key_string); + } + + #[test] + fn should_be_displayed() { + // Set the time to the current time. + clock::Stopped::local_set_to_unix_epoch(); + + let expiring_key = auth::generate_key(Some(Duration::from_secs(0))); + + assert_eq!( + expiring_key.to_string(), + format!("key: `{}`, valid until `1970-01-01 00:00:00 UTC`", expiring_key.key) // cspell:disable-line + ); + } + + #[test] + fn should_be_generated_with_a_expiration_time() { + let expiring_key = auth::generate_key(Some(Duration::new(9999, 0))); + + assert!(auth::verify_key_expiration(&expiring_key).is_ok()); + } + + #[test] + fn should_be_generate_and_verified() { + // Set the time to the current time. + clock::Stopped::local_set_to_system_time_now(); + + // Make key that is valid for 19 seconds. + let expiring_key = auth::generate_key(Some(Duration::from_secs(19))); + + // Mock the time has passed 10 sec. + clock::Stopped::local_add(&Duration::from_secs(10)).unwrap(); + + assert!(auth::verify_key_expiration(&expiring_key).is_ok()); + + // Mock the time has passed another 10 sec. 
+ clock::Stopped::local_add(&Duration::from_secs(10)).unwrap(); + + assert!(auth::verify_key_expiration(&expiring_key).is_err()); + } + } +} diff --git a/src/tracker/databases/driver.rs b/src/core/databases/driver.rs similarity index 53% rename from src/tracker/databases/driver.rs rename to src/core/databases/driver.rs index 19cb7046e..a456a2650 100644 --- a/src/tracker/databases/driver.rs +++ b/src/core/databases/driver.rs @@ -1,23 +1,39 @@ //! Database driver factory. //! -//! See [`databases::driver::build`](crate::tracker::databases::driver::build) +//! See [`databases::driver::build`](crate::core::databases::driver::build) //! function for more information. -use torrust_tracker_primitives::DatabaseDriver; +use serde::{Deserialize, Serialize}; use super::error::Error; use super::mysql::Mysql; use super::sqlite::Sqlite; use super::{Builder, Database}; +/// The database management system used by the tracker. +/// +/// Refer to: +/// +/// - [Torrust Tracker Configuration](https://docs.rs/torrust-tracker-configuration). +/// - [Torrust Tracker](https://docs.rs/torrust-tracker). +/// +/// For more information about persistence. +#[derive(Serialize, Deserialize, PartialEq, Eq, Debug, derive_more::Display, Clone)] +pub enum Driver { + /// The Sqlite3 database driver. + Sqlite3, + /// The `MySQL` database driver. + MySQL, +} + /// It builds a new database driver. 
/// /// Example for `SQLite3`: /// /// ```rust,no_run -/// use torrust_tracker::tracker::databases; -/// use torrust_tracker_primitives::DatabaseDriver; +/// use torrust_tracker::core::databases; +/// use torrust_tracker::core::databases::driver::Driver; /// -/// let db_driver = DatabaseDriver::Sqlite3; +/// let db_driver = Driver::Sqlite3; /// let db_path = "./storage/tracker/lib/database/sqlite3.db".to_string(); /// let database = databases::driver::build(&db_driver, &db_path); /// ``` @@ -25,10 +41,10 @@ use super::{Builder, Database}; /// Example for `MySQL`: /// /// ```rust,no_run -/// use torrust_tracker::tracker::databases; -/// use torrust_tracker_primitives::DatabaseDriver; +/// use torrust_tracker::core::databases; +/// use torrust_tracker::core::databases::driver::Driver; /// -/// let db_driver = DatabaseDriver::MySQL; +/// let db_driver = Driver::MySQL; /// let db_path = "mysql://db_user:db_user_secret_password@mysql:3306/torrust_tracker".to_string(); /// let database = databases::driver::build(&db_driver, &db_path); /// ``` @@ -45,10 +61,10 @@ use super::{Builder, Database}; /// # Panics /// /// This function will panic if unable to create database tables. -pub fn build(driver: &DatabaseDriver, db_path: &str) -> Result, Error> { +pub fn build(driver: &Driver, db_path: &str) -> Result, Error> { let database = match driver { - DatabaseDriver::Sqlite3 => Builder::::build(db_path), - DatabaseDriver::MySQL => Builder::::build(db_path), + Driver::Sqlite3 => Builder::::build(db_path), + Driver::MySQL => Builder::::build(db_path), }?; database.create_database_tables().expect("Could not create database tables."); diff --git a/src/tracker/databases/error.rs b/src/core/databases/error.rs similarity index 71% rename from src/tracker/databases/error.rs rename to src/core/databases/error.rs index d89ec05de..4d64baf48 100644 --- a/src/tracker/databases/error.rs +++ b/src/core/databases/error.rs @@ -1,12 +1,13 @@ //! Database errors. //! -//! 
This module contains the [Database errors](crate::tracker::databases::error::Error). +//! This module contains the [Database errors](crate::core::databases::error::Error). use std::panic::Location; use std::sync::Arc; use r2d2_mysql::mysql::UrlError; -use torrust_tracker_located_error::{Located, LocatedError}; -use torrust_tracker_primitives::DatabaseDriver; +use torrust_tracker_located_error::{DynError, Located, LocatedError}; + +use super::driver::Driver; #[derive(thiserror::Error, Debug, Clone)] pub enum Error { @@ -14,21 +15,21 @@ pub enum Error { #[error("The {driver} query unexpectedly returned nothing: {source}")] QueryReturnedNoRows { source: LocatedError<'static, dyn std::error::Error + Send + Sync>, - driver: DatabaseDriver, + driver: Driver, }, /// The query was malformed. #[error("The {driver} query was malformed: {source}")] InvalidQuery { source: LocatedError<'static, dyn std::error::Error + Send + Sync>, - driver: DatabaseDriver, + driver: Driver, }, /// Unable to insert a record into the database #[error("Unable to insert record into {driver} database, {location}")] InsertFailed { location: &'static Location<'static>, - driver: DatabaseDriver, + driver: Driver, }, /// Unable to delete a record into the database @@ -36,21 +37,21 @@ pub enum Error { DeleteFailed { location: &'static Location<'static>, error_code: usize, - driver: DatabaseDriver, + driver: Driver, }, /// Unable to connect to the database #[error("Failed to connect to {driver} database: {source}")] ConnectionError { source: LocatedError<'static, UrlError>, - driver: DatabaseDriver, + driver: Driver, }, /// Unable to create a connection pool #[error("Failed to create r2d2 {driver} connection pool: {source}")] ConnectionPool { source: LocatedError<'static, r2d2::Error>, - driver: DatabaseDriver, + driver: Driver, }, } @@ -59,12 +60,12 @@ impl From for Error { fn from(err: r2d2_sqlite::rusqlite::Error) -> Self { match err { r2d2_sqlite::rusqlite::Error::QueryReturnedNoRows => 
Error::QueryReturnedNoRows { - source: (Arc::new(err) as Arc).into(), - driver: DatabaseDriver::Sqlite3, + source: (Arc::new(err) as DynError).into(), + driver: Driver::Sqlite3, }, _ => Error::InvalidQuery { - source: (Arc::new(err) as Arc).into(), - driver: DatabaseDriver::Sqlite3, + source: (Arc::new(err) as DynError).into(), + driver: Driver::Sqlite3, }, } } @@ -73,10 +74,10 @@ impl From for Error { impl From for Error { #[track_caller] fn from(err: r2d2_mysql::mysql::Error) -> Self { - let e: Arc = Arc::new(err); + let e: DynError = Arc::new(err); Error::InvalidQuery { source: e.into(), - driver: DatabaseDriver::MySQL, + driver: Driver::MySQL, } } } @@ -86,14 +87,14 @@ impl From for Error { fn from(err: UrlError) -> Self { Self::ConnectionError { source: Located(err).into(), - driver: DatabaseDriver::MySQL, + driver: Driver::MySQL, } } } -impl From<(r2d2::Error, DatabaseDriver)> for Error { +impl From<(r2d2::Error, Driver)> for Error { #[track_caller] - fn from(e: (r2d2::Error, DatabaseDriver)) -> Self { + fn from(e: (r2d2::Error, Driver)) -> Self { let (err, driver) = e; Self::ConnectionPool { source: Located(err).into(), diff --git a/src/tracker/databases/mod.rs b/src/core/databases/mod.rs similarity index 72% rename from src/tracker/databases/mod.rs rename to src/core/databases/mod.rs index e0a26be23..f559eb80e 100644 --- a/src/tracker/databases/mod.rs +++ b/src/core/databases/mod.rs @@ -1,14 +1,14 @@ //! The persistence module. //! -//! Persistence is currently implemented with one [`Database`](crate::tracker::databases::Database) trait. +//! Persistence is currently implemented with one [`Database`] trait. //! //! There are two implementations of the trait (two drivers): //! -//! - [`Mysql`](crate::tracker::databases::mysql::Mysql) -//! - [`Sqlite`](crate::tracker::databases::sqlite::Sqlite) +//! - [`Mysql`](crate::core::databases::mysql::Mysql) +//! - [`Sqlite`](crate::core::databases::sqlite::Sqlite) //! //! 
> **NOTICE**: There are no database migrations. If there are any changes, -//! we will implemented them or provide a script to migrate to the new schema. +//! > we will implemented them or provide a script to migrate to the new schema. //! //! The persistent objects are: //! @@ -22,10 +22,10 @@ //! ---|---|--- //! `id` | 1 | Autoincrement id //! `info_hash` | `c1277613db1d28709b034a017ab2cae4be07ae10` | `BitTorrent` infohash V1 -//! `completed` | 20 | The number of peers that have ever completed downloading the torrent associated to this entry. See [`Entry`](crate::tracker::torrent::Entry) for more information. +//! `completed` | 20 | The number of peers that have ever completed downloading the torrent associated to this entry. See [`Entry`](torrust_tracker_torrent_repository::entry::Entry) for more information. //! //! > **NOTICE**: The peer list for a torrent is not persisted. Since peer have to re-announce themselves on intervals, the data is be -//! regenerated again after some minutes. +//! > regenerated again after some minutes. //! //! # Torrent whitelist //! @@ -50,11 +50,11 @@ pub mod sqlite; use std::marker::PhantomData; -use async_trait::async_trait; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::PersistentTorrents; use self::error::Error; -use crate::shared::bit_torrent::info_hash::InfoHash; -use crate::tracker::auth::{self, Key}; +use crate::core::auth::{self, Key}; struct Builder where @@ -78,7 +78,6 @@ where } /// The persistence trait. It contains all the methods to interact with the database. -#[async_trait] pub trait Database: Sync + Send { /// It instantiates a new database driver. /// @@ -115,17 +114,17 @@ pub trait Database: Sync + Send { /// It loads the torrent metrics data from the database. 
/// /// It returns an array of tuples with the torrent - /// [`InfoHash`](crate::shared::bit_torrent::info_hash::InfoHash) and the - /// [`completed`](crate::tracker::torrent::Entry::completed) counter + /// [`InfoHash`] and the + /// [`downloaded`](torrust_tracker_torrent_repository::entry::Torrent::downloaded) counter /// which is the number of times the torrent has been downloaded. - /// See [`Entry::completed`](crate::tracker::torrent::Entry::completed). + /// See [`Entry::downloaded`](torrust_tracker_torrent_repository::entry::Torrent::downloaded). /// /// # Context: Torrent Metrics /// /// # Errors /// /// Will return `Err` if unable to load. - async fn load_persistent_torrents(&self) -> Result, Error>; + fn load_persistent_torrents(&self) -> Result; /// It saves the torrent metrics data into the database. /// @@ -134,7 +133,7 @@ pub trait Database: Sync + Send { /// # Errors /// /// Will return `Err` if unable to save. - async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error>; + fn save_persistent_torrent(&self, info_hash: &InfoHash, downloaded: u32) -> Result<(), Error>; // Whitelist @@ -145,7 +144,7 @@ pub trait Database: Sync + Send { /// # Errors /// /// Will return `Err` if unable to load. - async fn load_whitelist(&self) -> Result, Error>; + fn load_whitelist(&self) -> Result, Error>; /// It checks if the torrent is whitelisted. /// @@ -156,7 +155,7 @@ pub trait Database: Sync + Send { /// # Errors /// /// Will return `Err` if unable to load. - async fn get_info_hash_from_whitelist(&self, info_hash: &InfoHash) -> Result, Error>; + fn get_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result, Error>; /// It adds the torrent to the whitelist. /// @@ -165,7 +164,7 @@ pub trait Database: Sync + Send { /// # Errors /// /// Will return `Err` if unable to save. 
- async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result; + fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result; /// It checks if the torrent is whitelisted. /// @@ -174,8 +173,8 @@ pub trait Database: Sync + Send { /// # Errors /// /// Will return `Err` if unable to load. - async fn is_info_hash_whitelisted(&self, info_hash: &InfoHash) -> Result { - Ok(self.get_info_hash_from_whitelist(info_hash).await?.is_some()) + fn is_info_hash_whitelisted(&self, info_hash: InfoHash) -> Result { + Ok(self.get_info_hash_from_whitelist(info_hash)?.is_some()) } /// It removes the torrent from the whitelist. @@ -185,7 +184,7 @@ pub trait Database: Sync + Send { /// # Errors /// /// Will return `Err` if unable to save. - async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result; + fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result; // Authentication keys @@ -196,19 +195,19 @@ pub trait Database: Sync + Send { /// # Errors /// /// Will return `Err` if unable to load. - async fn load_keys(&self) -> Result, Error>; + fn load_keys(&self) -> Result, Error>; /// It gets an expiring authentication key from the database. /// - /// It returns `Some(ExpiringKey)` if a [`ExpiringKey`](crate::tracker::auth::ExpiringKey) - /// with the input [`Key`](crate::tracker::auth::Key) exists, `None` otherwise. + /// It returns `Some(PeerKey)` if a [`PeerKey`](crate::core::auth::PeerKey) + /// with the input [`Key`] exists, `None` otherwise. /// /// # Context: Authentication Keys /// /// # Errors /// /// Will return `Err` if unable to load. - async fn get_key_from_keys(&self, key: &Key) -> Result, Error>; + fn get_key_from_keys(&self, key: &Key) -> Result, Error>; /// It adds an expiring authentication key to the database. /// @@ -217,7 +216,7 @@ pub trait Database: Sync + Send { /// # Errors /// /// Will return `Err` if unable to save. 
- async fn add_key_to_keys(&self, auth_key: &auth::ExpiringKey) -> Result; + fn add_key_to_keys(&self, auth_key: &auth::PeerKey) -> Result; /// It removes an expiring authentication key from the database. /// @@ -226,5 +225,5 @@ pub trait Database: Sync + Send { /// # Errors /// /// Will return `Err` if unable to load. - async fn remove_key_from_keys(&self, key: &Key) -> Result; + fn remove_key_from_keys(&self, key: &Key) -> Result; } diff --git a/src/tracker/databases/mysql.rs b/src/core/databases/mysql.rs similarity index 68% rename from src/tracker/databases/mysql.rs rename to src/core/databases/mysql.rs index 4419666ab..3a06c4982 100644 --- a/src/tracker/databases/mysql.rs +++ b/src/core/databases/mysql.rs @@ -2,30 +2,29 @@ use std::str::FromStr; use std::time::Duration; -use async_trait::async_trait; -use log::debug; use r2d2::Pool; use r2d2_mysql::mysql::prelude::Queryable; use r2d2_mysql::mysql::{params, Opts, OptsBuilder}; use r2d2_mysql::MySqlConnectionManager; -use torrust_tracker_primitives::DatabaseDriver; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::PersistentTorrents; +use tracing::debug; +use super::driver::Driver; use super::{Database, Error}; +use crate::core::auth::{self, Key}; use crate::shared::bit_torrent::common::AUTH_KEY_LENGTH; -use crate::shared::bit_torrent::info_hash::InfoHash; -use crate::tracker::auth::{self, Key}; -const DRIVER: DatabaseDriver = DatabaseDriver::MySQL; +const DRIVER: Driver = Driver::MySQL; pub struct Mysql { pool: Pool, } -#[async_trait] impl Database for Mysql { /// It instantiates a new `MySQL` database driver. /// - /// Refer to [`databases::Database::new`](crate::tracker::databases::Database::new). + /// Refer to [`databases::Database::new`](crate::core::databases::Database::new). 
/// /// # Errors /// @@ -39,7 +38,7 @@ impl Database for Mysql { Ok(Self { pool }) } - /// Refer to [`databases::Database::create_database_tables`](crate::tracker::databases::Database::create_database_tables). + /// Refer to [`databases::Database::create_database_tables`](crate::core::databases::Database::create_database_tables). fn create_database_tables(&self) -> Result<(), Error> { let create_whitelist_table = " CREATE TABLE IF NOT EXISTS whitelist ( @@ -61,7 +60,7 @@ impl Database for Mysql { CREATE TABLE IF NOT EXISTS `keys` ( `id` INT NOT NULL AUTO_INCREMENT, `key` VARCHAR({}) NOT NULL, - `valid_until` INT(10) NOT NULL, + `valid_until` INT(10), PRIMARY KEY (`id`), UNIQUE (`key`) );", @@ -79,7 +78,7 @@ impl Database for Mysql { Ok(()) } - /// Refer to [`databases::Database::drop_database_tables`](crate::tracker::databases::Database::drop_database_tables). + /// Refer to [`databases::Database::drop_database_tables`](crate::core::databases::Database::drop_database_tables). fn drop_database_tables(&self) -> Result<(), Error> { let drop_whitelist_table = " DROP TABLE `whitelist`;" @@ -104,8 +103,8 @@ impl Database for Mysql { Ok(()) } - /// Refer to [`databases::Database::load_persistent_torrents`](crate::tracker::databases::Database::load_persistent_torrents). - async fn load_persistent_torrents(&self) -> Result, Error> { + /// Refer to [`databases::Database::load_persistent_torrents`](crate::core::databases::Database::load_persistent_torrents). + fn load_persistent_torrents(&self) -> Result { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let torrents = conn.query_map( @@ -116,26 +115,32 @@ impl Database for Mysql { }, )?; - Ok(torrents) + Ok(torrents.iter().copied().collect()) } - /// Refer to [`databases::Database::load_keys`](crate::tracker::databases::Database::load_keys). - async fn load_keys(&self) -> Result, Error> { + /// Refer to [`databases::Database::load_keys`](crate::core::databases::Database::load_keys). 
+ fn load_keys(&self) -> Result, Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let keys = conn.query_map( "SELECT `key`, valid_until FROM `keys`", - |(key, valid_until): (String, i64)| auth::ExpiringKey { - key: key.parse::().unwrap(), - valid_until: Duration::from_secs(valid_until.unsigned_abs()), + |(key, valid_until): (String, Option)| match valid_until { + Some(valid_until) => auth::PeerKey { + key: key.parse::().unwrap(), + valid_until: Some(Duration::from_secs(valid_until.unsigned_abs())), + }, + None => auth::PeerKey { + key: key.parse::().unwrap(), + valid_until: None, + }, }, )?; Ok(keys) } - /// Refer to [`databases::Database::load_whitelist`](crate::tracker::databases::Database::load_whitelist). - async fn load_whitelist(&self) -> Result, Error> { + /// Refer to [`databases::Database::load_whitelist`](crate::core::databases::Database::load_whitelist). + fn load_whitelist(&self) -> Result, Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let info_hashes = conn.query_map("SELECT info_hash FROM whitelist", |info_hash: String| { @@ -145,8 +150,8 @@ impl Database for Mysql { Ok(info_hashes) } - /// Refer to [`databases::Database::save_persistent_torrent`](crate::tracker::databases::Database::save_persistent_torrent). - async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error> { + /// Refer to [`databases::Database::save_persistent_torrent`](crate::core::databases::Database::save_persistent_torrent). + fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error> { const COMMAND : &str = "INSERT INTO torrents (info_hash, completed) VALUES (:info_hash_str, :completed) ON DUPLICATE KEY UPDATE completed = VALUES(completed)"; let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; @@ -158,8 +163,8 @@ impl Database for Mysql { Ok(conn.exec_drop(COMMAND, params! { info_hash_str, completed })?) 
} - /// Refer to [`databases::Database::get_info_hash_from_whitelist`](crate::tracker::databases::Database::get_info_hash_from_whitelist). - async fn get_info_hash_from_whitelist(&self, info_hash: &InfoHash) -> Result, Error> { + /// Refer to [`databases::Database::get_info_hash_from_whitelist`](crate::core::databases::Database::get_info_hash_from_whitelist). + fn get_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result, Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let select = conn.exec_first::( @@ -172,8 +177,8 @@ impl Database for Mysql { Ok(info_hash) } - /// Refer to [`databases::Database::add_info_hash_to_whitelist`](crate::tracker::databases::Database::add_info_hash_to_whitelist). - async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result { + /// Refer to [`databases::Database::add_info_hash_to_whitelist`](crate::core::databases::Database::add_info_hash_to_whitelist). + fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let info_hash_str = info_hash.to_string(); @@ -186,8 +191,8 @@ impl Database for Mysql { Ok(1) } - /// Refer to [`databases::Database::remove_info_hash_from_whitelist`](crate::tracker::databases::Database::remove_info_hash_from_whitelist). - async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result { + /// Refer to [`databases::Database::remove_info_hash_from_whitelist`](crate::core::databases::Database::remove_info_hash_from_whitelist). + fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let info_hash = info_hash.to_string(); @@ -197,29 +202,38 @@ impl Database for Mysql { Ok(1) } - /// Refer to [`databases::Database::get_key_from_keys`](crate::tracker::databases::Database::get_key_from_keys). 
- async fn get_key_from_keys(&self, key: &Key) -> Result, Error> { + /// Refer to [`databases::Database::get_key_from_keys`](crate::core::databases::Database::get_key_from_keys). + fn get_key_from_keys(&self, key: &Key) -> Result, Error> { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; - let query = conn.exec_first::<(String, i64), _, _>( + let query = conn.exec_first::<(String, Option), _, _>( "SELECT `key`, valid_until FROM `keys` WHERE `key` = :key", params! { "key" => key.to_string() }, ); let key = query?; - Ok(key.map(|(key, expiry)| auth::ExpiringKey { - key: key.parse::().unwrap(), - valid_until: Duration::from_secs(expiry.unsigned_abs()), + Ok(key.map(|(key, opt_valid_until)| match opt_valid_until { + Some(valid_until) => auth::PeerKey { + key: key.parse::().unwrap(), + valid_until: Some(Duration::from_secs(valid_until.unsigned_abs())), + }, + None => auth::PeerKey { + key: key.parse::().unwrap(), + valid_until: None, + }, })) } - /// Refer to [`databases::Database::add_key_to_keys`](crate::tracker::databases::Database::add_key_to_keys). - async fn add_key_to_keys(&self, auth_key: &auth::ExpiringKey) -> Result { + /// Refer to [`databases::Database::add_key_to_keys`](crate::core::databases::Database::add_key_to_keys). + fn add_key_to_keys(&self, auth_key: &auth::PeerKey) -> Result { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; let key = auth_key.key.to_string(); - let valid_until = auth_key.valid_until.as_secs().to_string(); + let valid_until = match auth_key.valid_until { + Some(valid_until) => valid_until.as_secs().to_string(), + None => todo!(), + }; conn.exec_drop( "INSERT INTO `keys` (`key`, valid_until) VALUES (:key, :valid_until)", @@ -229,8 +243,8 @@ impl Database for Mysql { Ok(1) } - /// Refer to [`databases::Database::remove_key_from_keys`](crate::tracker::databases::Database::remove_key_from_keys). 
- async fn remove_key_from_keys(&self, key: &Key) -> Result { + /// Refer to [`databases::Database::remove_key_from_keys`](crate::core::databases::Database::remove_key_from_keys). + fn remove_key_from_keys(&self, key: &Key) -> Result { let mut conn = self.pool.get().map_err(|e| (e, DRIVER))?; conn.exec_drop("DELETE FROM `keys` WHERE key = :key", params! { "key" => key.to_string() })?; diff --git a/src/tracker/databases/sqlite.rs b/src/core/databases/sqlite.rs similarity index 64% rename from src/tracker/databases/sqlite.rs rename to src/core/databases/sqlite.rs index 1968ee049..69470ee04 100644 --- a/src/tracker/databases/sqlite.rs +++ b/src/core/databases/sqlite.rs @@ -2,37 +2,37 @@ use std::panic::Location; use std::str::FromStr; -use async_trait::async_trait; use r2d2::Pool; +use r2d2_sqlite::rusqlite::params; +use r2d2_sqlite::rusqlite::types::Null; use r2d2_sqlite::SqliteConnectionManager; -use torrust_tracker_primitives::DatabaseDriver; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::{DurationSinceUnixEpoch, PersistentTorrents}; +use super::driver::Driver; use super::{Database, Error}; -use crate::shared::bit_torrent::info_hash::InfoHash; -use crate::shared::clock::DurationSinceUnixEpoch; -use crate::tracker::auth::{self, Key}; +use crate::core::auth::{self, Key}; -const DRIVER: DatabaseDriver = DatabaseDriver::Sqlite3; +const DRIVER: Driver = Driver::Sqlite3; pub struct Sqlite { pool: Pool, } -#[async_trait] impl Database for Sqlite { /// It instantiates a new `SQLite3` database driver. /// - /// Refer to [`databases::Database::new`](crate::tracker::databases::Database::new). + /// Refer to [`databases::Database::new`](crate::core::databases::Database::new). /// /// # Errors /// /// Will return `r2d2::Error` if `db_path` is not able to create `SqLite` database. 
fn new(db_path: &str) -> Result { let cm = SqliteConnectionManager::file(db_path); - Pool::new(cm).map_or_else(|err| Err((err, DatabaseDriver::Sqlite3).into()), |pool| Ok(Sqlite { pool })) + Pool::new(cm).map_or_else(|err| Err((err, Driver::Sqlite3).into()), |pool| Ok(Sqlite { pool })) } - /// Refer to [`databases::Database::create_database_tables`](crate::tracker::databases::Database::create_database_tables). + /// Refer to [`databases::Database::create_database_tables`](crate::core::databases::Database::create_database_tables). fn create_database_tables(&self) -> Result<(), Error> { let create_whitelist_table = " CREATE TABLE IF NOT EXISTS whitelist ( @@ -53,7 +53,7 @@ impl Database for Sqlite { CREATE TABLE IF NOT EXISTS keys ( id INTEGER PRIMARY KEY AUTOINCREMENT, key TEXT NOT NULL UNIQUE, - valid_until INTEGER NOT NULL + valid_until INTEGER );" .to_string(); @@ -66,7 +66,7 @@ impl Database for Sqlite { Ok(()) } - /// Refer to [`databases::Database::drop_database_tables`](crate::tracker::databases::Database::drop_database_tables). + /// Refer to [`databases::Database::drop_database_tables`](crate::core::databases::Database::drop_database_tables). fn drop_database_tables(&self) -> Result<(), Error> { let drop_whitelist_table = " DROP TABLE whitelist;" @@ -89,8 +89,8 @@ impl Database for Sqlite { Ok(()) } - /// Refer to [`databases::Database::load_persistent_torrents`](crate::tracker::databases::Database::load_persistent_torrents). - async fn load_persistent_torrents(&self) -> Result, Error> { + /// Refer to [`databases::Database::load_persistent_torrents`](crate::core::databases::Database::load_persistent_torrents). 
+ fn load_persistent_torrents(&self) -> Result { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let mut stmt = conn.prepare("SELECT info_hash, completed FROM torrents")?; @@ -102,37 +102,38 @@ impl Database for Sqlite { Ok((info_hash, completed)) })?; - //torrent_iter?; - //let torrent_iter = torrent_iter.unwrap(); - - let torrents: Vec<(InfoHash, u32)> = torrent_iter.filter_map(std::result::Result::ok).collect(); - - Ok(torrents) + Ok(torrent_iter.filter_map(std::result::Result::ok).collect()) } - /// Refer to [`databases::Database::load_keys`](crate::tracker::databases::Database::load_keys). - async fn load_keys(&self) -> Result, Error> { + /// Refer to [`databases::Database::load_keys`](crate::core::databases::Database::load_keys). + fn load_keys(&self) -> Result, Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let mut stmt = conn.prepare("SELECT key, valid_until FROM keys")?; let keys_iter = stmt.query_map([], |row| { let key: String = row.get(0)?; - let valid_until: i64 = row.get(1)?; - - Ok(auth::ExpiringKey { - key: key.parse::().unwrap(), - valid_until: DurationSinceUnixEpoch::from_secs(valid_until.unsigned_abs()), - }) + let opt_valid_until: Option = row.get(1)?; + + match opt_valid_until { + Some(valid_until) => Ok(auth::PeerKey { + key: key.parse::().unwrap(), + valid_until: Some(DurationSinceUnixEpoch::from_secs(valid_until.unsigned_abs())), + }), + None => Ok(auth::PeerKey { + key: key.parse::().unwrap(), + valid_until: None, + }), + } })?; - let keys: Vec = keys_iter.filter_map(std::result::Result::ok).collect(); + let keys: Vec = keys_iter.filter_map(std::result::Result::ok).collect(); Ok(keys) } - /// Refer to [`databases::Database::load_whitelist`](crate::tracker::databases::Database::load_whitelist). - async fn load_whitelist(&self) -> Result, Error> { + /// Refer to [`databases::Database::load_whitelist`](crate::core::databases::Database::load_whitelist). 
+ fn load_whitelist(&self) -> Result, Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let mut stmt = conn.prepare("SELECT info_hash FROM whitelist")?; @@ -148,8 +149,8 @@ impl Database for Sqlite { Ok(info_hashes) } - /// Refer to [`databases::Database::save_persistent_torrent`](crate::tracker::databases::Database::save_persistent_torrent). - async fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error> { + /// Refer to [`databases::Database::save_persistent_torrent`](crate::core::databases::Database::save_persistent_torrent). + fn save_persistent_torrent(&self, info_hash: &InfoHash, completed: u32) -> Result<(), Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let insert = conn.execute( @@ -167,8 +168,8 @@ impl Database for Sqlite { } } - /// Refer to [`databases::Database::get_info_hash_from_whitelist`](crate::tracker::databases::Database::get_info_hash_from_whitelist). - async fn get_info_hash_from_whitelist(&self, info_hash: &InfoHash) -> Result, Error> { + /// Refer to [`databases::Database::get_info_hash_from_whitelist`](crate::core::databases::Database::get_info_hash_from_whitelist). + fn get_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result, Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let mut stmt = conn.prepare("SELECT info_hash FROM whitelist WHERE info_hash = ?")?; @@ -180,8 +181,8 @@ impl Database for Sqlite { Ok(query.map(|f| InfoHash::from_str(&f.get_unwrap::<_, String>(0)).unwrap())) } - /// Refer to [`databases::Database::add_info_hash_to_whitelist`](crate::tracker::databases::Database::add_info_hash_to_whitelist). - async fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result { + /// Refer to [`databases::Database::add_info_hash_to_whitelist`](crate::core::databases::Database::add_info_hash_to_whitelist). 
+ fn add_info_hash_to_whitelist(&self, info_hash: InfoHash) -> Result { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let insert = conn.execute("INSERT INTO whitelist (info_hash) VALUES (?)", [info_hash.to_string()])?; @@ -196,8 +197,8 @@ impl Database for Sqlite { } } - /// Refer to [`databases::Database::remove_info_hash_from_whitelist`](crate::tracker::databases::Database::remove_info_hash_from_whitelist). - async fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result { + /// Refer to [`databases::Database::remove_info_hash_from_whitelist`](crate::core::databases::Database::remove_info_hash_from_whitelist). + fn remove_info_hash_from_whitelist(&self, info_hash: InfoHash) -> Result { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let deleted = conn.execute("DELETE FROM whitelist WHERE info_hash = ?", [info_hash.to_string()])?; @@ -214,8 +215,8 @@ impl Database for Sqlite { } } - /// Refer to [`databases::Database::get_key_from_keys`](crate::tracker::databases::Database::get_key_from_keys). - async fn get_key_from_keys(&self, key: &Key) -> Result, Error> { + /// Refer to [`databases::Database::get_key_from_keys`](crate::core::databases::Database::get_key_from_keys). 
+ fn get_key_from_keys(&self, key: &Key) -> Result, Error> { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let mut stmt = conn.prepare("SELECT key, valid_until FROM keys WHERE key = ?")?; @@ -225,23 +226,36 @@ impl Database for Sqlite { let key = rows.next()?; Ok(key.map(|f| { - let expiry: i64 = f.get(1).unwrap(); + let valid_until: Option = f.get(1).unwrap(); let key: String = f.get(0).unwrap(); - auth::ExpiringKey { - key: key.parse::().unwrap(), - valid_until: DurationSinceUnixEpoch::from_secs(expiry.unsigned_abs()), + + match valid_until { + Some(valid_until) => auth::PeerKey { + key: key.parse::().unwrap(), + valid_until: Some(DurationSinceUnixEpoch::from_secs(valid_until.unsigned_abs())), + }, + None => auth::PeerKey { + key: key.parse::().unwrap(), + valid_until: None, + }, } })) } - /// Refer to [`databases::Database::add_key_to_keys`](crate::tracker::databases::Database::add_key_to_keys). - async fn add_key_to_keys(&self, auth_key: &auth::ExpiringKey) -> Result { + /// Refer to [`databases::Database::add_key_to_keys`](crate::core::databases::Database::add_key_to_keys). + fn add_key_to_keys(&self, auth_key: &auth::PeerKey) -> Result { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; - let insert = conn.execute( - "INSERT INTO keys (key, valid_until) VALUES (?1, ?2)", - [auth_key.key.to_string(), auth_key.valid_until.as_secs().to_string()], - )?; + let insert = match auth_key.valid_until { + Some(valid_until) => conn.execute( + "INSERT INTO keys (key, valid_until) VALUES (?1, ?2)", + [auth_key.key.to_string(), valid_until.as_secs().to_string()], + )?, + None => conn.execute( + "INSERT INTO keys (key, valid_until) VALUES (?1, ?2)", + params![auth_key.key.to_string(), Null], + )?, + }; if insert == 0 { Err(Error::InsertFailed { @@ -253,8 +267,8 @@ impl Database for Sqlite { } } - /// Refer to [`databases::Database::remove_key_from_keys`](crate::tracker::databases::Database::remove_key_from_keys). 
- async fn remove_key_from_keys(&self, key: &Key) -> Result { + /// Refer to [`databases::Database::remove_key_from_keys`](crate::core::databases::Database::remove_key_from_keys). + fn remove_key_from_keys(&self, key: &Key) -> Result { let conn = self.pool.get().map_err(|e| (e, DRIVER))?; let deleted = conn.execute("DELETE FROM keys WHERE key = ?", [key.to_string()])?; diff --git a/src/tracker/error.rs b/src/core/error.rs similarity index 64% rename from src/tracker/error.rs rename to src/core/error.rs index f1e622673..d89b030c4 100644 --- a/src/tracker/error.rs +++ b/src/core/error.rs @@ -9,6 +9,10 @@ use std::panic::Location; use torrust_tracker_located_error::LocatedError; +use torrust_tracker_primitives::info_hash::InfoHash; + +use super::auth::ParseKeyError; +use super::databases; /// Authentication or authorization error returned by the core `Tracker` #[derive(thiserror::Error, Debug, Clone)] @@ -19,13 +23,33 @@ pub enum Error { key: super::auth::Key, source: LocatedError<'static, dyn std::error::Error + Send + Sync>, }, + #[error("The peer is not authenticated, {location}")] PeerNotAuthenticated { location: &'static Location<'static> }, // Authorization errors #[error("The torrent: {info_hash}, is not whitelisted, {location}")] TorrentNotWhitelisted { - info_hash: crate::shared::bit_torrent::info_hash::InfoHash, + info_hash: InfoHash, location: &'static Location<'static>, }, } + +/// Errors related to peers keys. 
+#[allow(clippy::module_name_repetitions)] +#[derive(thiserror::Error, Debug, Clone)] +pub enum PeerKeyError { + #[error("Invalid peer key duration: {seconds_valid:?}, is not valid")] + DurationOverflow { seconds_valid: u64 }, + + #[error("Invalid key: {key}")] + InvalidKey { + key: String, + source: LocatedError<'static, ParseKeyError>, + }, + + #[error("Can't persist key: {source}")] + DatabaseError { + source: LocatedError<'static, databases::error::Error>, + }, +} diff --git a/src/tracker/mod.rs b/src/core/mod.rs similarity index 63% rename from src/tracker/mod.rs rename to src/core/mod.rs index 040751e12..a6ee830db 100644 --- a/src/tracker/mod.rs +++ b/src/core/mod.rs @@ -12,9 +12,9 @@ //! ```text //! Delivery layer Domain layer //! -//! HTTP tracker | +//! HTTP tracker | //! UDP tracker |> Core tracker -//! Tracker REST API | +//! Tracker REST API | //! ``` //! //! # Table of contents @@ -52,13 +52,13 @@ //! The tracker responds to the peer with the list of other peers in the swarm so that //! the peer can contact them to start downloading pieces of the file from them. //! -//! Once you have instantiated the `Tracker` you can `announce` a new [`peer`](crate::tracker::peer::Peer) with: +//! Once you have instantiated the `Tracker` you can `announce` a new [`peer::Peer`] with: //! //! ```rust,no_run -//! use torrust_tracker::tracker::peer; -//! use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; -//! use torrust_tracker::shared::clock::DurationSinceUnixEpoch; -//! use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; +//! use torrust_tracker_primitives::peer; +//! use torrust_tracker_primitives::info_hash::InfoHash; +//! use torrust_tracker_primitives::{DurationSinceUnixEpoch, NumberOfBytes}; +//! use torrust_tracker_primitives::announce_event::AnnounceEvent; //! use std::net::SocketAddr; //! use std::net::IpAddr; //! use std::net::Ipv4Addr; @@ -97,26 +97,26 @@ //! The returned struct is: //! //! ```rust,no_run -//! 
use torrust_tracker::tracker::peer::Peer; +//! use torrust_tracker_primitives::peer; +//! use torrust_tracker_configuration::AnnouncePolicy; //! //! pub struct AnnounceData { -//! pub peers: Vec, -//! pub swarm_stats: SwarmStats, -//! pub interval: u32, // Option `announce_interval` from core tracker configuration -//! pub interval_min: u32, // Option `min_announce_interval` from core tracker configuration +//! pub peers: Vec, +//! pub swarm_stats: SwarmMetadata, +//! pub policy: AnnouncePolicy, // the tracker announce policy. //! } //! -//! pub struct SwarmStats { +//! pub struct SwarmMetadata { //! pub completed: u32, // The number of peers that have ever completed downloading //! pub seeders: u32, // The number of active peers that have completed downloading (seeders) //! pub leechers: u32, // The number of active peers that have not completed downloading (leechers) //! } //! //! // Core tracker configuration -//! pub struct Configuration { +//! pub struct AnnounceInterval { //! // ... -//! pub announce_interval: u32, // Interval in seconds that the client should wait between sending regular announce requests to the tracker -//! pub min_announce_interval: u32, // Minimum announce interval. Clients must not reannounce more frequently than this +//! pub interval: u32, // Interval in seconds that the client should wait between sending regular announce requests to the tracker +//! pub interval_min: u32, // Minimum announce interval. Clients must not reannounce more frequently than this //! // ... //! } //! ``` @@ -136,7 +136,7 @@ //! The returned struct is: //! //! ```rust,no_run -//! use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; +//! use torrust_tracker_primitives::info_hash::InfoHash; //! use std::collections::HashMap; //! //! pub struct ScrapeData { @@ -165,7 +165,7 @@ //! There are two data structures for infohashes: byte arrays and hex strings: //! //! ```rust,no_run -//! use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; +//! 
use torrust_tracker_primitives::info_hash::InfoHash; //! use std::str::FromStr; //! //! let info_hash: InfoHash = [255u8; 20].into(); @@ -183,7 +183,7 @@ //! //! ## Torrents //! -//! The [`torrent`](crate::tracker::torrent) module contains all the data structures stored by the `Tracker` except for peers. +//! The [`torrent`] module contains all the data structures stored by the `Tracker` except for peers. //! //! We can represent the data stored in memory internally by the `Tracker` with this JSON object: //! @@ -219,10 +219,10 @@ //! The torrent entry has two attributes: //! //! - `completed`: which is hte number of peers that have completed downloading the torrent file/s. As they have completed downloading, -//! they have a full version of the torrent data, and they can provide the full data to other peers. That's why they are also known as "seeders". +//! they have a full version of the torrent data, and they can provide the full data to other peers. That's why they are also known as "seeders". //! - `peers`: an indexed and orderer list of peer for the torrent. Each peer contains the data received from the peer in the `announce` request. //! -//! The [`torrent`](crate::tracker::torrent) module not only contains the original data obtained from peer via `announce` requests, it also contains +//! The [`torrent`] module not only contains the original data obtained from peer via `announce` requests, it also contains //! aggregate data that can be derived from the original data. For example: //! //! ```rust,no_run @@ -232,33 +232,28 @@ //! pub incomplete: u32, // The number of active peers that have not completed downloading (leechers) //! } //! -//! pub struct SwarmStats { -//! pub completed: u32, // The number of peers that have ever completed downloading -//! pub seeders: u32, // The number of active peers that have completed downloading (seeders) -//! pub leechers: u32, // The number of active peers that have not completed downloading (leechers) -//! } //! ``` //! 
//! > **NOTICE**: that `complete` or `completed` peers are the peers that have completed downloading, but only the active ones are considered "seeders". //! -//! `SwarmStats` struct follows name conventions for `scrape` responses. See [BEP 48](https://www.bittorrent.org/beps/bep_0048.html), while `SwarmStats` +//! `SwarmMetadata` struct follows name conventions for `scrape` responses. See [BEP 48](https://www.bittorrent.org/beps/bep_0048.html), while `SwarmMetadata` //! is used for the rest of cases. //! -//! Refer to [`torrent`](crate::tracker::torrent) module for more details about these data structures. +//! Refer to [`torrent`] module for more details about these data structures. //! //! ## Peers //! //! A `Peer` is the struct used by the `Tracker` to keep peers data: //! //! ```rust,no_run -//! use torrust_tracker::tracker::peer::Id; +//! use torrust_tracker_primitives::peer; //! use std::net::SocketAddr; -//! use torrust_tracker::shared::clock::DurationSinceUnixEpoch; +//! use torrust_tracker_primitives::DurationSinceUnixEpoch; //! use aquatic_udp_protocol::NumberOfBytes; //! use aquatic_udp_protocol::AnnounceEvent; //! //! pub struct Peer { -//! pub peer_id: Id, // The peer ID +//! pub peer_id: peer::Id, // The peer ID //! pub peer_addr: SocketAddr, // Peer socket address //! pub updated: DurationSinceUnixEpoch, // Last time (timestamp) when the peer was updated //! pub uploaded: NumberOfBytes, // Number of bytes the peer has uploaded so far @@ -310,26 +305,38 @@ //! `c1277613db1d28709b034a017ab2cae4be07ae10` is the torrent infohash and `completed` contains the number of peers //! that have a full version of the torrent data, also known as seeders. //! -//! Refer to [`peer`](crate::tracker::peer) module for more information about peers. +//! Refer to [`peer`] module for more information about peers. //! //! # Configuration //! //! You can control the behavior of this module with the module settings: //! //! ```toml -//! log_level = "debug" -//! 
mode = "public" -//! db_driver = "Sqlite3" -//! db_path = "./storage/tracker/lib/database/sqlite3.db" -//! announce_interval = 120 -//! min_announce_interval = 120 -//! max_peer_timeout = 900 +//! [logging] +//! threshold = "debug" +//! +//! [core] +//! inactive_peer_cleanup_interval = 600 +//! listed = false +//! private = false +//! tracker_usage_statistics = true +//! +//! [core.announce_policy] +//! interval = 120 +//! interval_min = 120 +//! +//! [core.database] +//! driver = "sqlite3" +//! path = "./storage/tracker/lib/database/sqlite3.db" +//! +//! [core.net] //! on_reverse_proxy = false //! external_ip = "2.137.87.41" -//! tracker_usage_statistics = true -//! persistent_torrent_completed_stat = true -//! inactive_peer_cleanup_interval = 600 -//! remove_peerless_torrents = false +//! +//! [core.tracker_policy] +//! max_peer_timeout = 900 +//! persistent_torrent_completed_stat = false +//! remove_peerless_torrents = true //! ``` //! //! Refer to the [`configuration` module documentation](https://docs.rs/torrust-tracker-configuration) to get more information about all options. @@ -348,7 +355,7 @@ //! //! Services can include extra features like pagination, for example. //! -//! Refer to [`services`](crate::tracker::services) module for more information about services. +//! Refer to [`services`] module for more information about services. //! //! # Authentication //! @@ -363,8 +370,8 @@ //! //! To learn more about tracker authentication, refer to the following modules : //! -//! - [`auth`](crate::tracker::auth) module. -//! - [`tracker`](crate::tracker) module. +//! - [`auth`] module. +//! - [`core`](crate::core) module. //! - [`http`](crate::servers::http) module. //! //! # Statistics @@ -406,7 +413,7 @@ //! - `scrapes_handled`: number of `scrape` handled requests by the tracker //! //! > **NOTICE**: as the HTTP tracker does not have an specific `connection` request like the UDP tracker, `connections_handled` are -//! 
increased on every `announce` and `scrape` requests. +//! > increased on every `announce` and `scrape` requests. //! //! The tracker exposes an event sender API that allows the tracker users to send events. When a higher application service handles a //! `connection` , `announce` or `scrape` requests, it notifies the `Tracker` by sending statistics events. @@ -417,7 +424,7 @@ //! tracker.send_stats_event(statistics::Event::Tcp4Announce).await //! ``` //! -//! Refer to [`statistics`](crate::tracker::statistics) module for more information about statistics. +//! Refer to [`statistics`] module for more information about statistics. //! //! # Persistence //! @@ -430,33 +437,44 @@ //! - Torrent whitelist //! - Torrent metrics //! -//! Refer to [`databases`](crate::tracker::databases) module for more information about persistence. +//! Refer to [`databases`] module for more information about persistence. pub mod auth; pub mod databases; pub mod error; -pub mod peer; pub mod services; pub mod statistics; pub mod torrent; -use std::collections::btree_map::Entry; -use std::collections::{BTreeMap, HashMap}; +pub mod peer_tests; + +use std::collections::HashMap; use std::net::IpAddr; use std::panic::Location; use std::sync::Arc; use std::time::Duration; +use auth::PeerKey; +use databases::driver::Driver; +use derive_more::Constructor; +use error::PeerKeyError; use tokio::sync::mpsc::error::SendError; -use tokio::sync::{RwLock, RwLockReadGuard}; -use torrust_tracker_configuration::Configuration; -use torrust_tracker_primitives::TrackerMode; +use torrust_tracker_clock::clock::Time; +use torrust_tracker_configuration::v2_0_0::database; +use torrust_tracker_configuration::{AnnouncePolicy, Core, TORRENT_PEERS_LIMIT}; +use torrust_tracker_located_error::Located; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; +use 
torrust_tracker_primitives::{peer, DurationSinceUnixEpoch}; +use torrust_tracker_torrent_repository::entry::EntrySync; +use torrust_tracker_torrent_repository::repository::Repository; +use tracing::debug; use self::auth::Key; use self::error::Error; -use self::peer::Peer; -use self::torrent::{SwarmMetadata, SwarmStats}; -use crate::shared::bit_torrent::info_hash::InfoHash; -use crate::tracker::databases::Database; +use self::torrent::Torrents; +use crate::core::databases::Database; +use crate::CurrentClock; /// The domain layer tracker service. /// @@ -465,52 +483,41 @@ use crate::tracker::databases::Database; /// authentication and other services. /// /// > **NOTICE**: the `Tracker` is not responsible for handling the network layer. -/// Typically, the `Tracker` is used by a higher application service that handles -/// the network layer. +/// > Typically, the `Tracker` is used by a higher application service that handles +/// > the network layer. pub struct Tracker { - /// `Tracker` configuration. See [`torrust-tracker-configuration`](torrust_tracker_configuration) - pub config: Arc, - /// A database driver implementation: [`Sqlite3`](crate::tracker::databases::sqlite) - /// or [`MySQL`](crate::tracker::databases::mysql) - pub database: Box, - mode: TrackerMode, - keys: RwLock>, - whitelist: RwLock>, - torrents: RwLock>, + /// The tracker configuration. + config: Core, + + /// A database driver implementation: [`Sqlite3`](crate::core::databases::sqlite) + /// or [`MySQL`](crate::core::databases::mysql) + database: Arc>, + + /// Tracker users' keys. Only for private trackers. + keys: tokio::sync::RwLock>, + + /// The list of allowed torrents. Only for listed trackers. + whitelist: tokio::sync::RwLock>, + + /// The in-memory torrents repository. + torrents: Arc, + + /// Service to send stats events. stats_event_sender: Option>, - stats_repository: statistics::Repo, -} -/// Structure that holds general `Tracker` torrents metrics. 
-/// -/// Metrics are aggregate values for all torrents. -#[derive(Debug, PartialEq, Default)] -pub struct TorrentsMetrics { - /// Total number of seeders for all torrents - pub seeders: u64, - /// Total number of peers that have ever completed downloading for all torrents. - pub completed: u64, - /// Total number of leechers for all torrents. - pub leechers: u64, - /// Total number of torrents. - pub torrents: u64, + /// The in-memory stats repo. + stats_repository: statistics::Repo, } /// Structure that holds the data returned by the `announce` request. -#[derive(Debug, PartialEq, Default)] +#[derive(Clone, Debug, PartialEq, Constructor, Default)] pub struct AnnounceData { /// The list of peers that are downloading the same torrent. /// It excludes the peer that made the request. - pub peers: Vec, + pub peers: Vec>, /// Swarm statistics - pub swarm_stats: SwarmStats, - /// The interval in seconds that the client should wait between sending - /// regular requests to the tracker. - /// Refer to [`announce_interval`](torrust_tracker_configuration::Configuration::announce_interval). - pub interval: u32, - /// The minimum announce interval in seconds that the client should wait. - /// Refer to [`min_announce_interval`](torrust_tracker_configuration::Configuration::min_announce_interval). - pub interval_min: u32, + pub stats: SwarmMetadata, + pub policy: AnnouncePolicy, } /// Structure that holds the data returned by the `scrape` request. @@ -551,6 +558,20 @@ impl ScrapeData { } } +/// This type contains the info needed to add a new tracker key. +/// +/// You can upload a pre-generated key or let the app to generate a new one. +/// You can also set an expiration date or leave it empty (`None`) if you want +/// to create a permanent key that does not expire. +#[derive(Debug)] +pub struct AddKeyRequest { + /// The pre-generated key. Use `None` to generate a random key. + pub opt_key: Option, + + /// How long the key will be valid in seconds. 
Use `None` for permanent keys. + pub opt_seconds_valid: Option, +} + impl Tracker { /// `Tracker` constructor. /// @@ -558,20 +579,22 @@ impl Tracker { /// /// Will return a `databases::error::Error` if unable to connect to database. The `Tracker` is responsible for the persistence. pub fn new( - config: Arc, + config: &Core, stats_event_sender: Option>, stats_repository: statistics::Repo, ) -> Result { - let database = databases::driver::build(&config.db_driver, &config.db_path)?; + let driver = match config.database.driver { + database::Driver::Sqlite3 => Driver::Sqlite3, + database::Driver::MySQL => Driver::MySQL, + }; - let mode = config.mode; + let database = Arc::new(databases::driver::build(&driver, &config.database.path)?); Ok(Tracker { - config, - mode, - keys: RwLock::new(std::collections::HashMap::new()), - whitelist: RwLock::new(std::collections::HashSet::new()), - torrents: RwLock::new(std::collections::BTreeMap::new()), + config: config.clone(), + keys: tokio::sync::RwLock::new(std::collections::HashMap::new()), + whitelist: tokio::sync::RwLock::new(std::collections::HashSet::new()), + torrents: Arc::default(), stats_event_sender, stats_repository, database, @@ -580,17 +603,17 @@ impl Tracker { /// Returns `true` is the tracker is in public mode. pub fn is_public(&self) -> bool { - self.mode == TrackerMode::Public + !self.config.private } /// Returns `true` is the tracker is in private mode. pub fn is_private(&self) -> bool { - self.mode == TrackerMode::Private || self.mode == TrackerMode::PrivateListed + self.config.private } /// Returns `true` is the tracker is in whitelisted mode. - pub fn is_whitelisted(&self) -> bool { - self.mode == TrackerMode::Listed || self.mode == TrackerMode::PrivateListed + pub fn is_listed(&self) -> bool { + self.config.listed } /// Returns `true` if the tracker requires authentication. @@ -598,12 +621,25 @@ impl Tracker { self.is_private() } + /// Returns `true` is the tracker is in whitelisted mode. 
+ pub fn is_behind_reverse_proxy(&self) -> bool { + self.config.net.on_reverse_proxy + } + + pub fn get_announce_policy(&self) -> AnnouncePolicy { + self.config.announce_policy + } + + pub fn get_maybe_external_ip(&self) -> Option { + self.config.net.external_ip + } + /// It handles an announce request. /// /// # Context: Tracker /// /// BEP 03: [The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html). - pub async fn announce(&self, info_hash: &InfoHash, peer: &mut Peer, remote_client_ip: &IpAddr) -> AnnounceData { + pub fn announce(&self, info_hash: &InfoHash, peer: &mut peer::Peer, remote_client_ip: &IpAddr) -> AnnounceData { // code-review: maybe instead of mutating the peer we could just return // a tuple with the new peer and the announce data: (Peer, AnnounceData). // It could even be a different struct: `StoredPeer` or `PublicPeer`. @@ -619,17 +655,18 @@ impl Tracker { // we are actually handling authentication at the handlers level. So I would extract that // responsibility into another authentication service. 
- peer.change_ip(&assign_ip_address_to_peer(remote_client_ip, self.config.get_ext_ip())); + debug!("Before: {peer:?}"); + peer.change_ip(&assign_ip_address_to_peer(remote_client_ip, self.config.net.external_ip)); + debug!("After: {peer:?}"); - let swarm_stats = self.update_torrent_with_peer_and_get_stats(info_hash, peer).await; + let stats = self.upsert_peer_and_get_stats(info_hash, peer); - let peers = self.get_peers_for_peer(info_hash, peer).await; + let peers = self.get_peers_for(info_hash, peer); AnnounceData { peers, - swarm_stats, - interval: self.config.announce_interval, - interval_min: self.config.min_announce_interval, + stats, + policy: self.get_announce_policy(), } } @@ -643,7 +680,7 @@ impl Tracker { for info_hash in info_hashes { let swarm_metadata = match self.authorize(info_hash).await { - Ok(()) => self.get_swarm_metadata(info_hash).await, + Ok(()) => self.get_swarm_metadata(info_hash), Err(_) => SwarmMetadata::zeroed(), }; scrape_data.add_file(info_hash, swarm_metadata); @@ -653,9 +690,8 @@ impl Tracker { } /// It returns the data for a `scrape` response. - async fn get_swarm_metadata(&self, info_hash: &InfoHash) -> SwarmMetadata { - let torrents = self.get_torrents().await; - match torrents.get(info_hash) { + fn get_swarm_metadata(&self, info_hash: &InfoHash) -> SwarmMetadata { + match self.torrents.get(info_hash) { Some(torrent_entry) => torrent_entry.get_swarm_metadata(), None => SwarmMetadata::default(), } @@ -669,46 +705,28 @@ impl Tracker { /// # Errors /// /// Will return a `database::Error` if unable to load the list of `persistent_torrents` from the database. 
- pub async fn load_torrents_from_database(&self) -> Result<(), databases::error::Error> { - let persistent_torrents = self.database.load_persistent_torrents().await?; - - let mut torrents = self.torrents.write().await; + pub fn load_torrents_from_database(&self) -> Result<(), databases::error::Error> { + let persistent_torrents = self.database.load_persistent_torrents()?; - for (info_hash, completed) in persistent_torrents { - // Skip if torrent entry already exists - if torrents.contains_key(&info_hash) { - continue; - } - - let torrent_entry = torrent::Entry { - peers: BTreeMap::default(), - completed, - }; - - torrents.insert(info_hash, torrent_entry); - } + self.torrents.import_persistent(&persistent_torrents); Ok(()) } - async fn get_peers_for_peer(&self, info_hash: &InfoHash, peer: &Peer) -> Vec { - let read_lock = self.torrents.read().await; - - match read_lock.get(info_hash) { + fn get_peers_for(&self, info_hash: &InfoHash, peer: &peer::Peer) -> Vec> { + match self.torrents.get(info_hash) { None => vec![], - Some(entry) => entry.get_peers_for_peer(peer).into_iter().copied().collect(), + Some(entry) => entry.get_peers_for_client(&peer.peer_addr, Some(TORRENT_PEERS_LIMIT)), } } /// # Context: Tracker /// /// Get all torrent peers for a given torrent - pub async fn get_all_torrent_peers(&self, info_hash: &InfoHash) -> Vec { - let read_lock = self.torrents.read().await; - - match read_lock.get(info_hash) { + pub fn get_torrent_peers(&self, info_hash: &InfoHash) -> Vec> { + match self.torrents.get(info_hash) { None => vec![], - Some(entry) => entry.get_all_peers().into_iter().copied().collect(), + Some(entry) => entry.get_peers(Some(TORRENT_PEERS_LIMIT)), } } @@ -717,87 +735,60 @@ impl Tracker { /// needed for a `announce` request response. 
/// /// # Context: Tracker - pub async fn update_torrent_with_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> torrent::SwarmStats { - // code-review: consider splitting the function in two (command and query segregation). - // `update_torrent_with_peer` and `get_stats` + pub fn upsert_peer_and_get_stats(&self, info_hash: &InfoHash, peer: &peer::Peer) -> SwarmMetadata { + let swarm_metadata_before = match self.torrents.get_swarm_metadata(info_hash) { + Some(swarm_metadata) => swarm_metadata, + None => SwarmMetadata::zeroed(), + }; - let mut torrents = self.torrents.write().await; + self.torrents.upsert_peer(info_hash, peer); - let torrent_entry = match torrents.entry(*info_hash) { - Entry::Vacant(vacant) => vacant.insert(torrent::Entry::new()), - Entry::Occupied(entry) => entry.into_mut(), + let swarm_metadata_after = match self.torrents.get_swarm_metadata(info_hash) { + Some(swarm_metadata) => swarm_metadata, + None => SwarmMetadata::zeroed(), }; - let stats_updated = torrent_entry.update_peer(peer); - - // todo: move this action to a separate worker - if self.config.persistent_torrent_completed_stat && stats_updated { - drop( - self.database - .save_persistent_torrent(info_hash, torrent_entry.completed) - .await, - ); + if swarm_metadata_before != swarm_metadata_after { + self.persist_stats(info_hash, &swarm_metadata_after); } - let (seeders, completed, leechers) = torrent_entry.get_stats(); - - torrent::SwarmStats { - completed, - seeders, - leechers, - } + swarm_metadata_after } - pub async fn get_torrents(&self) -> RwLockReadGuard<'_, BTreeMap> { - self.torrents.read().await + /// It stores the torrents stats into the database (if persistency is enabled). 
+ /// + /// # Context: Tracker + fn persist_stats(&self, info_hash: &InfoHash, swarm_metadata: &SwarmMetadata) { + if self.config.tracker_policy.persistent_torrent_completed_stat { + let completed = swarm_metadata.downloaded; + let info_hash = *info_hash; + + drop(self.database.save_persistent_torrent(&info_hash, completed)); + } } /// It calculates and returns the general `Tracker` - /// [`TorrentsMetrics`](crate::tracker::TorrentsMetrics) + /// [`TorrentsMetrics`] /// /// # Context: Tracker - pub async fn get_torrents_metrics(&self) -> TorrentsMetrics { - let mut torrents_metrics = TorrentsMetrics { - seeders: 0, - completed: 0, - leechers: 0, - torrents: 0, - }; - - let db = self.get_torrents().await; - - db.values().for_each(|torrent_entry| { - let (seeders, completed, leechers) = torrent_entry.get_stats(); - torrents_metrics.seeders += u64::from(seeders); - torrents_metrics.completed += u64::from(completed); - torrents_metrics.leechers += u64::from(leechers); - torrents_metrics.torrents += 1; - }); - - torrents_metrics + /// + /// # Panics + /// Panics if unable to get the torrent metrics. + pub fn get_torrents_metrics(&self) -> TorrentsMetrics { + self.torrents.get_metrics() } - /// Remove inactive peers and (optionally) peerless torrents + /// Remove inactive peers and (optionally) peerless torrents. 
/// /// # Context: Tracker - pub async fn cleanup_torrents(&self) { - let mut torrents_lock = self.torrents.write().await; + pub fn cleanup_torrents(&self) { + let current_cutoff = CurrentClock::now_sub(&Duration::from_secs(u64::from(self.config.tracker_policy.max_peer_timeout))) + .unwrap_or_default(); - // If we don't need to remove torrents we will use the faster iter - if self.config.remove_peerless_torrents { - torrents_lock.retain(|_, torrent_entry| { - torrent_entry.remove_inactive_peers(self.config.max_peer_timeout); + self.torrents.remove_inactive_peers(current_cutoff); - if self.config.persistent_torrent_completed_stat { - torrent_entry.completed > 0 || !torrent_entry.peers.is_empty() - } else { - !torrent_entry.peers.is_empty() - } - }); - } else { - for torrent_entry in (*torrents_lock).values_mut() { - torrent_entry.remove_inactive_peers(self.config.max_peer_timeout); - } + if self.config.tracker_policy.remove_peerless_torrents { + self.torrents.remove_peerless_torrents(&self.config.tracker_policy); } } @@ -817,9 +808,96 @@ impl Tracker { } } + /// Adds new peer keys to the tracker. + /// + /// Keys can be pre-generated or randomly created. They can also be permanent or expire. + /// + /// # Errors + /// + /// Will return an error if: + /// + /// - The key duration overflows the duration type maximum value. + /// - The provided pre-generated key is invalid. + /// - The key could not been persisted due to database issues. + pub async fn add_peer_key(&self, add_key_req: AddKeyRequest) -> Result { + // code-review: all methods related to keys should be moved to a new independent "keys" service. 
+ + match add_key_req.opt_key { + // Upload pre-generated key + Some(pre_existing_key) => { + if let Some(seconds_valid) = add_key_req.opt_seconds_valid { + // Expiring key + let Some(valid_until) = CurrentClock::now_add(&Duration::from_secs(seconds_valid)) else { + return Err(PeerKeyError::DurationOverflow { seconds_valid }); + }; + + let key = pre_existing_key.parse::(); + + match key { + Ok(key) => match self.add_auth_key(key, Some(valid_until)).await { + Ok(auth_key) => Ok(auth_key), + Err(err) => Err(PeerKeyError::DatabaseError { + source: Located(err).into(), + }), + }, + Err(err) => Err(PeerKeyError::InvalidKey { + key: pre_existing_key, + source: Located(err).into(), + }), + } + } else { + // Permanent key + let key = pre_existing_key.parse::(); + + match key { + Ok(key) => match self.add_permanent_auth_key(key).await { + Ok(auth_key) => Ok(auth_key), + Err(err) => Err(PeerKeyError::DatabaseError { + source: Located(err).into(), + }), + }, + Err(err) => Err(PeerKeyError::InvalidKey { + key: pre_existing_key, + source: Located(err).into(), + }), + } + } + } + // Generate a new random key + None => match add_key_req.opt_seconds_valid { + // Expiring key + Some(seconds_valid) => match self.generate_auth_key(Some(Duration::from_secs(seconds_valid))).await { + Ok(auth_key) => Ok(auth_key), + Err(err) => Err(PeerKeyError::DatabaseError { + source: Located(err).into(), + }), + }, + // Permanent key + None => match self.generate_permanent_auth_key().await { + Ok(auth_key) => Ok(auth_key), + Err(err) => Err(PeerKeyError::DatabaseError { + source: Located(err).into(), + }), + }, + }, + } + } + + /// It generates a new permanent authentication key. + /// + /// Authentication keys are used by HTTP trackers. + /// + /// # Context: Authentication + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to add the `auth_key` to the database. 
+ pub async fn generate_permanent_auth_key(&self) -> Result { + self.generate_auth_key(None).await + } + /// It generates a new expiring authentication key. - /// `lifetime` param is the duration in seconds for the new key. - /// The key will be no longer valid after `lifetime` seconds. + /// /// Authentication keys are used by HTTP trackers. /// /// # Context: Authentication @@ -827,9 +905,64 @@ impl Tracker { /// # Errors /// /// Will return a `database::Error` if unable to add the `auth_key` to the database. - pub async fn generate_auth_key(&self, lifetime: Duration) -> Result { - let auth_key = auth::generate(lifetime); - self.database.add_key_to_keys(&auth_key).await?; + /// + /// # Arguments + /// + /// * `lifetime` - The duration in seconds for the new key. The key will be + /// no longer valid after `lifetime` seconds. + pub async fn generate_auth_key(&self, lifetime: Option) -> Result { + let auth_key = auth::generate_key(lifetime); + + self.database.add_key_to_keys(&auth_key)?; + self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone()); + Ok(auth_key) + } + + /// It adds a pre-generated permanent authentication key. + /// + /// Authentication keys are used by HTTP trackers. + /// + /// # Context: Authentication + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to add the `auth_key` to the + /// database. For example, if the key already exist. + /// + /// # Arguments + /// + /// * `key` - The pre-generated key. + pub async fn add_permanent_auth_key(&self, key: Key) -> Result { + self.add_auth_key(key, None).await + } + + /// It adds a pre-generated authentication key. + /// + /// Authentication keys are used by HTTP trackers. + /// + /// # Context: Authentication + /// + /// # Errors + /// + /// Will return a `database::Error` if unable to add the `auth_key` to the + /// database. For example, if the key already exist. + /// + /// # Arguments + /// + /// * `key` - The pre-generated key. 
+ /// * `lifetime` - The duration in seconds for the new key. The key will be + /// no longer valid after `lifetime` seconds. + pub async fn add_auth_key( + &self, + key: Key, + valid_until: Option, + ) -> Result { + let auth_key = PeerKey { key, valid_until }; + + // code-review: should we return a friendly error instead of the DB + // constrain error when the key already exist? For now, it's returning + // the specif error for each DB driver when a UNIQUE constrain fails. + self.database.add_key_to_keys(&auth_key)?; self.keys.write().await.insert(auth_key.key.clone(), auth_key.clone()); Ok(auth_key) } @@ -841,12 +974,8 @@ impl Tracker { /// # Errors /// /// Will return a `database::Error` if unable to remove the `key` to the database. - /// - /// # Panics - /// - /// Will panic if key cannot be converted into a valid `Key`. pub async fn remove_auth_key(&self, key: &Key) -> Result<(), databases::error::Error> { - self.database.remove_key_from_keys(key).await?; + self.database.remove_key_from_keys(key)?; self.keys.write().await.remove(key); Ok(()) } @@ -858,15 +987,22 @@ impl Tracker { /// # Errors /// /// Will return a `key::Error` if unable to get any `auth_key`. - pub async fn verify_auth_key(&self, key: &Key) -> Result<(), auth::Error> { - // code-review: this function is public only because it's used in a test. - // We should change the test and make it private. + async fn verify_auth_key(&self, key: &Key) -> Result<(), auth::Error> { match self.keys.read().await.get(key) { None => Err(auth::Error::UnableToReadKey { location: Location::caller(), key: Box::new(key.clone()), }), - Some(key) => auth::verify(key), + Some(key) => match self.config.private_mode { + Some(private_mode) => { + if private_mode.check_keys_expiration { + return auth::verify_key_expiration(key); + } + + Ok(()) + } + None => auth::verify_key_expiration(key), + }, } } @@ -881,7 +1017,7 @@ impl Tracker { /// /// Will return a `database::Error` if unable to `load_keys` from the database. 
pub async fn load_keys_from_database(&self) -> Result<(), databases::error::Error> { - let keys_from_database = self.database.load_keys().await?; + let keys_from_database = self.database.load_keys()?; let mut keys = self.keys.write().await; keys.clear(); @@ -893,62 +1029,6 @@ impl Tracker { Ok(()) } - /// It authenticates and authorizes a UDP tracker request. - /// - /// # Context: Authentication and Authorization - /// - /// # Errors - /// - /// Will return a `torrent::Error::PeerKeyNotValid` if the `key` is not valid. - /// - /// Will return a `torrent::Error::PeerNotAuthenticated` if the `key` is `None`. - /// - /// Will return a `torrent::Error::TorrentNotWhitelisted` if the the Tracker is in listed mode and the `info_hash` is not whitelisted. - #[deprecated(since = "3.0.0", note = "please use `authenticate` and `authorize` instead")] - pub async fn authenticate_request(&self, info_hash: &InfoHash, key: &Option) -> Result<(), Error> { - // todo: this is a deprecated method. - // We're splitting authentication and authorization responsibilities. - // Use `authenticate` and `authorize` instead. - - // Authentication - - // no authentication needed in public mode - if self.is_public() { - return Ok(()); - } - - // check if auth_key is set and valid - if self.is_private() { - match key { - Some(key) => { - if let Err(e) = self.verify_auth_key(key).await { - return Err(Error::PeerKeyNotValid { - key: key.clone(), - source: (Arc::new(e) as Arc).into(), - }); - } - } - None => { - return Err(Error::PeerNotAuthenticated { - location: Location::caller(), - }); - } - } - } - - // Authorization - - // check if info_hash is whitelisted - if self.is_whitelisted() && !self.is_info_hash_whitelisted(info_hash).await { - return Err(Error::TorrentNotWhitelisted { - info_hash: *info_hash, - location: Location::caller(), - }); - } - - Ok(()) - } - /// Right now, there is only authorization when the `Tracker` runs in /// `listed` or `private_listed` modes. 
/// @@ -959,7 +1039,7 @@ impl Tracker { /// Will return an error if the tracker is running in `listed` mode /// and the infohash is not whitelisted. pub async fn authorize(&self, info_hash: &InfoHash) -> Result<(), Error> { - if !self.is_whitelisted() { + if !self.is_listed() { return Ok(()); } @@ -982,20 +1062,20 @@ impl Tracker { /// /// Will return a `database::Error` if unable to add the `info_hash` into the whitelist database. pub async fn add_torrent_to_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { - self.add_torrent_to_database_whitelist(info_hash).await?; + self.add_torrent_to_database_whitelist(info_hash)?; self.add_torrent_to_memory_whitelist(info_hash).await; Ok(()) } /// It adds a torrent to the whitelist if it has not been whitelisted previously - async fn add_torrent_to_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { - let is_whitelisted = self.database.is_info_hash_whitelisted(info_hash).await?; + fn add_torrent_to_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { + let is_whitelisted = self.database.is_info_hash_whitelisted(*info_hash)?; if is_whitelisted { return Ok(()); } - self.database.add_info_hash_to_whitelist(*info_hash).await?; + self.database.add_info_hash_to_whitelist(*info_hash)?; Ok(()) } @@ -1013,7 +1093,7 @@ impl Tracker { /// /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. pub async fn remove_torrent_from_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { - self.remove_torrent_from_database_whitelist(info_hash).await?; + self.remove_torrent_from_database_whitelist(info_hash)?; self.remove_torrent_from_memory_whitelist(info_hash).await; Ok(()) } @@ -1025,14 +1105,14 @@ impl Tracker { /// # Errors /// /// Will return a `database::Error` if unable to remove the `info_hash` from the whitelist database. 
- pub async fn remove_torrent_from_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { - let is_whitelisted = self.database.is_info_hash_whitelisted(info_hash).await?; + pub fn remove_torrent_from_database_whitelist(&self, info_hash: &InfoHash) -> Result<(), databases::error::Error> { + let is_whitelisted = self.database.is_info_hash_whitelisted(*info_hash)?; if !is_whitelisted { return Ok(()); } - self.database.remove_info_hash_from_whitelist(*info_hash).await?; + self.database.remove_info_hash_from_whitelist(*info_hash)?; Ok(()) } @@ -1059,7 +1139,7 @@ impl Tracker { /// /// Will return a `database::Error` if unable to load the list whitelisted `info_hash`s from the database. pub async fn load_whitelist_from_database(&self) -> Result<(), databases::error::Error> { - let whitelisted_torrents_from_database = self.database.load_whitelist().await?; + let whitelisted_torrents_from_database = self.database.load_whitelist()?; let mut whitelist = self.whitelist.write().await; whitelist.clear(); @@ -1074,7 +1154,7 @@ impl Tracker { /// It return the `Tracker` [`statistics::Metrics`]. /// /// # Context: Statistics - pub async fn get_stats(&self) -> RwLockReadGuard<'_, statistics::Metrics> { + pub async fn get_stats(&self) -> tokio::sync::RwLockReadGuard<'_, statistics::Metrics> { self.stats_repository.get_stats().await } @@ -1087,6 +1167,17 @@ impl Tracker { Some(stats_event_sender) => stats_event_sender.send_event(event).await, } } + + /// It drops the database tables. + /// + /// # Errors + /// + /// Will return `Err` if unable to drop tables. + pub fn drop_database_tables(&self) -> Result<(), databases::error::Error> { + // todo: this is only used for testing. We have to pass the database + // reference directly to the tests instead of via the tracker. 
+ self.database.drop_database_tables() + } } #[must_use] @@ -1107,31 +1198,32 @@ mod tests { use std::str::FromStr; use std::sync::Arc; - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use torrust_tracker_primitives::announce_event::AnnounceEvent; + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::{DurationSinceUnixEpoch, NumberOfBytes}; use torrust_tracker_test_helpers::configuration; - use crate::shared::bit_torrent::info_hash::InfoHash; - use crate::shared::clock::DurationSinceUnixEpoch; - use crate::tracker::peer::{self, Peer}; - use crate::tracker::services::tracker_factory; - use crate::tracker::{TorrentsMetrics, Tracker}; + use crate::core::peer::{self, Peer}; + use crate::core::services::tracker_factory; + use crate::core::{TorrentsMetrics, Tracker}; + use crate::shared::bit_torrent::info_hash::fixture::gen_seeded_infohash; fn public_tracker() -> Tracker { - tracker_factory(configuration::ephemeral_mode_public().into()) + tracker_factory(&configuration::ephemeral_public()) } fn private_tracker() -> Tracker { - tracker_factory(configuration::ephemeral_mode_private().into()) + tracker_factory(&configuration::ephemeral_private()) } fn whitelisted_tracker() -> Tracker { - tracker_factory(configuration::ephemeral_mode_whitelisted().into()) + tracker_factory(&configuration::ephemeral_listed()) } pub fn tracker_persisting_torrents_in_database() -> Tracker { let mut configuration = configuration::ephemeral(); - configuration.persistent_torrent_completed_stat = true; - tracker_factory(Arc::new(configuration)) + configuration.core.tracker_policy.persistent_torrent_completed_stat = true; + tracker_factory(&configuration) } fn sample_info_hash() -> InfoHash { @@ -1222,14 +1314,14 @@ mod tests { async fn should_collect_torrent_metrics() { let tracker = public_tracker(); - let torrents_metrics = tracker.get_torrents_metrics().await; + let torrents_metrics = tracker.get_torrents_metrics(); assert_eq!( torrents_metrics, 
TorrentsMetrics { - seeders: 0, - completed: 0, - leechers: 0, + complete: 0, + downloaded: 0, + incomplete: 0, torrents: 0 } ); @@ -1242,11 +1334,11 @@ mod tests { let info_hash = sample_info_hash(); let peer = sample_peer(); - tracker.update_torrent_with_peer_and_get_stats(&info_hash, &peer).await; + tracker.upsert_peer_and_get_stats(&info_hash, &peer); - let peers = tracker.get_all_torrent_peers(&info_hash).await; + let peers = tracker.get_torrent_peers(&info_hash); - assert_eq!(peers, vec![peer]); + assert_eq!(peers, vec![Arc::new(peer)]); } #[tokio::test] @@ -1256,9 +1348,9 @@ mod tests { let info_hash = sample_info_hash(); let peer = sample_peer(); - tracker.update_torrent_with_peer_and_get_stats(&info_hash, &peer).await; + tracker.upsert_peer_and_get_stats(&info_hash, &peer); - let peers = tracker.get_peers_for_peer(&info_hash, &peer).await; + let peers = tracker.get_peers_for(&info_hash, &peer); assert_eq!(peers, vec![]); } @@ -1267,28 +1359,54 @@ mod tests { async fn it_should_return_the_torrent_metrics() { let tracker = public_tracker(); - tracker - .update_torrent_with_peer_and_get_stats(&sample_info_hash(), &leecher()) - .await; + tracker.upsert_peer_and_get_stats(&sample_info_hash(), &leecher()); - let torrent_metrics = tracker.get_torrents_metrics().await; + let torrent_metrics = tracker.get_torrents_metrics(); assert_eq!( torrent_metrics, TorrentsMetrics { - seeders: 0, - completed: 0, - leechers: 1, + complete: 0, + downloaded: 0, + incomplete: 1, torrents: 1, } ); } + #[tokio::test] + async fn it_should_get_many_the_torrent_metrics() { + let tracker = public_tracker(); + + let start_time = std::time::Instant::now(); + for i in 0..1_000_000 { + tracker.upsert_peer_and_get_stats(&gen_seeded_infohash(&i), &leecher()); + } + let result_a = start_time.elapsed(); + + let start_time = std::time::Instant::now(); + let torrent_metrics = tracker.get_torrents_metrics(); + let result_b = start_time.elapsed(); + + assert_eq!( + (torrent_metrics), + 
(TorrentsMetrics { + complete: 0, + downloaded: 0, + incomplete: 1_000_000, + torrents: 1_000_000, + }), + "{result_a:?} {result_b:?}" + ); + } + mod for_all_config_modes { mod handling_an_announce_request { - use crate::tracker::tests::the_tracker::{ + use std::sync::Arc; + + use crate::core::tests::the_tracker::{ peer_ip, public_tracker, sample_info_hash, sample_peer, sample_peer_1, sample_peer_2, }; @@ -1296,7 +1414,7 @@ mod tests { use std::net::{IpAddr, Ipv4Addr}; - use crate::tracker::assign_ip_address_to_peer; + use crate::core::assign_ip_address_to_peer; #[test] fn using_the_source_ip_instead_of_the_ip_in_the_announce_request() { @@ -1312,7 +1430,7 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use std::str::FromStr; - use crate::tracker::assign_ip_address_to_peer; + use crate::core::assign_ip_address_to_peer; #[test] fn it_should_use_the_loopback_ip_if_the_tracker_does_not_have_the_external_ip_configuration() { @@ -1353,7 +1471,7 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use std::str::FromStr; - use crate::tracker::assign_ip_address_to_peer; + use crate::core::assign_ip_address_to_peer; #[test] fn it_should_use_the_loopback_ip_if_the_tracker_does_not_have_the_external_ip_configuration() { @@ -1396,7 +1514,7 @@ mod tests { let mut peer = sample_peer(); - let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip()).await; + let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip()); assert_eq!(announce_data.peers, vec![]); } @@ -1406,19 +1524,17 @@ mod tests { let tracker = public_tracker(); let mut previously_announced_peer = sample_peer_1(); - tracker - .announce(&sample_info_hash(), &mut previously_announced_peer, &peer_ip()) - .await; + tracker.announce(&sample_info_hash(), &mut previously_announced_peer, &peer_ip()); let mut peer = sample_peer_2(); - let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip()).await; + let announce_data = 
tracker.announce(&sample_info_hash(), &mut peer, &peer_ip()); - assert_eq!(announce_data.peers, vec![previously_announced_peer]); + assert_eq!(announce_data.peers, vec![Arc::new(previously_announced_peer)]); } mod it_should_update_the_swarm_stats_for_the_torrent { - use crate::tracker::tests::the_tracker::{ + use crate::core::tests::the_tracker::{ completed_peer, leecher, peer_ip, public_tracker, sample_info_hash, seeder, started_peer, }; @@ -1428,9 +1544,9 @@ mod tests { let mut peer = seeder(); - let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip()).await; + let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip()); - assert_eq!(announce_data.swarm_stats.seeders, 1); + assert_eq!(announce_data.stats.complete, 1); } #[tokio::test] @@ -1439,9 +1555,9 @@ mod tests { let mut peer = leecher(); - let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip()).await; + let announce_data = tracker.announce(&sample_info_hash(), &mut peer, &peer_ip()); - assert_eq!(announce_data.swarm_stats.leechers, 1); + assert_eq!(announce_data.stats.incomplete, 1); } #[tokio::test] @@ -1450,12 +1566,12 @@ mod tests { // We have to announce with "started" event because peer does not count if peer was not previously known let mut started_peer = started_peer(); - tracker.announce(&sample_info_hash(), &mut started_peer, &peer_ip()).await; + tracker.announce(&sample_info_hash(), &mut started_peer, &peer_ip()); let mut completed_peer = completed_peer(); - let announce_data = tracker.announce(&sample_info_hash(), &mut completed_peer, &peer_ip()).await; + let announce_data = tracker.announce(&sample_info_hash(), &mut completed_peer, &peer_ip()); - assert_eq!(announce_data.swarm_stats.completed, 1); + assert_eq!(announce_data.stats.downloaded, 1); } } } @@ -1464,9 +1580,10 @@ mod tests { use std::net::{IpAddr, Ipv4Addr}; - use crate::shared::bit_torrent::info_hash::InfoHash; - use 
crate::tracker::tests::the_tracker::{complete_peer, incomplete_peer, public_tracker}; - use crate::tracker::{ScrapeData, SwarmMetadata}; + use torrust_tracker_primitives::info_hash::InfoHash; + + use crate::core::tests::the_tracker::{complete_peer, incomplete_peer, public_tracker}; + use crate::core::{ScrapeData, SwarmMetadata}; #[tokio::test] async fn it_should_return_a_zeroed_swarm_metadata_for_the_requested_file_if_the_tracker_does_not_have_that_torrent( @@ -1492,15 +1609,11 @@ mod tests { // Announce a "complete" peer for the torrent let mut complete_peer = complete_peer(); - tracker - .announce(&info_hash, &mut complete_peer, &IpAddr::V4(Ipv4Addr::new(126, 0, 0, 10))) - .await; + tracker.announce(&info_hash, &mut complete_peer, &IpAddr::V4(Ipv4Addr::new(126, 0, 0, 10))); // Announce an "incomplete" peer for the torrent let mut incomplete_peer = incomplete_peer(); - tracker - .announce(&info_hash, &mut incomplete_peer, &IpAddr::V4(Ipv4Addr::new(126, 0, 0, 11))) - .await; + tracker.announce(&info_hash, &mut incomplete_peer, &IpAddr::V4(Ipv4Addr::new(126, 0, 0, 11))); // Scrape let scrape_data = tracker.scrape(&vec![info_hash]).await; @@ -1542,7 +1655,7 @@ mod tests { mod configured_as_whitelisted { mod handling_authorization { - use crate::tracker::tests::the_tracker::{sample_info_hash, whitelisted_tracker}; + use crate::core::tests::the_tracker::{sample_info_hash, whitelisted_tracker}; #[tokio::test] async fn it_should_authorize_the_announce_and_scrape_actions_on_whitelisted_torrents() { @@ -1569,7 +1682,7 @@ mod tests { } mod handling_the_torrent_whitelist { - use crate::tracker::tests::the_tracker::{sample_info_hash, whitelisted_tracker}; + use crate::core::tests::the_tracker::{sample_info_hash, whitelisted_tracker}; #[tokio::test] async fn it_should_add_a_torrent_to_the_whitelist() { @@ -1596,7 +1709,7 @@ mod tests { } mod persistence { - use crate::tracker::tests::the_tracker::{sample_info_hash, whitelisted_tracker}; + use 
crate::core::tests::the_tracker::{sample_info_hash, whitelisted_tracker}; #[tokio::test] async fn it_should_load_the_whitelist_from_the_database() { @@ -1621,12 +1734,13 @@ mod tests { mod handling_an_scrape_request { - use crate::shared::bit_torrent::info_hash::InfoHash; - use crate::tracker::tests::the_tracker::{ + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; + + use crate::core::tests::the_tracker::{ complete_peer, incomplete_peer, peer_ip, sample_info_hash, whitelisted_tracker, }; - use crate::tracker::torrent::SwarmMetadata; - use crate::tracker::ScrapeData; + use crate::core::ScrapeData; #[test] fn it_should_be_able_to_build_a_zeroed_scrape_data_for_a_list_of_info_hashes() { @@ -1647,11 +1761,11 @@ mod tests { let info_hash = "3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0".parse::().unwrap(); let mut peer = incomplete_peer(); - tracker.announce(&info_hash, &mut peer, &peer_ip()).await; + tracker.announce(&info_hash, &mut peer, &peer_ip()); // Announce twice to force non zeroed swarm metadata let mut peer = complete_peer(); - tracker.announce(&info_hash, &mut peer, &peer_ip()).await; + tracker.announce(&info_hash, &mut peer, &peer_ip()); let scrape_data = tracker.scrape(&vec![info_hash]).await; @@ -1670,28 +1784,8 @@ mod tests { use std::str::FromStr; use std::time::Duration; - use crate::tracker::auth; - use crate::tracker::tests::the_tracker::private_tracker; - - #[tokio::test] - async fn it_should_generate_the_expiring_authentication_keys() { - let tracker = private_tracker(); - - let key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); - - assert_eq!(key.valid_until, Duration::from_secs(100)); - } - - #[tokio::test] - async fn it_should_authenticate_a_peer_by_using_a_key() { - let tracker = private_tracker(); - - let expiring_key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); - - let result = tracker.authenticate(&expiring_key.key()).await; - - 
assert!(result.is_ok()); - } + use crate::core::auth::{self}; + use crate::core::tests::the_tracker::private_tracker; #[tokio::test] async fn it_should_fail_authenticating_a_peer_when_it_uses_an_unregistered_key() { @@ -1704,17 +1798,6 @@ mod tests { assert!(result.is_err()); } - #[tokio::test] - async fn it_should_verify_a_valid_authentication_key() { - // todo: this should not be tested directly because - // `verify_auth_key` should be a private method. - let tracker = private_tracker(); - - let expiring_key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); - - assert!(tracker.verify_auth_key(&expiring_key.key()).await.is_ok()); - } - #[tokio::test] async fn it_should_fail_verifying_an_unregistered_authentication_key() { let tracker = private_tracker(); @@ -1728,7 +1811,7 @@ mod tests { async fn it_should_remove_an_authentication_key() { let tracker = private_tracker(); - let expiring_key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); + let expiring_key = tracker.generate_auth_key(Some(Duration::from_secs(100))).await.unwrap(); let result = tracker.remove_auth_key(&expiring_key.key()).await; @@ -1740,7 +1823,7 @@ mod tests { async fn it_should_load_authentication_keys_from_the_database() { let tracker = private_tracker(); - let expiring_key = tracker.generate_auth_key(Duration::from_secs(100)).await.unwrap(); + let expiring_key = tracker.generate_auth_key(Some(Duration::from_secs(100))).await.unwrap(); // Remove the newly generated key in memory tracker.keys.write().await.remove(&expiring_key.key()); @@ -1750,6 +1833,192 @@ mod tests { assert!(result.is_ok()); assert!(tracker.verify_auth_key(&expiring_key.key()).await.is_ok()); } + + mod with_expiring_and { + + mod randomly_generated_keys { + use std::time::Duration; + + use torrust_tracker_clock::clock::Time; + use torrust_tracker_configuration::v2_0_0::core::PrivateMode; + + use crate::core::auth::Key; + use crate::core::tests::the_tracker::private_tracker; + use 
crate::CurrentClock; + + #[tokio::test] + async fn it_should_generate_the_key() { + let tracker = private_tracker(); + + let peer_key = tracker.generate_auth_key(Some(Duration::from_secs(100))).await.unwrap(); + + assert_eq!( + peer_key.valid_until, + Some(CurrentClock::now_add(&Duration::from_secs(100)).unwrap()) + ); + } + + #[tokio::test] + async fn it_should_authenticate_a_peer_with_the_key() { + let tracker = private_tracker(); + + let peer_key = tracker.generate_auth_key(Some(Duration::from_secs(100))).await.unwrap(); + + let result = tracker.authenticate(&peer_key.key()).await; + + assert!(result.is_ok()); + } + + #[tokio::test] + async fn it_should_accept_an_expired_key_when_checking_expiration_is_disabled_in_configuration() { + let mut tracker = private_tracker(); + + tracker.config.private_mode = Some(PrivateMode { + check_keys_expiration: false, + }); + + let past_timestamp = Duration::ZERO; + + let peer_key = tracker + .add_auth_key(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(), Some(past_timestamp)) + .await + .unwrap(); + + assert!(tracker.authenticate(&peer_key.key()).await.is_ok()); + } + } + + mod pre_generated_keys { + use std::time::Duration; + + use torrust_tracker_clock::clock::Time; + use torrust_tracker_configuration::v2_0_0::core::PrivateMode; + + use crate::core::auth::Key; + use crate::core::tests::the_tracker::private_tracker; + use crate::core::AddKeyRequest; + use crate::CurrentClock; + + #[tokio::test] + async fn it_should_add_a_pre_generated_key() { + let tracker = private_tracker(); + + let peer_key = tracker + .add_peer_key(AddKeyRequest { + opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), + opt_seconds_valid: Some(100), + }) + .await + .unwrap(); + + assert_eq!( + peer_key.valid_until, + Some(CurrentClock::now_add(&Duration::from_secs(100)).unwrap()) + ); + } + + #[tokio::test] + async fn it_should_authenticate_a_peer_with_the_key() { + let tracker = private_tracker(); + + let peer_key = 
tracker + .add_peer_key(AddKeyRequest { + opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), + opt_seconds_valid: Some(100), + }) + .await + .unwrap(); + + let result = tracker.authenticate(&peer_key.key()).await; + + assert!(result.is_ok()); + } + + #[tokio::test] + async fn it_should_accept_an_expired_key_when_checking_expiration_is_disabled_in_configuration() { + let mut tracker = private_tracker(); + + tracker.config.private_mode = Some(PrivateMode { + check_keys_expiration: false, + }); + + let peer_key = tracker + .add_peer_key(AddKeyRequest { + opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), + opt_seconds_valid: Some(0), + }) + .await + .unwrap(); + + assert!(tracker.authenticate(&peer_key.key()).await.is_ok()); + } + } + } + + mod with_permanent_and { + + mod randomly_generated_keys { + use crate::core::tests::the_tracker::private_tracker; + + #[tokio::test] + async fn it_should_generate_the_key() { + let tracker = private_tracker(); + + let peer_key = tracker.generate_permanent_auth_key().await.unwrap(); + + assert_eq!(peer_key.valid_until, None); + } + + #[tokio::test] + async fn it_should_authenticate_a_peer_with_the_key() { + let tracker = private_tracker(); + + let peer_key = tracker.generate_permanent_auth_key().await.unwrap(); + + let result = tracker.authenticate(&peer_key.key()).await; + + assert!(result.is_ok()); + } + } + + mod pre_generated_keys { + use crate::core::auth::Key; + use crate::core::tests::the_tracker::private_tracker; + use crate::core::AddKeyRequest; + + #[tokio::test] + async fn it_should_add_a_pre_generated_key() { + let tracker = private_tracker(); + + let peer_key = tracker + .add_peer_key(AddKeyRequest { + opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), + opt_seconds_valid: None, + }) + .await + .unwrap(); + + assert_eq!(peer_key.valid_until, None); + } + + #[tokio::test] + async fn it_should_authenticate_a_peer_with_the_key() { 
+ let tracker = private_tracker(); + + let peer_key = tracker + .add_peer_key(AddKeyRequest { + opt_key: Some(Key::new("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap().to_string()), + opt_seconds_valid: None, + }) + .await + .unwrap(); + + let result = tracker.authenticate(&peer_key.key()).await; + + assert!(result.is_ok()); + } + } + } } mod handling_an_announce_request {} @@ -1765,9 +2034,12 @@ mod tests { } mod handling_torrent_persistence { - use aquatic_udp_protocol::AnnounceEvent; - use crate::tracker::tests::the_tracker::{sample_info_hash, sample_peer, tracker_persisting_torrents_in_database}; + use torrust_tracker_primitives::announce_event::AnnounceEvent; + use torrust_tracker_torrent_repository::entry::EntrySync; + use torrust_tracker_torrent_repository::repository::Repository; + + use crate::core::tests::the_tracker::{sample_info_hash, sample_peer, tracker_persisting_torrents_in_database}; #[tokio::test] async fn it_should_persist_the_number_of_completed_peers_for_all_torrents_into_the_database() { @@ -1778,28 +2050,25 @@ mod tests { let mut peer = sample_peer(); peer.event = AnnounceEvent::Started; - let swarm_stats = tracker.update_torrent_with_peer_and_get_stats(&info_hash, &peer).await; - assert_eq!(swarm_stats.completed, 0); + let swarm_stats = tracker.upsert_peer_and_get_stats(&info_hash, &peer); + assert_eq!(swarm_stats.downloaded, 0); peer.event = AnnounceEvent::Completed; - let swarm_stats = tracker.update_torrent_with_peer_and_get_stats(&info_hash, &peer).await; - assert_eq!(swarm_stats.completed, 1); + let swarm_stats = tracker.upsert_peer_and_get_stats(&info_hash, &peer); + assert_eq!(swarm_stats.downloaded, 1); // Remove the newly updated torrent from memory - tracker.torrents.write().await.remove(&info_hash); - - tracker.load_torrents_from_database().await.unwrap(); + tracker.torrents.remove(&info_hash); - let torrents = tracker.get_torrents().await; - assert!(torrents.contains_key(&info_hash)); + tracker.load_torrents_from_database().unwrap(); 
- let torrent_entry = torrents.get(&info_hash).unwrap(); + let torrent_entry = tracker.torrents.get(&info_hash).expect("it should be able to get entry"); // It persists the number of completed peers. - assert_eq!(torrent_entry.completed, 1); + assert_eq!(torrent_entry.get_swarm_metadata().downloaded, 1); // It does not persist the peers - assert!(torrent_entry.peers.is_empty()); + assert!(torrent_entry.peers_is_empty()); } } } diff --git a/src/core/peer_tests.rs b/src/core/peer_tests.rs new file mode 100644 index 000000000..d30d73db3 --- /dev/null +++ b/src/core/peer_tests.rs @@ -0,0 +1,47 @@ +#![cfg(test)] + +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use torrust_tracker_clock::clock::stopped::Stopped as _; +use torrust_tracker_clock::clock::{self, Time}; +use torrust_tracker_primitives::announce_event::AnnounceEvent; +use torrust_tracker_primitives::{peer, NumberOfBytes}; + +use crate::CurrentClock; + +#[test] +fn it_should_be_serializable() { + clock::Stopped::local_set_to_unix_epoch(); + + let torrent_peer = peer::Peer { + peer_id: peer::Id(*b"-qB0000-000000000000"), + peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), + updated: CurrentClock::now(), + uploaded: NumberOfBytes(0), + downloaded: NumberOfBytes(0), + left: NumberOfBytes(0), + event: AnnounceEvent::Started, + }; + + let raw_json = serde_json::to_string(&torrent_peer).unwrap(); + + let expected_raw_json = r#" + { + "peer_id": { + "id": "0x2d7142303030302d303030303030303030303030", + "client": "qBittorrent" + }, + "peer_addr":"126.0.0.1:8080", + "updated":0, + "uploaded":0, + "downloaded":0, + "left":0, + "event":"Started" + } + "#; + + assert_eq!( + serde_json::from_str::(&raw_json).unwrap(), + serde_json::from_str::(expected_raw_json).unwrap() + ); +} diff --git a/src/tracker/services/mod.rs b/src/core/services/mod.rs similarity index 53% rename from src/tracker/services/mod.rs rename to src/core/services/mod.rs index deb07a439..166f40df4 100644 --- 
a/src/tracker/services/mod.rs +++ b/src/core/services/mod.rs @@ -2,8 +2,8 @@ //! //! There are two types of service: //! -//! - [Core tracker services](crate::tracker::services::torrent): related to the tracker main functionalities like getting info about torrents. -//! - [Services for statistics](crate::tracker::services::statistics): related to tracker metrics. Aggregate data about the tracker server. +//! - [Core tracker services](crate::core::services::torrent): related to the tracker main functionalities like getting info about torrents. +//! - [Services for statistics](crate::core::services::statistics): related to tracker metrics. Aggregate data about the tracker server. pub mod statistics; pub mod torrent; @@ -11,7 +11,7 @@ use std::sync::Arc; use torrust_tracker_configuration::Configuration; -use crate::tracker::Tracker; +use crate::core::Tracker; /// It returns a new tracker building its dependencies. /// @@ -19,12 +19,12 @@ use crate::tracker::Tracker; /// /// Will panic if tracker cannot be instantiated. #[must_use] -pub fn tracker_factory(config: Arc) -> Tracker { +pub fn tracker_factory(config: &Configuration) -> Tracker { // Initialize statistics - let (stats_event_sender, stats_repository) = statistics::setup::factory(config.tracker_usage_statistics); + let (stats_event_sender, stats_repository) = statistics::setup::factory(config.core.tracker_usage_statistics); // Initialize Torrust tracker - match Tracker::new(config, stats_event_sender, stats_repository) { + match Tracker::new(&Arc::new(config).core, stats_event_sender, stats_repository) { Ok(tracker) => tracker, Err(error) => { panic!("{}", error) diff --git a/src/tracker/services/statistics/mod.rs b/src/core/services/statistics/mod.rs similarity index 71% rename from src/tracker/services/statistics/mod.rs rename to src/core/services/statistics/mod.rs index 3761e38de..ee1c0c4fa 100644 --- a/src/tracker/services/statistics/mod.rs +++ b/src/core/services/statistics/mod.rs @@ -2,15 +2,15 @@ //! //! 
It includes: //! -//! - A [`factory`](crate::tracker::services::statistics::setup::factory) function to build the structs needed to collect the tracker metrics. -//! - A [`get_metrics`](crate::tracker::services::statistics::get_metrics) service to get the [`tracker metrics`](crate::tracker::statistics::Metrics). +//! - A [`factory`](crate::core::services::statistics::setup::factory) function to build the structs needed to collect the tracker metrics. +//! - A [`get_metrics`] service to get the [`tracker metrics`](crate::core::statistics::Metrics). //! //! Tracker metrics are collected using a Publisher-Subscribe pattern. //! //! The factory function builds two structs: //! -//! - An statistics [`EventSender`](crate::tracker::statistics::EventSender) -//! - An statistics [`Repo`](crate::tracker::statistics::Repo) +//! - An statistics [`EventSender`](crate::core::statistics::EventSender) +//! - An statistics [`Repo`](crate::core::statistics::Repo) //! //! ```text //! let (stats_event_sender, stats_repository) = factory(tracker_usage_statistics); @@ -21,7 +21,7 @@ //! There is an event listener that is receiving all the events and processing them with an event handler. //! Then, the event handler updates the metrics depending on the received event. //! -//! For example, if you send the event [`Event::Udp4Connect`](crate::tracker::statistics::Event::Udp4Connect): +//! For example, if you send the event [`Event::Udp4Connect`](crate::core::statistics::Event::Udp4Connect): //! //! ```text //! let result = event_sender.send_event(Event::Udp4Connect).await; @@ -40,8 +40,10 @@ pub mod setup; use std::sync::Arc; -use crate::tracker::statistics::Metrics; -use crate::tracker::{TorrentsMetrics, Tracker}; +use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; + +use crate::core::statistics::Metrics; +use crate::core::Tracker; /// All the metrics collected by the tracker. 
#[derive(Debug, PartialEq)] @@ -57,9 +59,9 @@ pub struct TrackerMetrics { pub protocol_metrics: Metrics, } -/// It returns all the [`TrackerMetrics`](crate::tracker::services::statistics::TrackerMetrics) +/// It returns all the [`TrackerMetrics`] pub async fn get_metrics(tracker: Arc) -> TrackerMetrics { - let torrents_metrics = tracker.get_torrents_metrics().await; + let torrents_metrics = tracker.get_torrents_metrics(); let stats = tracker.get_stats().await; TrackerMetrics { @@ -86,27 +88,28 @@ mod tests { use std::sync::Arc; use torrust_tracker_configuration::Configuration; + use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; use torrust_tracker_test_helpers::configuration; - use crate::tracker; - use crate::tracker::services::statistics::{get_metrics, TrackerMetrics}; - use crate::tracker::services::tracker_factory; + use crate::core; + use crate::core::services::statistics::{get_metrics, TrackerMetrics}; + use crate::core::services::tracker_factory; - pub fn tracker_configuration() -> Arc { - Arc::new(configuration::ephemeral()) + pub fn tracker_configuration() -> Configuration { + configuration::ephemeral() } #[tokio::test] async fn the_statistics_service_should_return_the_tracker_metrics() { - let tracker = Arc::new(tracker_factory(tracker_configuration())); + let tracker = Arc::new(tracker_factory(&tracker_configuration())); let tracker_metrics = get_metrics(tracker.clone()).await; assert_eq!( tracker_metrics, TrackerMetrics { - torrents_metrics: tracker::TorrentsMetrics::default(), - protocol_metrics: tracker::statistics::Metrics::default(), + torrents_metrics: TorrentsMetrics::default(), + protocol_metrics: core::statistics::Metrics::default(), } ); } diff --git a/src/tracker/services/statistics/setup.rs b/src/core/services/statistics/setup.rs similarity index 75% rename from src/tracker/services/statistics/setup.rs rename to src/core/services/statistics/setup.rs index b8d325ab4..37603852b 100644 --- 
a/src/tracker/services/statistics/setup.rs +++ b/src/core/services/statistics/setup.rs @@ -1,14 +1,14 @@ //! Setup for the tracker statistics. //! -//! The [`factory`](crate::tracker::services::statistics::setup::factory) function builds the structs needed for handling the tracker metrics. -use crate::tracker::statistics; +//! The [`factory`] function builds the structs needed for handling the tracker metrics. +use crate::core::statistics; /// It builds the structs needed for handling the tracker metrics. /// /// It returns: /// -/// - An statistics [`EventSender`](crate::tracker::statistics::EventSender) that allows you to send events related to statistics. -/// - An statistics [`Repo`](crate::tracker::statistics::Repo) which is an in-memory repository for the tracker metrics. +/// - An statistics [`EventSender`](crate::core::statistics::EventSender) that allows you to send events related to statistics. +/// - An statistics [`Repo`](crate::core::statistics::Repo) which is an in-memory repository for the tracker metrics. /// /// When the input argument `tracker_usage_statistics`is false the setup does not run the event listeners, consequently the statistics /// events are sent are received but not dispatched to the handler. diff --git a/src/tracker/services/torrent.rs b/src/core/services/torrent.rs similarity index 54% rename from src/tracker/services/torrent.rs rename to src/core/services/torrent.rs index 0db044d07..1c337a41d 100644 --- a/src/tracker/services/torrent.rs +++ b/src/core/services/torrent.rs @@ -2,15 +2,17 @@ //! //! There are two services: //! -//! - [`get_torrent_info`](crate::tracker::services::torrent::get_torrent_info): it returns all the data about one torrent. -//! - [`get_torrents`](crate::tracker::services::torrent::get_torrents): it returns data about some torrent in bulk excluding the peer list. +//! - [`get_torrent_info`]: it returns all the data about one torrent. +//! 
- [`get_torrents`]: it returns data about some torrent in bulk excluding the peer list. use std::sync::Arc; -use serde::Deserialize; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; +use torrust_tracker_primitives::peer; +use torrust_tracker_torrent_repository::entry::EntrySync; +use torrust_tracker_torrent_repository::repository::Repository; -use crate::shared::bit_torrent::info_hash::InfoHash; -use crate::tracker::peer::Peer; -use crate::tracker::Tracker; +use crate::core::Tracker; /// It contains all the information the tracker has about a torrent #[derive(Debug, PartialEq)] @@ -24,12 +26,12 @@ pub struct Info { /// The total number of leechers for this torrent. Peers that actively downloading this torrent pub leechers: u64, /// The swarm: the list of peers that are actively trying to download or serving this torrent - pub peers: Option>, + pub peers: Option>, } /// It contains only part of the information the tracker has about a torrent /// -/// It contains the same data as [Info](crate::tracker::services::torrent::Info) but without the list of peers in the swarm. +/// It contains the same data as [Info] but without the list of peers in the swarm. #[derive(Debug, PartialEq, Clone)] pub struct BasicInfo { /// The infohash of the torrent this data is related to @@ -42,107 +44,69 @@ pub struct BasicInfo { pub leechers: u64, } -/// A struct to keep information about the page when results are being paginated -#[derive(Deserialize)] -pub struct Pagination { - /// The page number, starting at 0 - pub offset: u32, - /// Page size. 
The number of results per page - pub limit: u32, -} - -impl Pagination { - #[must_use] - pub fn new(offset: u32, limit: u32) -> Self { - Self { offset, limit } - } - - #[must_use] - pub fn new_with_options(offset_option: Option, limit_option: Option) -> Self { - let offset = match offset_option { - Some(offset) => offset, - None => Pagination::default_offset(), - }; - let limit = match limit_option { - Some(offset) => offset, - None => Pagination::default_limit(), - }; - - Self { offset, limit } - } - - #[must_use] - pub fn default_offset() -> u32 { - 0 - } - - #[must_use] - pub fn default_limit() -> u32 { - 4000 - } -} - -impl Default for Pagination { - fn default() -> Self { - Self { - offset: Self::default_offset(), - limit: Self::default_limit(), - } - } -} - -/// It returns all the information the tracker has about one torrent in a [Info](crate::tracker::services::torrent::Info) struct. +/// It returns all the information the tracker has about one torrent in a [Info] struct. pub async fn get_torrent_info(tracker: Arc, info_hash: &InfoHash) -> Option { - let db = tracker.get_torrents().await; + let torrent_entry_option = tracker.torrents.get(info_hash); - let torrent_entry_option = db.get(info_hash); + let torrent_entry = torrent_entry_option?; - let Some(torrent_entry) = torrent_entry_option else { - return None; - }; + let stats = torrent_entry.get_swarm_metadata(); - let (seeders, completed, leechers) = torrent_entry.get_stats(); - - let peers = torrent_entry.get_all_peers(); + let peers = torrent_entry.get_peers(None); let peers = Some(peers.iter().map(|peer| (**peer)).collect()); Some(Info { info_hash: *info_hash, - seeders: u64::from(seeders), - completed: u64::from(completed), - leechers: u64::from(leechers), + seeders: u64::from(stats.complete), + completed: u64::from(stats.downloaded), + leechers: u64::from(stats.incomplete), peers, }) } -/// It returns all the information the tracker has about multiple torrents in a 
[`BasicInfo`](crate::tracker::services::torrent::BasicInfo) struct, excluding the peer list. -pub async fn get_torrents(tracker: Arc, pagination: &Pagination) -> Vec { - let db = tracker.get_torrents().await; +/// It returns all the information the tracker has about multiple torrents in a [`BasicInfo`] struct, excluding the peer list. +pub async fn get_torrents_page(tracker: Arc, pagination: Option<&Pagination>) -> Vec { + let mut basic_infos: Vec = vec![]; + + for (info_hash, torrent_entry) in tracker.torrents.get_paginated(pagination) { + let stats = torrent_entry.get_swarm_metadata(); - db.iter() - .map(|(info_hash, torrent_entry)| { - let (seeders, completed, leechers) = torrent_entry.get_stats(); - BasicInfo { + basic_infos.push(BasicInfo { + info_hash, + seeders: u64::from(stats.complete), + completed: u64::from(stats.downloaded), + leechers: u64::from(stats.incomplete), + }); + } + + basic_infos +} + +/// It returns all the information the tracker has about multiple torrents in a [`BasicInfo`] struct, excluding the peer list. 
+pub async fn get_torrents(tracker: Arc, info_hashes: &[InfoHash]) -> Vec { + let mut basic_infos: Vec = vec![]; + + for info_hash in info_hashes { + if let Some(stats) = tracker.torrents.get(info_hash).map(|t| t.get_swarm_metadata()) { + basic_infos.push(BasicInfo { info_hash: *info_hash, - seeders: u64::from(seeders), - completed: u64::from(completed), - leechers: u64::from(leechers), - } - }) - .skip(pagination.offset as usize) - .take(pagination.limit as usize) - .collect() + seeders: u64::from(stats.complete), + completed: u64::from(stats.downloaded), + leechers: u64::from(stats.incomplete), + }); + } + } + + basic_infos } #[cfg(test)] mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - - use crate::shared::clock::DurationSinceUnixEpoch; - use crate::tracker::peer; + use torrust_tracker_primitives::announce_event::AnnounceEvent; + use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfBytes}; fn sample_peer() -> peer::Peer { peer::Peer { @@ -162,20 +126,20 @@ mod tests { use std::sync::Arc; use torrust_tracker_configuration::Configuration; + use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; - use crate::shared::bit_torrent::info_hash::InfoHash; - use crate::tracker::services::torrent::tests::sample_peer; - use crate::tracker::services::torrent::{get_torrent_info, Info}; - use crate::tracker::services::tracker_factory; + use crate::core::services::torrent::tests::sample_peer; + use crate::core::services::torrent::{get_torrent_info, Info}; + use crate::core::services::tracker_factory; - pub fn tracker_configuration() -> Arc { - Arc::new(configuration::ephemeral()) + pub fn tracker_configuration() -> Configuration { + configuration::ephemeral() } #[tokio::test] async fn should_return_none_if_the_tracker_does_not_have_the_torrent() { - let tracker = Arc::new(tracker_factory(tracker_configuration())); + let tracker = 
Arc::new(tracker_factory(&tracker_configuration())); let torrent_info = get_torrent_info( tracker.clone(), @@ -188,13 +152,11 @@ mod tests { #[tokio::test] async fn should_return_the_torrent_info_if_the_tracker_has_the_torrent() { - let tracker = Arc::new(tracker_factory(tracker_configuration())); + let tracker = Arc::new(tracker_factory(&tracker_configuration())); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); - tracker - .update_torrent_with_peer_and_get_stats(&info_hash, &sample_peer()) - .await; + tracker.upsert_peer_and_get_stats(&info_hash, &sample_peer()); let torrent_info = get_torrent_info(tracker.clone(), &info_hash).await.unwrap(); @@ -217,38 +179,36 @@ mod tests { use std::sync::Arc; use torrust_tracker_configuration::Configuration; + use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; - use crate::shared::bit_torrent::info_hash::InfoHash; - use crate::tracker::services::torrent::tests::sample_peer; - use crate::tracker::services::torrent::{get_torrents, BasicInfo, Pagination}; - use crate::tracker::services::tracker_factory; + use crate::core::services::torrent::tests::sample_peer; + use crate::core::services::torrent::{get_torrents_page, BasicInfo, Pagination}; + use crate::core::services::tracker_factory; - pub fn tracker_configuration() -> Arc { - Arc::new(configuration::ephemeral()) + pub fn tracker_configuration() -> Configuration { + configuration::ephemeral() } #[tokio::test] async fn should_return_an_empty_result_if_the_tracker_does_not_have_any_torrent() { - let tracker = Arc::new(tracker_factory(tracker_configuration())); + let tracker = Arc::new(tracker_factory(&tracker_configuration())); - let torrents = get_torrents(tracker.clone(), &Pagination::default()).await; + let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::default())).await; assert_eq!(torrents, vec![]); } #[tokio::test] async fn 
should_return_a_summarized_info_for_all_torrents() { - let tracker = Arc::new(tracker_factory(tracker_configuration())); + let tracker = Arc::new(tracker_factory(&tracker_configuration())); let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); - tracker - .update_torrent_with_peer_and_get_stats(&info_hash, &sample_peer()) - .await; + tracker.upsert_peer_and_get_stats(&info_hash, &sample_peer()); - let torrents = get_torrents(tracker.clone(), &Pagination::default()).await; + let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::default())).await; assert_eq!( torrents, @@ -263,48 +223,40 @@ mod tests { #[tokio::test] async fn should_allow_limiting_the_number_of_torrents_in_the_result() { - let tracker = Arc::new(tracker_factory(tracker_configuration())); + let tracker = Arc::new(tracker_factory(&tracker_configuration())); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); let info_hash2 = InfoHash::from_str(&hash2).unwrap(); - tracker - .update_torrent_with_peer_and_get_stats(&info_hash1, &sample_peer()) - .await; - tracker - .update_torrent_with_peer_and_get_stats(&info_hash2, &sample_peer()) - .await; + tracker.upsert_peer_and_get_stats(&info_hash1, &sample_peer()); + tracker.upsert_peer_and_get_stats(&info_hash2, &sample_peer()); let offset = 0; let limit = 1; - let torrents = get_torrents(tracker.clone(), &Pagination::new(offset, limit)).await; + let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::new(offset, limit))).await; assert_eq!(torrents.len(), 1); } #[tokio::test] async fn should_allow_using_pagination_in_the_result() { - let tracker = Arc::new(tracker_factory(tracker_configuration())); + let tracker = Arc::new(tracker_factory(&tracker_configuration())); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let 
info_hash1 = InfoHash::from_str(&hash1).unwrap(); let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); let info_hash2 = InfoHash::from_str(&hash2).unwrap(); - tracker - .update_torrent_with_peer_and_get_stats(&info_hash1, &sample_peer()) - .await; - tracker - .update_torrent_with_peer_and_get_stats(&info_hash2, &sample_peer()) - .await; + tracker.upsert_peer_and_get_stats(&info_hash1, &sample_peer()); + tracker.upsert_peer_and_get_stats(&info_hash2, &sample_peer()); let offset = 1; let limit = 4000; - let torrents = get_torrents(tracker.clone(), &Pagination::new(offset, limit)).await; + let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::new(offset, limit))).await; assert_eq!(torrents.len(), 1); assert_eq!( @@ -320,21 +272,17 @@ mod tests { #[tokio::test] async fn should_return_torrents_ordered_by_info_hash() { - let tracker = Arc::new(tracker_factory(tracker_configuration())); + let tracker = Arc::new(tracker_factory(&tracker_configuration())); let hash1 = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash1 = InfoHash::from_str(&hash1).unwrap(); - tracker - .update_torrent_with_peer_and_get_stats(&info_hash1, &sample_peer()) - .await; + tracker.upsert_peer_and_get_stats(&info_hash1, &sample_peer()); let hash2 = "03840548643af2a7b63a9f5cbca348bc7150ca3a".to_owned(); let info_hash2 = InfoHash::from_str(&hash2).unwrap(); - tracker - .update_torrent_with_peer_and_get_stats(&info_hash2, &sample_peer()) - .await; + tracker.upsert_peer_and_get_stats(&info_hash2, &sample_peer()); - let torrents = get_torrents(tracker.clone(), &Pagination::default()).await; + let torrents = get_torrents_page(tracker.clone(), Some(&Pagination::default())).await; assert_eq!( torrents, diff --git a/src/tracker/statistics.rs b/src/core/statistics.rs similarity index 93% rename from src/tracker/statistics.rs rename to src/core/statistics.rs index 85cc4f255..bcafda17f 100644 --- a/src/tracker/statistics.rs +++ b/src/core/statistics.rs @@ -13,18 
+13,19 @@ //! //! The data is collected by using an `event-sender -> event listener` model. //! -//! The tracker uses an [`statistics::EventSender`](crate::tracker::statistics::EventSender) instance to send an event. -//! The [`statistics::Keeper`](crate::tracker::statistics::Keeper) listens to new events and uses the [`statistics::Repo`](crate::tracker::statistics::Repo) to upgrade and store metrics. +//! The tracker uses an [`statistics::EventSender`](crate::core::statistics::EventSender) instance to send an event. +//! The [`statistics::Keeper`](crate::core::statistics::Keeper) listens to new events and uses the [`statistics::Repo`](crate::core::statistics::Repo) to upgrade and store metrics. //! -//! See the [`statistics::Event`](crate::tracker::statistics::Event) enum to check which events are available. +//! See the [`statistics::Event`](crate::core::statistics::Event) enum to check which events are available. use std::sync::Arc; -use async_trait::async_trait; -use log::debug; +use futures::future::BoxFuture; +use futures::FutureExt; #[cfg(test)] use mockall::{automock, predicate::str}; use tokio::sync::mpsc::error::SendError; use tokio::sync::{mpsc, RwLock, RwLockReadGuard}; +use tracing::debug; const CHANNEL_BUFFER_SIZE: usize = 65_535; @@ -185,24 +186,22 @@ async fn event_handler(event: Event, stats_repository: &Repo) { } /// A trait to allow sending statistics events -#[async_trait] #[cfg_attr(test, automock)] pub trait EventSender: Sync + Send { - async fn send_event(&self, event: Event) -> Option>>; + fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>>; } -/// An [`statistics::EventSender`](crate::tracker::statistics::EventSender) implementation. +/// An [`statistics::EventSender`](crate::core::statistics::EventSender) implementation. /// /// It uses a channel sender to send the statistic events. 
The channel is created by a -/// [`statistics::Keeper`](crate::tracker::statistics::Keeper) +/// [`statistics::Keeper`](crate::core::statistics::Keeper) pub struct Sender { sender: mpsc::Sender, } -#[async_trait] impl EventSender for Sender { - async fn send_event(&self, event: Event) -> Option>> { - Some(self.sender.send(event).await) + fn send_event(&self, event: Event) -> BoxFuture<'_, Option>>> { + async move { Some(self.sender.send(event).await) }.boxed() } } @@ -307,7 +306,7 @@ impl Repo { mod tests { mod stats_tracker { - use crate::tracker::statistics::{Event, Keeper, Metrics}; + use crate::core::statistics::{Event, Keeper, Metrics}; #[tokio::test] async fn should_contain_the_tracker_statistics() { @@ -331,7 +330,7 @@ mod tests { } mod event_handler { - use crate::tracker::statistics::{event_handler, Event, Repo}; + use crate::core::statistics::{event_handler, Event, Repo}; #[tokio::test] async fn should_increase_the_tcp4_announces_counter_when_it_receives_a_tcp4_announce_event() { diff --git a/src/core/torrent/mod.rs b/src/core/torrent/mod.rs new file mode 100644 index 000000000..38311864b --- /dev/null +++ b/src/core/torrent/mod.rs @@ -0,0 +1,30 @@ +//! Structs to store the swarm data. +//! +//! There are two main data structures: +//! +//! - A torrent [`Entry`](torrust_tracker_torrent_repository::entry::Entry): it contains all the information stored by the tracker for one torrent. +//! - The [`SwarmMetadata`](torrust_tracker_primitives::swarm_metadata::SwarmMetadata): it contains aggregate information that can be derived from the torrent entries. +//! +//! A "swarm" is a network of peers that are trying to download the same torrent. +//! +//! The torrent entry contains the "swarm" data, which is basically the list of peers in the swarm. +//! That's the most valuable information the peers want to get from the tracker, because it allows them to +//! start downloading torrent from those peers. +//! +//!
The "swarm metadata" contains aggregate data derived from the torrent entries. There two types of data: +//! +//! - For **active peers**: metrics related to the current active peers in the swarm. +//! - **Historical data**: since the tracker started running. +//! +//! The tracker collects metrics for: +//! +//! - The number of peers that have completed downloading the torrent since the tracker started collecting metrics. +//! - The number of peers that have completed downloading the torrent and are still active, that means they are actively participating in the network, +//! by announcing themselves periodically to the tracker. Since they have completed downloading they have a full copy of the torrent data. Peers with a +//! full copy of the data are called "seeders". +//! - The number of peers that have NOT completed downloading the torrent and are still active, that means they are actively participating in the network. +//! Peer that don not have a full copy of the torrent data are called "leechers". +//! +use torrust_tracker_torrent_repository::TorrentsSkipMapMutexStd; + +pub type Torrents = TorrentsSkipMapMutexStd; // Currently Used diff --git a/src/lib.rs b/src/lib.rs index c862d373a..d242ac80e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -103,35 +103,53 @@ //! ``` //! //! > **NOTICE**: those are the commands for `Ubuntu`. If you are using a -//! different OS, you will need to install the equivalent packages. Please -//! refer to the documentation of your OS. +//! > different OS, you will need to install the equivalent packages. Please +//! > refer to the documentation of your OS. //! //! With the default configuration you will need to create the `storage` directory: //! //! ```text -//! storage/ -//! ├── database -//! │   └── data.db -//! └── tls -//! ├── localhost.crt -//! └── localhost.key +//! ./storage/ +//! └── tracker +//! ├── etc +//! ├── lib +//! │   ├── database +//! │   │   └── sqlite3.db +//! │   └── tls +//! └── log //! ``` //! //! 
The default configuration expects a directory `./storage/tracker/lib/database` to be writable by the tracker process. //! -//! By default the tracker uses `SQLite` and the database file name `data.db`. +//! By default the tracker uses `SQLite` and the database file name `sqlite3.db`. //! //! You only need the `tls` directory in case you are setting up SSL for the HTTP tracker or the tracker API. //! Visit [`HTTP`](crate::servers::http) or [`API`](crate::servers::apis) if you want to know how you can use HTTPS. //! //! ## Install from sources //! +//! First, you need to create a folder to clone the repository. +//! +//! ```text +//! cd /tmp +//! mkdir torrust +//! ``` +//! //! ```text //! git clone https://github.com/torrust/torrust-tracker.git \ //! && cd torrust-tracker \ //! && cargo build --release \ +//! && mkdir -p ./storage/tracker/etc \ //! && mkdir -p ./storage/tracker/lib/database \ -//! && mkdir -p ./storage/tracker/lib/tls +//! && mkdir -p ./storage/tracker/lib/tls \ +//! && mkdir -p ./storage/tracker/log +//! ``` +//! +//! To run the tracker we will have to use the command "cargo run"; this will +//! compile and after being compiled it will start running the tracker. +//! +//! ```text +//! cargo run //! ``` //! //! ## Run with docker @@ -141,69 +159,75 @@ //! //! # Configuration //! -//! In order to run the tracker you need to provide the configuration. If you run the tracker without providing the configuration, -//! the tracker will generate the default configuration the first time you run it. It will generate a `tracker.toml` file with -//! in the root directory. +//! In order to run the tracker you need to provide the configuration. If you +//! run the tracker without providing the configuration, the tracker will +//! generate the default configuration the first time you run it. It will +//! generate a `tracker.toml` file in the root directory. //! //! The default configuration is: //! //! ```toml -//! log_level = "info" -//! mode = "public" -//!
db_driver = "Sqlite3" -//! db_path = "./storage/tracker/lib/database/sqlite3.db" -//! announce_interval = 120 -//! min_announce_interval = 120 -//! max_peer_timeout = 900 -//! on_reverse_proxy = false -//! external_ip = "0.0.0.0" -//! tracker_usage_statistics = true -//! persistent_torrent_completed_stat = false +//! [logging] +//! threshold = "info" +//! +//! [core] //! inactive_peer_cleanup_interval = 600 -//! remove_peerless_torrents = true +//! listed = false +//! private = false +//! tracker_usage_statistics = true //! -//! [[udp_trackers]] -//! enabled = false -//! bind_address = "0.0.0.0:6969" +//! [core.announce_policy] +//! interval = 120 +//! interval_min = 120 //! -//! [[http_trackers]] -//! enabled = false -//! bind_address = "0.0.0.0:7070" -//! ssl_enabled = false -//! ssl_cert_path = "" -//! ssl_key_path = "" +//! [core.database] +//! driver = "sqlite3" +//! path = "./storage/tracker/lib/database/sqlite3.db" //! -//! [http_api] -//! enabled = true -//! bind_address = "127.0.0.1:1212" -//! ssl_enabled = false -//! ssl_cert_path = "" -//! ssl_key_path = "" +//! [core.net] +//! external_ip = "0.0.0.0" +//! on_reverse_proxy = false //! -//! [http_api.access_tokens] -//! admin = "MyAccessToken" -//! ``` +//! [core.tracker_policy] +//! max_peer_timeout = 900 +//! persistent_torrent_completed_stat = false +//! remove_peerless_torrents = true //! -//! The default configuration includes one disabled UDP server, one disabled HTTP server and the enabled API. +//! [health_check_api] +//! bind_address = "127.0.0.1:1313" +//!``` //! -//! For more information about each service and options you can visit the documentation for the [torrust-tracker-configuration crate](https://docs.rs/torrust-tracker-configuration). +//! The default configuration includes one disabled UDP server, one disabled +//! HTTP server and the enabled API. //! -//! 
Alternatively to the `tracker.toml` file you can use one environment variable `TORRUST_TRACKER_CONFIG` to pass the configuration to the tracker: +//! For more information about each service and options you can visit the +//! documentation for the [torrust-tracker-configuration crate](https://docs.rs/torrust-tracker-configuration). +//! +//! Alternatively to the `tracker.toml` file you can use one environment +//! variable `TORRUST_TRACKER_CONFIG_TOML` to pass the configuration to the tracker: //! //! ```text -//! TORRUST_TRACKER_CONFIG=$(cat tracker.toml) -//! cargo run +//! TORRUST_TRACKER_CONFIG_TOML=$(cat ./share/default/config/tracker.development.sqlite3.toml) ./target/release/torrust-tracker //! ``` //! -//! In the previous example you are just setting the env var with the contents of the `tracker.toml` file. +//! In the previous example you are just setting the env var with the contents +//! of the `tracker.toml` file. //! -//! The env var contains the same data as the `tracker.toml`. It's particularly useful in you are [running the tracker with docker](https://github.com/torrust/torrust-tracker/tree/develop/docker). +//! The env var contains the same data as the `tracker.toml`. It's particularly +//! useful if you are [running the tracker with docker](https://github.com/torrust/torrust-tracker/tree/develop/docker). //! -//! > NOTE: The `TORRUST_TRACKER_CONFIG` env var has priority over the `tracker.toml` file. +//! > NOTICE: The `TORRUST_TRACKER_CONFIG_TOML` env var has priority over the `tracker.toml` file. +//! +//! By default, if you don’t specify any `tracker.toml` file, the application +//! will use `./share/default/config/tracker.development.sqlite3.toml`. +//! +//! > IMPORTANT: Every time you change the configuration you need to restart the +//! > service. //! //! # Usage //! -//! Running the tracker with the default configuration and enabling the UDP and HTTP trackers will expose the services on these URLs: +//!
Running the tracker with the default configuration and enabling the UDP and +//! HTTP trackers will expose the services on these URLs: //! //! - REST API: //! - UDP tracker: @@ -215,11 +239,10 @@ //! //! ```toml //! [http_api] -//! enabled = true //! bind_address = "127.0.0.1:1212" -//! ssl_enabled = false -//! ssl_cert_path = "" -//! ssl_key_path = "" +//! +//! [http_api.access_tokens] +//! admin = "MyAccessToken" //! ``` //! //! By default it's enabled on port `1212`. You also need to add access tokens in the configuration: @@ -275,7 +298,6 @@ //! //! ```toml //! [[http_trackers]] -//! enabled = true //! bind_address = "0.0.0.0:7070" //! ``` //! @@ -370,7 +392,6 @@ //! //! ```toml //! [[udp_trackers]] -//! enabled = true //! bind_address = "0.0.0.0:6969" //! ``` //! @@ -384,7 +405,7 @@ //! //! Torrust Tracker has four main components: //! -//! - The core [`tracker`](crate::tracker) +//! - The core tracker [`core`] //! - The tracker REST [`API`](crate::servers::apis) //! - The [`UDP`](crate::servers::udp) tracker //! - The [`HTTP`](crate::servers::http) tracker @@ -402,7 +423,7 @@ //! - Statistics //! - Persistence //! -//! See [`tracker`](crate::tracker) for more details on the [`tracker`](crate::tracker) module. +//! See [`core`] for more details on the [`core`] module. //! //! ## Tracker API //! @@ -466,11 +487,36 @@ //! //! In addition to the production code documentation you can find a lot of //! examples on the integration and unit tests. + +use torrust_tracker_clock::{clock, time_extent}; + pub mod app; pub mod bootstrap; +pub mod console; +pub mod core; pub mod servers; pub mod shared; -pub mod tracker; #[macro_use] extern crate lazy_static; + +/// This code needs to be copied into each crate. +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Working; + +/// Stopped version, for testing. 
+#[cfg(test)] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Stopped; + +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type DefaultTimeExtentMaker = time_extent::WorkingTimeExtentMaker; + +/// Stopped version, for testing. +#[cfg(test)] +#[allow(dead_code)] +pub(crate) type DefaultTimeExtentMaker = time_extent::StoppedTimeExtentMaker; diff --git a/src/main.rs b/src/main.rs index 87c0fc367..ab2af65e2 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,16 +1,16 @@ -use log::info; use torrust_tracker::{app, bootstrap}; +use tracing::info; #[tokio::main] async fn main() { let (config, tracker) = bootstrap::app::setup(); - let jobs = app::start(config.clone(), tracker.clone()).await; + let jobs = app::start(&config, tracker).await; // handle the signals tokio::select! { _ = tokio::signal::ctrl_c() => { - info!("Torrust shutting down.."); + info!("Torrust shutting down ..."); // Await for all jobs to shutdown futures::future::join_all(jobs).await; diff --git a/src/servers/apis/mod.rs b/src/servers/apis/mod.rs index afed9ff12..0451b46c0 100644 --- a/src/servers/apis/mod.rs +++ b/src/servers/apis/mod.rs @@ -1,9 +1,9 @@ //! The tracker REST API with all its versions. //! //! > **NOTICE**: This API should not be exposed directly to the internet, it is -//! intended for internal use only. +//! > intended for internal use only. //! -//! Endpoints for the latest API: [v1](crate::servers::apis::v1). +//! Endpoints for the latest API: [v1]. //! //! All endpoints require an authorization token which must be set in the //! configuration before running the tracker. The default configuration uses @@ -25,9 +25,9 @@ //! //! ```toml //! [http_api] -//! enabled = true //! bind_address = "0.0.0.0:1212" -//! ssl_enabled = false +//! +//! [http_api.tsl_config] //! ssl_cert_path = "./storage/tracker/lib/tls/localhost.crt" //! ssl_key_path = "./storage/tracker/lib/tls/localhost.key" //! @@ -42,7 +42,7 @@ //! //! ```text //! 
Loading configuration from config file ./tracker.toml -//! 023-03-28T12:19:24.963054069+01:00 [torrust_tracker::bootstrap::logging][INFO] logging initialized. +//! 023-03-28T12:19:24.963054069+01:00 [torrust_tracker::bootstrap::logging][INFO] Logging initialized //! ... //! 023-03-28T12:19:24.964138723+01:00 [torrust_tracker::bootstrap::jobs::tracker_apis][INFO] Starting Torrust APIs server on: http://0.0.0.0:1212 //! ``` @@ -106,16 +106,14 @@ //! //! # Setup SSL (optional) //! -//! The API server supports SSL. You can enable it by setting the -//! [`ssl_enabled`](torrust_tracker_configuration::HttpApi::ssl_enabled) option -//! to `true` in the configuration file -//! ([`http_api`](torrust_tracker_configuration::HttpApi) section). +//! The API server supports SSL. You can enable it by adding the `tsl_config` +//! section to the configuration. //! //! ```toml //! [http_api] -//! enabled = true //! bind_address = "0.0.0.0:1212" -//! ssl_enabled = true +//! +//! [http_api.tsl_config] //! ssl_cert_path = "./storage/tracker/lib/tls/localhost.crt" //! ssl_key_path = "./storage/tracker/lib/tls/localhost.key" //! @@ -124,28 +122,28 @@ //! ``` //! //! > **NOTICE**: If you are using a reverse proxy like NGINX, you can skip this -//! step and use NGINX for the SSL instead. See -//! [other alternatives to Nginx/certbot](https://github.com/torrust/torrust-tracker/discussions/131) +//! > step and use NGINX for the SSL instead. See +//! > [other alternatives to Nginx/certbot](https://github.com/torrust/torrust-tracker/discussions/131) //! //! > **NOTICE**: You can generate a self-signed certificate for localhost using -//! OpenSSL. See [Let's Encrypt](https://letsencrypt.org/docs/certificates-for-localhost/). -//! That's particularly useful for testing purposes. Once you have the certificate -//! you need to set the [`ssl_cert_path`](torrust_tracker_configuration::HttpApi::ssl_cert_path) -//! and [`ssl_key_path`](torrust_tracker_configuration::HttpApi::ssl_key_path) -//! 
options in the configuration file with the paths to the certificate -//! (`localhost.crt`) and key (`localhost.key`) files. +//! > OpenSSL. See [Let's Encrypt](https://letsencrypt.org/docs/certificates-for-localhost/). +//! > That's particularly useful for testing purposes. Once you have the certificate +//! > you need to set the [`ssl_cert_path`](torrust_tracker_configuration::HttpApi::tsl_config.ssl_cert_path) +//! > and [`ssl_key_path`](torrust_tracker_configuration::HttpApi::tsl_config.ssl_key_path) +//! > options in the configuration file with the paths to the certificate +//! > (`localhost.crt`) and key (`localhost.key`) files. //! //! # Versioning //! //! The API is versioned and each version has its own module. //! The API server runs all the API versions on the same server using -//! the same port. Currently there is only one API version: [v1](crate::servers::apis::v1) +//! the same port. Currently there is only one API version: [v1] //! but a version [`v2`](https://github.com/torrust/torrust-tracker/issues/144) //! is planned. //! //! # Endpoints //! -//! Refer to the [v1](crate::servers::apis::v1) module for the list of available +//! Refer to the [v1] module for the list of available //! API endpoints. //! //! # Documentation @@ -153,13 +151,15 @@ //! If you want to contribute to this documentation you can [open a new pull request](https://github.com/torrust/torrust-tracker/pulls). //! //! > **NOTICE**: we are using [curl](https://curl.se/) in the API examples. -//! And you have to use quotes around the URL in order to avoid unexpected -//! errors. For example: `curl "http://127.0.0.1:1212/api/v1/stats?token=MyAccessToken"`. +//! > And you have to use quotes around the URL in order to avoid unexpected +//! > errors. For example: `curl "http://127.0.0.1:1212/api/v1/stats?token=MyAccessToken"`. 
pub mod routes; pub mod server; pub mod v1; -use serde::Deserialize; +use serde::{Deserialize, Serialize}; + +pub const API_LOG_TARGET: &str = "API"; /// The info hash URL path parameter. /// @@ -172,3 +172,10 @@ use serde::Deserialize; /// in order to provide a more specific error message. #[derive(Deserialize)] pub struct InfoHashParam(pub String); + +/// The version of the HTTP Api. +#[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Eq, Debug)] +pub enum Version { + /// The `v1` version of the HTTP Api. + V1, +} diff --git a/src/servers/apis/routes.rs b/src/servers/apis/routes.rs index 7801389f3..4901d760d 100644 --- a/src/servers/apis/routes.rs +++ b/src/servers/apis/routes.rs @@ -6,26 +6,82 @@ //! All the API routes have the `/api` prefix and the version number as the //! first path segment. For example: `/api/v1/torrents`. use std::sync::Arc; +use std::time::Duration; -use axum::{middleware, Router}; +use axum::error_handling::HandleErrorLayer; +use axum::http::HeaderName; +use axum::response::Response; +use axum::routing::get; +use axum::{middleware, BoxError, Router}; +use hyper::{Request, StatusCode}; +use torrust_tracker_configuration::{AccessTokens, DEFAULT_TIMEOUT}; +use tower::timeout::TimeoutLayer; +use tower::ServiceBuilder; use tower_http::compression::CompressionLayer; +use tower_http::propagate_header::PropagateHeaderLayer; +use tower_http::request_id::{MakeRequestUuid, SetRequestIdLayer}; +use tower_http::trace::{DefaultMakeSpan, TraceLayer}; +use tracing::{Level, Span}; use super::v1; -use crate::tracker::Tracker; +use super::v1::context::health_check::handlers::health_check_handler; +use super::v1::middlewares::auth::State; +use crate::core::Tracker; +use crate::servers::apis::API_LOG_TARGET; /// Add all API routes to the router. 
#[allow(clippy::needless_pass_by_value)] -pub fn router(tracker: Arc) -> Router { +pub fn router(tracker: Arc, access_tokens: Arc) -> Router { let router = Router::new(); - let prefix = "/api"; + let api_url_prefix = "/api"; - let router = v1::routes::add(prefix, router, tracker.clone()); + let router = v1::routes::add(api_url_prefix, router, tracker.clone()); + + let state = State { access_tokens }; router - .layer(middleware::from_fn_with_state( - tracker.config.clone(), - v1::middlewares::auth::auth, - )) + .layer(middleware::from_fn_with_state(state, v1::middlewares::auth::auth)) + .route(&format!("{api_url_prefix}/health_check"), get(health_check_handler)) .layer(CompressionLayer::new()) + .layer(SetRequestIdLayer::x_request_id(MakeRequestUuid)) + .layer(PropagateHeaderLayer::new(HeaderName::from_static("x-request-id"))) + .layer( + TraceLayer::new_for_http() + .make_span_with(DefaultMakeSpan::new().level(Level::INFO)) + .on_request(|request: &Request, _span: &Span| { + let method = request.method().to_string(); + let uri = request.uri().to_string(); + let request_id = request + .headers() + .get("x-request-id") + .map(|v| v.to_str().unwrap_or_default()) + .unwrap_or_default(); + + tracing::span!( + target: API_LOG_TARGET, + tracing::Level::INFO, "request", method = %method, uri = %uri, request_id = %request_id); + }) + .on_response(|response: &Response, latency: Duration, _span: &Span| { + let status_code = response.status(); + let request_id = response + .headers() + .get("x-request-id") + .map(|v| v.to_str().unwrap_or_default()) + .unwrap_or_default(); + let latency_ms = latency.as_millis(); + + tracing::span!( + target: API_LOG_TARGET, + tracing::Level::INFO, "response", latency = %latency_ms, status = %status_code, request_id = %request_id); + }), + ) + .layer(SetRequestIdLayer::x_request_id(MakeRequestUuid)) + .layer( + ServiceBuilder::new() + // this middleware goes above `TimeoutLayer` because it will receive + // errors returned by `TimeoutLayer` + 
.layer(HandleErrorLayer::new(|_: BoxError| async { StatusCode::REQUEST_TIMEOUT })) + .layer(TimeoutLayer::new(DEFAULT_TIMEOUT)), + ) } diff --git a/src/servers/apis/server.rs b/src/servers/apis/server.rs index 778a17d90..40c4d0779 100644 --- a/src/servers/apis/server.rs +++ b/src/servers/apis/server.rs @@ -24,18 +24,24 @@ /// for example, to restart it to apply new configuration changes, to remotely /// shutdown the server, etc. use std::net::SocketAddr; -use std::str::FromStr; use std::sync::Arc; use axum_server::tls_rustls::RustlsConfig; use axum_server::Handle; +use derive_more::Constructor; use futures::future::BoxFuture; -use futures::Future; -use log::info; +use tokio::sync::oneshot::{Receiver, Sender}; +use torrust_tracker_configuration::AccessTokens; +use tracing::{debug, error, info}; use super::routes::router; -use crate::servers::signals::shutdown_signal; -use crate::tracker::Tracker; +use crate::bootstrap::jobs::Started; +use crate::core::Tracker; +use crate::servers::apis::API_LOG_TARGET; +use crate::servers::custom_axum_server::{self, TimeoutAcceptor}; +use crate::servers::logging::STARTED_ON; +use crate::servers::registar::{ServiceHealthCheckJob, ServiceRegistration, ServiceRegistrationForm}; +use crate::servers::signals::{graceful_shutdown, Halted}; /// Errors that can occur when starting or stopping the API server. #[derive(Debug)] @@ -58,24 +64,42 @@ pub type RunningApiServer = ApiServer; /// states: `Stopped` or `Running`. #[allow(clippy::module_name_repetitions)] pub struct ApiServer { - pub cfg: torrust_tracker_configuration::HttpApi, pub state: S, } /// The `Stopped` state of the `ApiServer` struct. -pub struct Stopped; +pub struct Stopped { + launcher: Launcher, +} /// The `Running` state of the `ApiServer` struct. 
pub struct Running { - pub bind_addr: SocketAddr, - task_killer: tokio::sync::oneshot::Sender, - task: tokio::task::JoinHandle<()>, + pub binding: SocketAddr, + pub halt_task: tokio::sync::oneshot::Sender, + pub task: tokio::task::JoinHandle, +} + +impl Running { + #[must_use] + pub fn new( + binding: SocketAddr, + halt_task: tokio::sync::oneshot::Sender, + task: tokio::task::JoinHandle, + ) -> Self { + Self { + binding, + halt_task, + task, + } + } } impl ApiServer { #[must_use] - pub fn new(cfg: torrust_tracker_configuration::HttpApi) -> Self { - Self { cfg, state: Stopped {} } + pub fn new(launcher: Launcher) -> Self { + Self { + state: Stopped { launcher }, + } } /// Starts the API server with the given configuration. @@ -87,32 +111,44 @@ impl ApiServer { /// # Panics /// /// It would panic if the bound socket address cannot be sent back to this starter. - pub async fn start(self, tracker: Arc) -> Result, Error> { - let (shutdown_sender, shutdown_receiver) = tokio::sync::oneshot::channel::(); - let (addr_sender, addr_receiver) = tokio::sync::oneshot::channel::(); + pub async fn start( + self, + tracker: Arc, + form: ServiceRegistrationForm, + access_tokens: Arc, + ) -> Result, Error> { + let (tx_start, rx_start) = tokio::sync::oneshot::channel::(); + let (tx_halt, rx_halt) = tokio::sync::oneshot::channel::(); - let configuration = self.cfg.clone(); + let launcher = self.state.launcher; let task = tokio::spawn(async move { - let (bind_addr, server) = Launcher::start(&configuration, tracker, shutdown_signal(shutdown_receiver)); + debug!(target: API_LOG_TARGET, "Starting with launcher in spawned task ..."); - addr_sender.send(bind_addr).expect("Could not return SocketAddr."); + let _task = launcher.start(tracker, access_tokens, tx_start, rx_halt).await; - server.await; - }); + debug!(target: API_LOG_TARGET, "Started with launcher in spawned task"); - let bind_address = addr_receiver - .await - .map_err(|_| Error::Error("Could not receive 
bind_address.".to_string()))?; + launcher + }); - Ok(ApiServer { - cfg: self.cfg, - state: Running { - bind_addr: bind_address, - task_killer: shutdown_sender, - task, - }, - }) + let api_server = match rx_start.await { + Ok(started) => { + form.send(ServiceRegistration::new(started.address, check_fn)) + .expect("it should be able to send service registration"); + + ApiServer { + state: Running::new(started.address, tx_halt, task), + } + } + Err(err) => { + let msg = format!("Unable to start API server: {err}"); + error!("{}", msg); + panic!("{}", msg); + } + }; + + Ok(api_server) } } @@ -124,21 +160,45 @@ impl ApiServer { /// It would return an error if the channel for the task killer signal was closed. pub async fn stop(self) -> Result, Error> { self.state - .task_killer - .send(0) + .halt_task + .send(Halted::Normal) .map_err(|_| Error::Error("Task killer channel was closed.".to_string()))?; - drop(self.state.task.await); + let launcher = self.state.task.await.map_err(|e| Error::Error(e.to_string()))?; Ok(ApiServer { - cfg: self.cfg, - state: Stopped {}, + state: Stopped { launcher }, }) } } +/// Checks the Health by connecting to the API service endpoint. +/// +/// # Errors +/// +/// This function will return an error if unable to connect. +/// Or if there request returns an error code. +#[must_use] +pub fn check_fn(binding: &SocketAddr) -> ServiceHealthCheckJob { + let url = format!("http://{binding}/api/health_check"); // DevSkim: ignore DS137138 + + let info = format!("checking api health check at: {url}"); + + let job = tokio::spawn(async move { + match reqwest::get(url).await { + Ok(response) => Ok(response.status().to_string()), + Err(err) => Err(err.to_string()), + } + }); + ServiceHealthCheckJob::new(*binding, info, job) +} + /// A struct responsible for starting the API server. 
-struct Launcher; +#[derive(Constructor, Debug)] +pub struct Launcher { + bind_to: SocketAddr, + tls: Option, +} impl Launcher { /// Starts the API server with graceful shutdown. @@ -146,163 +206,100 @@ impl Launcher { /// If TLS is enabled in the configuration, it will start the server with /// TLS. See [`torrust-tracker-configuration`](torrust_tracker_configuration) /// for more information about configuration. - pub fn start( - cfg: &torrust_tracker_configuration::HttpApi, - tracker: Arc, - shutdown_signal: F, - ) -> (SocketAddr, BoxFuture<'static, ()>) - where - F: Future + Send + 'static, - { - let addr = SocketAddr::from_str(&cfg.bind_address).expect("bind_address is not a valid SocketAddr."); - let tcp_listener = std::net::TcpListener::bind(addr).expect("Could not bind tcp_listener to address."); - let bind_addr = tcp_listener - .local_addr() - .expect("Could not get local_addr from tcp_listener."); - - if let (true, Some(ssl_cert_path), Some(ssl_key_path)) = (&cfg.ssl_enabled, &cfg.ssl_cert_path, &cfg.ssl_key_path) { - let server = Self::start_tls_with_graceful_shutdown( - tcp_listener, - (ssl_cert_path.to_string(), ssl_key_path.to_string()), - tracker, - shutdown_signal, - ); - - (bind_addr, server) - } else { - let server = Self::start_with_graceful_shutdown(tcp_listener, tracker, shutdown_signal); - - (bind_addr, server) - } - } - - /// Starts the API server with graceful shutdown. - pub fn start_with_graceful_shutdown( - tcp_listener: std::net::TcpListener, - tracker: Arc, - shutdown_signal: F, - ) -> BoxFuture<'static, ()> - where - F: Future + Send + 'static, - { - let app = router(tracker); - - Box::pin(async { - axum::Server::from_tcp(tcp_listener) - .expect("Could not bind to tcp listener.") - .serve(app.into_make_service_with_connect_info::()) - .with_graceful_shutdown(shutdown_signal) - .await - .expect("Axum server crashed."); - }) - } - - /// Starts the API server with graceful shutdown and TLS. 
- pub fn start_tls_with_graceful_shutdown( - tcp_listener: std::net::TcpListener, - (ssl_cert_path, ssl_key_path): (String, String), + /// + /// # Panics + /// + /// Will panic if unable to bind to the socket, or unable to get the address of the bound socket. + /// Will also panic if unable to send message regarding the bound socket address. + pub fn start( + &self, tracker: Arc, - shutdown_signal: F, - ) -> BoxFuture<'static, ()> - where - F: Future + Send + 'static, - { - let app = router(tracker); + access_tokens: Arc, + tx_start: Sender, + rx_halt: Receiver, + ) -> BoxFuture<'static, ()> { + let router = router(tracker, access_tokens); + let socket = std::net::TcpListener::bind(self.bind_to).expect("Could not bind tcp_listener to address."); + let address = socket.local_addr().expect("Could not get local_addr from tcp_listener."); let handle = Handle::new(); - let cloned_handle = handle.clone(); - - tokio::task::spawn_local(async move { - shutdown_signal.await; - cloned_handle.shutdown(); + tokio::task::spawn(graceful_shutdown( + handle.clone(), + rx_halt, + format!("Shutting down tracker API server on socket address: {address}"), + )); + + let tls = self.tls.clone(); + let protocol = if tls.is_some() { "https" } else { "http" }; + + info!(target: API_LOG_TARGET, "Starting on {protocol}://{}", address); + + let running = Box::pin(async { + match tls { + Some(tls) => custom_axum_server::from_tcp_rustls_with_timeouts(socket, tls) + .handle(handle) + // The TimeoutAcceptor is commented because TSL does not work with it. 
+ // See: https://github.com/torrust/torrust-index/issues/204#issuecomment-2115529214 + //.acceptor(TimeoutAcceptor) + .serve(router.into_make_service_with_connect_info::()) + .await + .expect("Axum server for tracker API crashed."), + None => custom_axum_server::from_tcp_with_timeouts(socket) + .handle(handle) + .acceptor(TimeoutAcceptor) + .serve(router.into_make_service_with_connect_info::()) + .await + .expect("Axum server for tracker API crashed."), + } }); - Box::pin(async { - let tls_config = RustlsConfig::from_pem_file(ssl_cert_path, ssl_key_path) - .await - .expect("Could not read tls cert."); - - axum_server::from_tcp_rustls(tcp_listener, tls_config) - .handle(handle) - .serve(app.into_make_service_with_connect_info::()) - .await - .expect("Axum server crashed."); - }) - } -} - -/// Starts the API server with graceful shutdown on the current thread. -/// -/// # Panics -/// -/// It would panic if it fails to listen to shutdown signal. -pub fn start(socket_addr: SocketAddr, tracker: Arc) -> impl Future> { - let app = router(tracker); - - let server = axum::Server::bind(&socket_addr).serve(app.into_make_service()); + info!(target: API_LOG_TARGET, "{STARTED_ON} {protocol}://{}", address); - server.with_graceful_shutdown(async move { - tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); - info!("Stopping Torrust APIs server on http://{} ...", socket_addr); - }) -} + tx_start + .send(Started { address }) + .expect("the HTTP(s) Tracker API service should not be dropped"); -/// Starts the API server with graceful shutdown and TLS on the current thread. -/// -/// # Panics -/// -/// It would panic if it fails to listen to shutdown signal. 
-pub fn start_tls( - socket_addr: SocketAddr, - ssl_config: RustlsConfig, - tracker: Arc, -) -> impl Future> { - let app = router(tracker); - - let handle = Handle::new(); - let shutdown_handle = handle.clone(); - - tokio::spawn(async move { - tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); - info!("Stopping Torrust APIs server on https://{} ...", socket_addr); - shutdown_handle.shutdown(); - }); - - axum_server::bind_rustls(socket_addr, ssl_config) - .handle(handle) - .serve(app.into_make_service()) + running + } } #[cfg(test)] mod tests { use std::sync::Arc; - use torrust_tracker_configuration::Configuration; - use torrust_tracker_test_helpers::configuration; + use torrust_tracker_test_helpers::configuration::ephemeral_public; - use crate::servers::apis::server::ApiServer; - use crate::tracker; - use crate::tracker::statistics; - - fn tracker_configuration() -> Arc { - Arc::new(configuration::ephemeral()) - } + use crate::bootstrap::app::initialize_with_configuration; + use crate::bootstrap::jobs::make_rust_tls; + use crate::servers::apis::server::{ApiServer, Launcher}; + use crate::servers::registar::Registar; #[tokio::test] - async fn it_should_be_able_to_start_from_stopped_state_and_then_stop_again() { - let cfg = tracker_configuration(); + async fn it_should_be_able_to_start_and_stop() { + let cfg = Arc::new(ephemeral_public()); + let config = &cfg.http_api.clone().unwrap(); + + let tracker = initialize_with_configuration(&cfg); - let tracker = Arc::new(tracker::Tracker::new(cfg.clone(), None, statistics::Repo::new()).unwrap()); + let bind_to = config.bind_address; - let stopped_api_server = ApiServer::new(cfg.http_api.clone()); + let tls = make_rust_tls(&config.tsl_config) + .await + .map(|tls| tls.expect("tls config failed")); + + let access_tokens = Arc::new(config.access_tokens.clone()); - let running_api_server_result = stopped_api_server.start(tracker).await; + let stopped = ApiServer::new(Launcher::new(bind_to, tls)); - 
assert!(running_api_server_result.is_ok()); + let register = &Registar::default(); - let running_api_server = running_api_server_result.unwrap(); + let started = stopped + .start(tracker, register.give_form(), access_tokens) + .await + .expect("it should start the server"); + let stopped = started.stop().await.expect("it should stop the server"); - assert!(running_api_server.stop().await.is_ok()); + assert_eq!(stopped.state.launcher.bind_to, bind_to); } } diff --git a/src/servers/apis/v1/context/auth_key/forms.rs b/src/servers/apis/v1/context/auth_key/forms.rs new file mode 100644 index 000000000..5dfea6e80 --- /dev/null +++ b/src/servers/apis/v1/context/auth_key/forms.rs @@ -0,0 +1,22 @@ +use serde::{Deserialize, Serialize}; +use serde_with::{serde_as, DefaultOnNull}; + +/// This type contains the info needed to add a new tracker key. +/// +/// You can upload a pre-generated key or let the app to generate a new one. +/// You can also set an expiration date or leave it empty (`None`) if you want +/// to create permanent key that does not expire. +#[serde_as] +#[derive(Serialize, Deserialize, Debug)] +pub struct AddKeyForm { + /// The pre-generated key. Use `None` (null in json) to generate a random key. + #[serde_as(deserialize_as = "DefaultOnNull")] + #[serde(rename = "key")] + pub opt_key: Option, + + /// How long the key will be valid in seconds. Use `None` (null in json) for + /// permanent keys. 
+ #[serde_as(deserialize_as = "DefaultOnNull")] + #[serde(rename = "seconds_valid")] + pub opt_seconds_valid: Option, +} diff --git a/src/servers/apis/v1/context/auth_key/handlers.rs b/src/servers/apis/v1/context/auth_key/handlers.rs index 85158c698..fed3ad301 100644 --- a/src/servers/apis/v1/context/auth_key/handlers.rs +++ b/src/servers/apis/v1/context/auth_key/handlers.rs @@ -3,32 +3,71 @@ use std::str::FromStr; use std::sync::Arc; use std::time::Duration; -use axum::extract::{Path, State}; +use axum::extract::{self, Path, State}; use axum::response::Response; use serde::Deserialize; +use super::forms::AddKeyForm; use super::responses::{ auth_key_response, failed_to_delete_key_response, failed_to_generate_key_response, failed_to_reload_keys_response, + invalid_auth_key_duration_response, invalid_auth_key_response, }; +use crate::core::auth::Key; +use crate::core::{AddKeyRequest, Tracker}; use crate::servers::apis::v1::context::auth_key::resources::AuthKey; use crate::servers::apis::v1::responses::{invalid_auth_key_param_response, ok_response}; -use crate::tracker::auth::Key; -use crate::tracker::Tracker; + +/// It handles the request to add a new authentication key. +/// +/// It returns these types of responses: +/// +/// - `200` with a json [`AuthKey`] +/// resource. If the key was generated successfully. +/// - `400` with an error if the key couldn't been added because of an invalid +/// request. +/// - `500` with serialized error in debug format. If the key couldn't be +/// generated. +/// +/// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::auth_key#generate-a-new-authentication-key) +/// for more information about this endpoint. 
+pub async fn add_auth_key_handler( + State(tracker): State>, + extract::Json(add_key_form): extract::Json, +) -> Response { + match tracker + .add_peer_key(AddKeyRequest { + opt_key: add_key_form.opt_key.clone(), + opt_seconds_valid: add_key_form.opt_seconds_valid, + }) + .await + { + Ok(auth_key) => auth_key_response(&AuthKey::from(auth_key)), + Err(err) => match err { + crate::core::error::PeerKeyError::DurationOverflow { seconds_valid } => { + invalid_auth_key_duration_response(seconds_valid) + } + crate::core::error::PeerKeyError::InvalidKey { key, source } => invalid_auth_key_response(&key, source), + crate::core::error::PeerKeyError::DatabaseError { source } => failed_to_generate_key_response(source), + }, + } +} /// It handles the request to generate a new authentication key. /// /// It returns two types of responses: /// -/// - `200` with an json [`AuthKey`](crate::servers::apis::v1::context::auth_key::resources::AuthKey) +/// - `200` with an json [`AuthKey`] /// resource. If the key was generated successfully. /// - `500` with serialized error in debug format. If the key couldn't be /// generated. /// /// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::auth_key#generate-a-new-authentication-key) /// for more information about this endpoint. +/// +/// This endpoint has been deprecated. Use [`add_auth_key_handler`]. pub async fn generate_auth_key_handler(State(tracker): State>, Path(seconds_valid_or_key): Path) -> Response { let seconds_valid = seconds_valid_or_key; - match tracker.generate_auth_key(Duration::from_secs(seconds_valid)).await { + match tracker.generate_auth_key(Some(Duration::from_secs(seconds_valid))).await { Ok(auth_key) => auth_key_response(&AuthKey::from(auth_key)), Err(e) => failed_to_generate_key_response(e), } @@ -46,7 +85,7 @@ pub async fn generate_auth_key_handler(State(tracker): State>, Path /// /// - `POST /api/v1/key/120`. It will generate a new key valid for two minutes. 
/// - `DELETE /api/v1/key/xqD6NWH9TcKrOCwDmqcdH5hF5RrbL0A6`. It will delete the -/// key `xqD6NWH9TcKrOCwDmqcdH5hF5RrbL0A6`. +/// key `xqD6NWH9TcKrOCwDmqcdH5hF5RrbL0A6`. /// /// > **NOTICE**: this may change in the future, in the [API v2](https://github.com/torrust/torrust-tracker/issues/144). #[derive(Deserialize)] diff --git a/src/servers/apis/v1/context/auth_key/mod.rs b/src/servers/apis/v1/context/auth_key/mod.rs index 11bc8a43f..b4112f21f 100644 --- a/src/servers/apis/v1/context/auth_key/mod.rs +++ b/src/servers/apis/v1/context/auth_key/mod.rs @@ -3,8 +3,8 @@ //! Authentication keys are used to authenticate HTTP tracker `announce` and //! `scrape` requests. //! -//! When the tracker is running in `private` or `private_listed` mode, the -//! authentication keys are required to announce and scrape torrents. +//! When the tracker is running in `private` mode, the authentication keys are +//! required to announce and scrape torrents. //! //! A sample `announce` request **without** authentication key: //! @@ -22,22 +22,29 @@ //! //! # Generate a new authentication key //! -//! `POST /key/:seconds_valid` +//! `POST /keys` //! -//! It generates a new authentication key. +//! It generates a new authentication key or upload a pre-generated key. //! -//! > **NOTICE**: keys expire after a certain amount of time. -//! -//! **Path parameters** +//! **POST parameters** //! //! Name | Type | Description | Required | Example //! ---|---|---|---|--- -//! `seconds_valid` | positive integer | The number of seconds the key will be valid. | Yes | `3600` +//! `key` | 32-char string (0-9, a-z, A-Z) or `null` | The optional pre-generated key. | Yes | `Xc1L4PbQJSFGlrgSRZl8wxSFAuMa21z7` or `null` +//! `seconds_valid` | positive integer or `null` | The number of seconds the key will be valid. | Yes | `3600` or `null` +//! +//! > **NOTICE**: the `key` and `seconds_valid` fields are optional. If `key` is not provided the tracker +//! > will generated a random one. 
If `seconds_valid` field is not provided the key will be permanent. You can use the `null` value. //! //! **Example request** //! //! ```bash -//! curl -X POST "http://127.0.0.1:1212/api/v1/key/120?token=MyAccessToken" +//! curl -X POST http://localhost:1212/api/v1/keys?token=MyAccessToken \ +//! -H "Content-Type: application/json" \ +//! -d '{ +//! "key": "xqD6NWH9TcKrOCwDmqcdH5hF5RrbL0A6", +//! "seconds_valid": 7200 +//! }' //! ``` //! //! **Example response** `200` @@ -51,9 +58,9 @@ //! ``` //! //! > **NOTICE**: `valid_until` and `expiry_time` represent the same time. -//! `valid_until` is the number of seconds since the Unix epoch -//! ([timestamp](https://en.wikipedia.org/wiki/Timestamp)), while `expiry_time` -//! is the human-readable time ([ISO 8601](https://www.iso.org/iso-8601-date-and-time-format.html)). +//! > `valid_until` is the number of seconds since the Unix epoch +//! > ([timestamp](https://en.wikipedia.org/wiki/Timestamp)), while `expiry_time` +//! > is the human-readable time ([ISO 8601](https://www.iso.org/iso-8601-date-and-time-format.html)). //! //! **Resource** //! @@ -96,8 +103,8 @@ //! ``` //! //! > **NOTICE**: a `500` status code will be returned and the body is not a -//! valid JSON. It's a text body containing the serialized-to-display error -//! message. +//! > valid JSON. It's a text body containing the serialized-to-display error +//! > message. //! //! # Reload authentication keys //! @@ -119,6 +126,7 @@ //! "status": "ok" //! } //! ``` +pub mod forms; pub mod handlers; pub mod resources; pub mod responses; diff --git a/src/servers/apis/v1/context/auth_key/resources.rs b/src/servers/apis/v1/context/auth_key/resources.rs index 5099fad8b..c26b2c4d3 100644 --- a/src/servers/apis/v1/context/auth_key/resources.rs +++ b/src/servers/apis/v1/context/auth_key/resources.rs @@ -1,10 +1,9 @@ //! API resources for the [`auth_key`](crate::servers::apis::v1::context::auth_key) API context. 
-use std::convert::From; use serde::{Deserialize, Serialize}; +use torrust_tracker_clock::conv::convert_from_iso_8601_to_timestamp; -use crate::shared::clock::convert_from_iso_8601_to_timestamp; -use crate::tracker::auth::{self, Key}; +use crate::core::auth::{self, Key}; /// A resource that represents an authentication key. #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] @@ -13,27 +12,36 @@ pub struct AuthKey { pub key: String, /// The timestamp when the key will expire. #[deprecated(since = "3.0.0", note = "please use `expiry_time` instead")] - pub valid_until: u64, // todo: remove when the torrust-index-backend starts using the `expiry_time` attribute. + pub valid_until: Option, // todo: remove when the torrust-index-backend starts using the `expiry_time` attribute. /// The ISO 8601 timestamp when the key will expire. - pub expiry_time: String, + pub expiry_time: Option, } -impl From for auth::ExpiringKey { +impl From for auth::PeerKey { fn from(auth_key_resource: AuthKey) -> Self { - auth::ExpiringKey { + auth::PeerKey { key: auth_key_resource.key.parse::().unwrap(), - valid_until: convert_from_iso_8601_to_timestamp(&auth_key_resource.expiry_time), + valid_until: auth_key_resource + .expiry_time + .map(|expiry_time| convert_from_iso_8601_to_timestamp(&expiry_time)), } } } #[allow(deprecated)] -impl From for AuthKey { - fn from(auth_key: auth::ExpiringKey) -> Self { - AuthKey { - key: auth_key.key.to_string(), - valid_until: auth_key.valid_until.as_secs(), - expiry_time: auth_key.expiry_time().to_string(), +impl From for AuthKey { + fn from(auth_key: auth::PeerKey) -> Self { + match (auth_key.valid_until, auth_key.expiry_time()) { + (Some(valid_until), Some(expiry_time)) => AuthKey { + key: auth_key.key.to_string(), + valid_until: Some(valid_until.as_secs()), + expiry_time: Some(expiry_time.to_string()), + }, + _ => AuthKey { + key: auth_key.key.to_string(), + valid_until: None, + expiry_time: None, + }, } } } @@ -42,9 +50,12 @@ impl From for AuthKey { 
mod tests { use std::time::Duration; + use torrust_tracker_clock::clock::stopped::Stopped as _; + use torrust_tracker_clock::clock::{self, Time}; + use super::AuthKey; - use crate::shared::clock::{Current, TimeNow}; - use crate::tracker::auth::{self, Key}; + use crate::core::auth::{self, Key}; + use crate::CurrentClock; struct TestTime { pub timestamp: u64, @@ -66,17 +77,19 @@ mod tests { #[test] #[allow(deprecated)] fn it_should_be_convertible_into_an_auth_key() { + clock::Stopped::local_set_to_unix_epoch(); + let auth_key_resource = AuthKey { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line - valid_until: one_hour_after_unix_epoch().timestamp, - expiry_time: one_hour_after_unix_epoch().iso_8601_v1, + valid_until: Some(one_hour_after_unix_epoch().timestamp), + expiry_time: Some(one_hour_after_unix_epoch().iso_8601_v1), }; assert_eq!( - auth::ExpiringKey::from(auth_key_resource), - auth::ExpiringKey { + auth::PeerKey::from(auth_key_resource), + auth::PeerKey { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line - valid_until: Current::add(&Duration::new(one_hour_after_unix_epoch().timestamp, 0)).unwrap() + valid_until: Some(CurrentClock::now_add(&Duration::new(one_hour_after_unix_epoch().timestamp, 0)).unwrap()) } ); } @@ -84,17 +97,19 @@ mod tests { #[test] #[allow(deprecated)] fn it_should_be_convertible_from_an_auth_key() { - let auth_key = auth::ExpiringKey { + clock::Stopped::local_set_to_unix_epoch(); + + let auth_key = auth::PeerKey { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".parse::().unwrap(), // cspell:disable-line - valid_until: Current::add(&Duration::new(one_hour_after_unix_epoch().timestamp, 0)).unwrap(), + valid_until: Some(CurrentClock::now_add(&Duration::new(one_hour_after_unix_epoch().timestamp, 0)).unwrap()), }; assert_eq!( AuthKey::from(auth_key), AuthKey { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line - valid_until: one_hour_after_unix_epoch().timestamp, - 
expiry_time: one_hour_after_unix_epoch().iso_8601_v2, + valid_until: Some(one_hour_after_unix_epoch().timestamp), + expiry_time: Some(one_hour_after_unix_epoch().iso_8601_v2), } ); } @@ -105,8 +120,8 @@ mod tests { assert_eq!( serde_json::to_string(&AuthKey { key: "IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM".to_string(), // cspell:disable-line - valid_until: one_hour_after_unix_epoch().timestamp, - expiry_time: one_hour_after_unix_epoch().iso_8601_v1, + valid_until: Some(one_hour_after_unix_epoch().timestamp), + expiry_time: Some(one_hour_after_unix_epoch().iso_8601_v1), }) .unwrap(), "{\"key\":\"IaWDneuFNZi8IB4MPA3qW1CD0M30EZSM\",\"valid_until\":60,\"expiry_time\":\"1970-01-01T00:01:00.000Z\"}" // cspell:disable-line diff --git a/src/servers/apis/v1/context/auth_key/responses.rs b/src/servers/apis/v1/context/auth_key/responses.rs index 51be162c5..4905d9adc 100644 --- a/src/servers/apis/v1/context/auth_key/responses.rs +++ b/src/servers/apis/v1/context/auth_key/responses.rs @@ -5,7 +5,7 @@ use axum::http::{header, StatusCode}; use axum::response::{IntoResponse, Response}; use crate::servers::apis::v1::context::auth_key::resources::AuthKey; -use crate::servers::apis::v1::responses::unhandled_rejection_response; +use crate::servers::apis::v1::responses::{bad_request_response, unhandled_rejection_response}; /// `200` response that contains the `AuthKey` resource as json. /// @@ -22,12 +22,20 @@ pub fn auth_key_response(auth_key: &AuthKey) -> Response { .into_response() } +// Error responses + /// `500` error response when a new authentication key cannot be generated. #[must_use] pub fn failed_to_generate_key_response(e: E) -> Response { unhandled_rejection_response(format!("failed to generate key: {e}")) } +/// `500` error response when the provide key cannot be added. +#[must_use] +pub fn failed_to_add_key_response(e: E) -> Response { + unhandled_rejection_response(format!("failed to add key: {e}")) +} + /// `500` error response when an authentication key cannot be deleted. 
#[must_use] pub fn failed_to_delete_key_response(e: E) -> Response { @@ -40,3 +48,13 @@ pub fn failed_to_delete_key_response(e: E) -> Response { pub fn failed_to_reload_keys_response(e: E) -> Response { unhandled_rejection_response(format!("failed to reload keys: {e}")) } + +#[must_use] +pub fn invalid_auth_key_response(auth_key: &str, e: E) -> Response { + bad_request_response(&format!("Invalid URL: invalid auth key: string \"{auth_key}\", {e}")) +} + +#[must_use] +pub fn invalid_auth_key_duration_response(duration: u64) -> Response { + bad_request_response(&format!("Invalid URL: invalid auth key duration: \"{duration}\"")) +} diff --git a/src/servers/apis/v1/context/auth_key/routes.rs b/src/servers/apis/v1/context/auth_key/routes.rs index 76c634e21..60ccd77ab 100644 --- a/src/servers/apis/v1/context/auth_key/routes.rs +++ b/src/servers/apis/v1/context/auth_key/routes.rs @@ -11,8 +11,8 @@ use std::sync::Arc; use axum::routing::{get, post}; use axum::Router; -use super::handlers::{delete_auth_key_handler, generate_auth_key_handler, reload_keys_handler}; -use crate::tracker::Tracker; +use super::handlers::{add_auth_key_handler, delete_auth_key_handler, generate_auth_key_handler, reload_keys_handler}; +use crate::core::Tracker; /// It adds the routes to the router for the [`auth_key`](crate::servers::apis::v1::context::auth_key) API context. pub fn add(prefix: &str, router: Router, tracker: Arc) -> Router { @@ -21,8 +21,12 @@ pub fn add(prefix: &str, router: Router, tracker: Arc) -> Router { .route( // code-review: Axum does not allow two routes with the same path but different path variable name. // In the new major API version, `seconds_valid` should be a POST form field so that we will have two paths: - // POST /key - // DELETE /key/:key + // + // POST /keys + // DELETE /keys/:key + // + // The POST /key/:seconds_valid has been deprecated and it will removed in the future. 
+ // Use POST /keys &format!("{prefix}/key/:seconds_valid_or_key"), post(generate_auth_key_handler) .with_state(tracker.clone()) @@ -30,5 +34,9 @@ pub fn add(prefix: &str, router: Router, tracker: Arc) -> Router { .with_state(tracker.clone()), ) // Keys command - .route(&format!("{prefix}/keys/reload"), get(reload_keys_handler).with_state(tracker)) + .route( + &format!("{prefix}/keys/reload"), + get(reload_keys_handler).with_state(tracker.clone()), + ) + .route(&format!("{prefix}/keys"), post(add_auth_key_handler).with_state(tracker)) } diff --git a/src/servers/apis/v1/context/health_check/handlers.rs b/src/servers/apis/v1/context/health_check/handlers.rs new file mode 100644 index 000000000..bfbeab549 --- /dev/null +++ b/src/servers/apis/v1/context/health_check/handlers.rs @@ -0,0 +1,11 @@ +//! API handlers for the [`stats`](crate::servers::apis::v1::context::health_check) +//! API context. + +use axum::Json; + +use super::resources::{Report, Status}; + +/// Endpoint for container health check. +pub async fn health_check_handler() -> Json { + Json(Report { status: Status::Ok }) +} diff --git a/src/servers/apis/v1/context/health_check/mod.rs b/src/servers/apis/v1/context/health_check/mod.rs new file mode 100644 index 000000000..b73849511 --- /dev/null +++ b/src/servers/apis/v1/context/health_check/mod.rs @@ -0,0 +1,34 @@ +//! API health check endpoint. +//! +//! It is used to check is the service is running. Especially for containers. +//! +//! # Endpoints +//! +//! - [Health Check](#health-check) +//! +//! # Health Check +//! +//! `GET /api/health_check` +//! +//! Returns the API status. +//! +//! **Example request** +//! +//! ```bash +//! curl "http://127.0.0.1:1212/api/health_check" +//! ``` +//! +//! **Example response** `200` +//! +//! ```json +//! { +//! "status": "Ok", +//! } +//! ``` +//! +//! **Resource** +//! +//! Refer to the API [`Stats`](crate::servers::apis::v1::context::health_check::resources::Report) +//! 
resource for more information about the response attributes. +pub mod handlers; +pub mod resources; diff --git a/src/servers/apis/v1/context/health_check/resources.rs b/src/servers/apis/v1/context/health_check/resources.rs new file mode 100644 index 000000000..9830e643c --- /dev/null +++ b/src/servers/apis/v1/context/health_check/resources.rs @@ -0,0 +1,14 @@ +//! API resources for the [`stats`](crate::servers::apis::v1::context::health_check) +//! API context. +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +pub enum Status { + Ok, + Error, +} + +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +pub struct Report { + pub status: Status, +} diff --git a/src/servers/apis/v1/context/mod.rs b/src/servers/apis/v1/context/mod.rs index 5e268a429..be67cd96a 100644 --- a/src/servers/apis/v1/context/mod.rs +++ b/src/servers/apis/v1/context/mod.rs @@ -3,6 +3,7 @@ //! Each context is a module that contains the API endpoints related to a //! specific resource group. pub mod auth_key; +pub mod health_check; pub mod stats; pub mod torrent; pub mod whitelist; diff --git a/src/servers/apis/v1/context/stats/handlers.rs b/src/servers/apis/v1/context/stats/handlers.rs index dfb983f77..c3be5dc7a 100644 --- a/src/servers/apis/v1/context/stats/handlers.rs +++ b/src/servers/apis/v1/context/stats/handlers.rs @@ -7,12 +7,12 @@ use axum::response::Json; use super::resources::Stats; use super::responses::stats_response; -use crate::tracker::services::statistics::get_metrics; -use crate::tracker::Tracker; +use crate::core::services::statistics::get_metrics; +use crate::core::Tracker; /// It handles the request to get the tracker statistics. 
/// -/// It returns a `200` response with a json [`Stats`](crate::servers::apis::v1::context::stats::resources::Stats) +/// It returns a `200` response with a json [`Stats`] /// /// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::stats#get-tracker-statistics) /// for more information about this endpoint. diff --git a/src/servers/apis/v1/context/stats/resources.rs b/src/servers/apis/v1/context/stats/resources.rs index 355a1e448..9e8ab6bab 100644 --- a/src/servers/apis/v1/context/stats/resources.rs +++ b/src/servers/apis/v1/context/stats/resources.rs @@ -2,7 +2,7 @@ //! API context. use serde::{Deserialize, Serialize}; -use crate::tracker::services::statistics::TrackerMetrics; +use crate::core::services::statistics::TrackerMetrics; /// It contains all the statistics generated by the tracker. #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] @@ -50,9 +50,9 @@ impl From for Stats { fn from(metrics: TrackerMetrics) -> Self { Self { torrents: metrics.torrents_metrics.torrents, - seeders: metrics.torrents_metrics.seeders, - completed: metrics.torrents_metrics.completed, - leechers: metrics.torrents_metrics.leechers, + seeders: metrics.torrents_metrics.complete, + completed: metrics.torrents_metrics.downloaded, + leechers: metrics.torrents_metrics.incomplete, tcp4_connections_handled: metrics.protocol_metrics.tcp4_connections_handled, tcp4_announces_handled: metrics.protocol_metrics.tcp4_announces_handled, tcp4_scrapes_handled: metrics.protocol_metrics.tcp4_scrapes_handled, @@ -71,19 +71,20 @@ impl From for Stats { #[cfg(test)] mod tests { + use torrust_tracker_primitives::torrent_metrics::TorrentsMetrics; + use super::Stats; - use crate::tracker::services::statistics::TrackerMetrics; - use crate::tracker::statistics::Metrics; - use crate::tracker::TorrentsMetrics; + use crate::core::services::statistics::TrackerMetrics; + use crate::core::statistics::Metrics; #[test] fn stats_resource_should_be_converted_from_tracker_metrics() { 
assert_eq!( Stats::from(TrackerMetrics { torrents_metrics: TorrentsMetrics { - seeders: 1, - completed: 2, - leechers: 3, + complete: 1, + downloaded: 2, + incomplete: 3, torrents: 4 }, protocol_metrics: Metrics { diff --git a/src/servers/apis/v1/context/stats/responses.rs b/src/servers/apis/v1/context/stats/responses.rs index a4dad77e4..9d03ccedf 100644 --- a/src/servers/apis/v1/context/stats/responses.rs +++ b/src/servers/apis/v1/context/stats/responses.rs @@ -3,9 +3,9 @@ use axum::response::Json; use super::resources::Stats; -use crate::tracker::services::statistics::TrackerMetrics; +use crate::core::services::statistics::TrackerMetrics; -/// `200` response that contains the [`Stats`](crate::servers::apis::v1::context::stats::resources::Stats) resource as json. +/// `200` response that contains the [`Stats`] resource as json. pub fn stats_response(tracker_metrics: TrackerMetrics) -> Json { Json(Stats::from(tracker_metrics)) } diff --git a/src/servers/apis/v1/context/stats/routes.rs b/src/servers/apis/v1/context/stats/routes.rs index 9198562dd..d8d552697 100644 --- a/src/servers/apis/v1/context/stats/routes.rs +++ b/src/servers/apis/v1/context/stats/routes.rs @@ -9,7 +9,7 @@ use axum::routing::get; use axum::Router; use super::handlers::get_stats_handler; -use crate::tracker::Tracker; +use crate::core::Tracker; /// It adds the routes to the router for the [`stats`](crate::servers::apis::v1::context::stats) API context. 
pub fn add(prefix: &str, router: Router, tracker: Arc) -> Router { diff --git a/src/servers/apis/v1/context/torrent/handlers.rs b/src/servers/apis/v1/context/torrent/handlers.rs index 002d4356e..b2418c689 100644 --- a/src/servers/apis/v1/context/torrent/handlers.rs +++ b/src/servers/apis/v1/context/torrent/handlers.rs @@ -4,18 +4,20 @@ use std::fmt; use std::str::FromStr; use std::sync::Arc; -use axum::extract::{Path, Query, State}; -use axum::response::{IntoResponse, Json, Response}; -use log::debug; +use axum::extract::{Path, State}; +use axum::response::{IntoResponse, Response}; +use axum_extra::extract::Query; use serde::{de, Deserialize, Deserializer}; +use thiserror::Error; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::pagination::Pagination; +use tracing::debug; -use super::resources::torrent::ListItem; use super::responses::{torrent_info_response, torrent_list_response, torrent_not_known_response}; +use crate::core::services::torrent::{get_torrent_info, get_torrents, get_torrents_page}; +use crate::core::Tracker; use crate::servers::apis::v1::responses::invalid_info_hash_param_response; use crate::servers::apis::InfoHashParam; -use crate::shared::bit_torrent::info_hash::InfoHash; -use crate::tracker::services::torrent::{get_torrent_info, get_torrents, Pagination}; -use crate::tracker::Tracker; /// It handles the request to get the torrent data. /// @@ -36,39 +38,87 @@ pub async fn get_torrent_handler(State(tracker): State>, Path(info_ } } -/// A container for the optional URL query pagination parameters: -/// `offset` and `limit`. +/// A container for the URL query parameters. +/// +/// Pagination: `offset` and `limit`. +/// Array of infohashes: `info_hash`. +/// +/// You can either get all torrents with pagination or get a list of torrents +/// providing a list of infohashes. 
For example: +/// +/// First page of torrents: +/// +/// +/// +/// +/// Only two torrents: +/// +/// +/// +/// +/// NOTICE: Pagination is ignored if array of infohashes is provided. #[derive(Deserialize, Debug)] -pub struct PaginationParams { +pub struct QueryParams { /// The offset of the first page to return. Starts at 0. #[serde(default, deserialize_with = "empty_string_as_none")] pub offset: Option, - /// The maximum number of items to return per page + /// The maximum number of items to return per page. #[serde(default, deserialize_with = "empty_string_as_none")] pub limit: Option, + /// A list of infohashes to retrieve. + #[serde(default, rename = "info_hash")] + pub info_hashes: Vec, } /// It handles the request to get a list of torrents. /// -/// It returns a `200` response with a json array with -/// [`ListItem`](crate::servers::apis::v1::context::torrent::resources::torrent::ListItem) -/// resources. +/// It returns a `200` response with a json array with [`crate::servers::apis::v1::context::torrent::resources::torrent::ListItem`] resources. /// /// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::torrent#list-torrents) /// for more information about this endpoint. 
-pub async fn get_torrents_handler( - State(tracker): State>, - pagination: Query, -) -> Json> { +pub async fn get_torrents_handler(State(tracker): State>, pagination: Query) -> Response { debug!("pagination: {:?}", pagination); - torrent_list_response( - &get_torrents( - tracker.clone(), - &Pagination::new_with_options(pagination.0.offset, pagination.0.limit), + if pagination.0.info_hashes.is_empty() { + torrent_list_response( + &get_torrents_page( + tracker.clone(), + Some(&Pagination::new_with_options(pagination.0.offset, pagination.0.limit)), + ) + .await, ) - .await, - ) + .into_response() + } else { + match parse_info_hashes(pagination.0.info_hashes) { + Ok(info_hashes) => torrent_list_response(&get_torrents(tracker.clone(), &info_hashes).await).into_response(), + Err(err) => match err { + QueryParamError::InvalidInfoHash { info_hash } => invalid_info_hash_param_response(&info_hash), + }, + } + } +} + +#[derive(Error, Debug)] +pub enum QueryParamError { + #[error("invalid infohash {info_hash}")] + InvalidInfoHash { info_hash: String }, +} + +fn parse_info_hashes(info_hashes_str: Vec) -> Result, QueryParamError> { + let mut info_hashes: Vec = Vec::new(); + + for info_hash_str in info_hashes_str { + match InfoHash::from_str(&info_hash_str) { + Ok(info_hash) => info_hashes.push(info_hash), + Err(_err) => { + return Err(QueryParamError::InvalidInfoHash { + info_hash: info_hash_str, + }) + } + } + } + + Ok(info_hashes) } /// Serde deserialization decorator to map empty Strings to None, diff --git a/src/servers/apis/v1/context/torrent/resources/peer.rs b/src/servers/apis/v1/context/torrent/resources/peer.rs index 539637b35..e7a0802c1 100644 --- a/src/servers/apis/v1/context/torrent/resources/peer.rs +++ b/src/servers/apis/v1/context/torrent/resources/peer.rs @@ -1,12 +1,12 @@ //! `Peer` and Peer `Id` API resources. +use derive_more::From; use serde::{Deserialize, Serialize}; - -use crate::tracker; +use torrust_tracker_primitives::peer; /// `Peer` API resource. 
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] pub struct Peer { - /// The peer's ID. See [`Id`](crate::servers::apis::v1::context::torrent::resources::peer::Id). + /// The peer's ID. See [`Id`]. pub peer_id: Id, /// The peer's socket address. For example: `192.168.1.88:17548`. pub peer_addr: String, @@ -22,7 +22,7 @@ pub struct Peer { /// The peer's left bytes (pending to download). pub left: i64, /// The peer's event: `started`, `stopped`, `completed`. - /// See [`AnnounceEventDef`](crate::shared::bit_torrent::common::AnnounceEventDef). + /// See [`AnnounceEvent`](torrust_tracker_primitives::announce_event::AnnounceEvent). pub event: String, } @@ -35,27 +35,41 @@ pub struct Id { pub client: Option, } -impl From for Id { - fn from(peer_id: tracker::peer::Id) -> Self { +impl From for Id { + fn from(peer_id: peer::Id) -> Self { Id { id: peer_id.to_hex_string(), - client: peer_id.get_client_name().map(std::string::ToString::to_string), + client: peer_id.get_client_name(), } } } -impl From for Peer { - #[allow(deprecated)] - fn from(peer: tracker::peer::Peer) -> Self { +impl From for Peer { + fn from(value: peer::Peer) -> Self { + #[allow(deprecated)] Peer { - peer_id: Id::from(peer.peer_id), - peer_addr: peer.peer_addr.to_string(), - updated: peer.updated.as_millis(), - updated_milliseconds_ago: peer.updated.as_millis(), - uploaded: peer.uploaded.0, - downloaded: peer.downloaded.0, - left: peer.left.0, - event: format!("{:?}", peer.event), + peer_id: Id::from(value.peer_id), + peer_addr: value.peer_addr.to_string(), + updated: value.updated.as_millis(), + updated_milliseconds_ago: value.updated.as_millis(), + uploaded: value.uploaded.0, + downloaded: value.downloaded.0, + left: value.left.0, + event: format!("{:?}", value.event), + } + } +} + +#[derive(From, PartialEq, Default)] +pub struct Vector(pub Vec); + +impl FromIterator for Vector { + fn from_iter>(iter: T) -> Self { + let mut peers = Vector::default(); + + for i in iter { + peers.0.push(i.into()); } 
+ peers } } diff --git a/src/servers/apis/v1/context/torrent/resources/torrent.rs b/src/servers/apis/v1/context/torrent/resources/torrent.rs index c9dbd1c02..0d65b3eb6 100644 --- a/src/servers/apis/v1/context/torrent/resources/torrent.rs +++ b/src/servers/apis/v1/context/torrent/resources/torrent.rs @@ -2,12 +2,11 @@ //! //! - `Torrent` is the full torrent resource. //! - `ListItem` is a list item resource on a torrent list. `ListItem` does -//! include a `peers` field but it is always `None` in the struct and `null` in -//! the JSON response. +//! include a `peers` field but it is always `None` in the struct and `null` in +//! the JSON response. use serde::{Deserialize, Serialize}; -use super::peer; -use crate::tracker::services::torrent::{BasicInfo, Info}; +use crate::core::services::torrent::{BasicInfo, Info}; /// `Torrent` API resource. #[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] @@ -44,9 +43,6 @@ pub struct ListItem { /// The torrent's leechers counter. Active peers that are downloading the /// torrent. pub leechers: u64, - /// The torrent's peers. It's always `None` in the struct and `null` in the - /// JSON response. - pub peers: Option>, // todo: this is always None. Remove field from endpoint? } impl ListItem { @@ -59,8 +55,8 @@ impl ListItem { } } -/// Maps an array of the domain type [`BasicInfo`](crate::tracker::services::torrent::BasicInfo) -/// to the API resource type [`ListItem`](crate::servers::apis::v1::context::torrent::resources::torrent::ListItem). +/// Maps an array of the domain type [`BasicInfo`] +/// to the API resource type [`ListItem`]. 
#[must_use] pub fn to_resource(basic_info_vec: &[BasicInfo]) -> Vec { basic_info_vec @@ -71,14 +67,16 @@ pub fn to_resource(basic_info_vec: &[BasicInfo]) -> Vec { impl From for Torrent { fn from(info: Info) -> Self { + let peers: Option = info.peers.map(|peers| peers.into_iter().collect()); + + let peers: Option> = peers.map(|peers| peers.0); + Self { info_hash: info.info_hash.to_string(), seeders: info.seeders, completed: info.completed, leechers: info.leechers, - peers: info - .peers - .map(|peers| peers.iter().map(|peer| peer::Peer::from(*peer)).collect()), + peers, } } } @@ -90,7 +88,6 @@ impl From for ListItem { seeders: basic_info.seeders, completed: basic_info.completed, leechers: basic_info.leechers, - peers: None, } } } @@ -100,15 +97,14 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::str::FromStr; - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use torrust_tracker_primitives::announce_event::AnnounceEvent; + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfBytes}; use super::Torrent; + use crate::core::services::torrent::{BasicInfo, Info}; use crate::servers::apis::v1::context::torrent::resources::peer::Peer; use crate::servers::apis::v1::context::torrent::resources::torrent::ListItem; - use crate::shared::bit_torrent::info_hash::InfoHash; - use crate::shared::clock::DurationSinceUnixEpoch; - use crate::tracker::peer; - use crate::tracker::services::torrent::{BasicInfo, Info}; fn sample_peer() -> peer::Peer { peer::Peer { @@ -156,7 +152,6 @@ mod tests { seeders: 1, completed: 2, leechers: 3, - peers: None, } ); } diff --git a/src/servers/apis/v1/context/torrent/responses.rs b/src/servers/apis/v1/context/torrent/responses.rs index d3be092eb..5daceaf94 100644 --- a/src/servers/apis/v1/context/torrent/responses.rs +++ b/src/servers/apis/v1/context/torrent/responses.rs @@ -4,17 +4,17 @@ use axum::response::{IntoResponse, Json, Response}; use 
serde_json::json; use super::resources::torrent::{ListItem, Torrent}; -use crate::tracker::services::torrent::{BasicInfo, Info}; +use crate::core::services::torrent::{BasicInfo, Info}; /// `200` response that contains an array of -/// [`ListItem`](crate::servers::apis::v1::context::torrent::resources::torrent::ListItem) +/// [`ListItem`] /// resources as json. pub fn torrent_list_response(basic_infos: &[BasicInfo]) -> Json> { Json(ListItem::new_vec(basic_infos)) } /// `200` response that contains a -/// [`Torrent`](crate::servers::apis::v1::context::torrent::resources::torrent::Torrent) +/// [`Torrent`] /// resources as json. pub fn torrent_info_response(info: Info) -> Json { Json(Torrent::from(info)) diff --git a/src/servers/apis/v1/context/torrent/routes.rs b/src/servers/apis/v1/context/torrent/routes.rs index 18295f2a2..6f8c28df5 100644 --- a/src/servers/apis/v1/context/torrent/routes.rs +++ b/src/servers/apis/v1/context/torrent/routes.rs @@ -10,7 +10,7 @@ use axum::routing::get; use axum::Router; use super::handlers::{get_torrent_handler, get_torrents_handler}; -use crate::tracker::Tracker; +use crate::core::Tracker; /// It adds the routes to the router for the [`torrent`](crate::servers::apis::v1::context::torrent) API context. 
pub fn add(prefix: &str, router: Router, tracker: Arc) -> Router { diff --git a/src/servers/apis/v1/context/whitelist/handlers.rs b/src/servers/apis/v1/context/whitelist/handlers.rs index bd1da735e..32e434918 100644 --- a/src/servers/apis/v1/context/whitelist/handlers.rs +++ b/src/servers/apis/v1/context/whitelist/handlers.rs @@ -5,14 +5,14 @@ use std::sync::Arc; use axum::extract::{Path, State}; use axum::response::Response; +use torrust_tracker_primitives::info_hash::InfoHash; use super::responses::{ failed_to_reload_whitelist_response, failed_to_remove_torrent_from_whitelist_response, failed_to_whitelist_torrent_response, }; +use crate::core::Tracker; use crate::servers::apis::v1::responses::{invalid_info_hash_param_response, ok_response}; use crate::servers::apis::InfoHashParam; -use crate::shared::bit_torrent::info_hash::InfoHash; -use crate::tracker::Tracker; /// It handles the request to add a torrent to the whitelist. /// @@ -42,7 +42,7 @@ pub async fn add_torrent_to_whitelist_handler( /// /// - `200` response with a [`ActionStatus::Ok`](crate::servers::apis::v1::responses::ActionStatus::Ok) in json. /// - `500` with serialized error in debug format if the torrent couldn't be -/// removed from the whitelisted. +/// removed from the whitelisted. /// /// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::whitelist#remove-a-torrent-from-the-whitelist) /// for more information about this endpoint. @@ -65,7 +65,7 @@ pub async fn remove_torrent_from_whitelist_handler( /// /// - `200` response with a [`ActionStatus::Ok`](crate::servers::apis::v1::responses::ActionStatus::Ok) in json. /// - `500` with serialized error in debug format if the torrent whitelist -/// couldn't be reloaded from the database. +/// couldn't be reloaded from the database. /// /// Refer to the [API endpoint documentation](crate::servers::apis::v1::context::whitelist#reload-the-whitelist) /// for more information about this endpoint. 
diff --git a/src/servers/apis/v1/context/whitelist/mod.rs b/src/servers/apis/v1/context/whitelist/mod.rs index 2bb35ef65..79da43fdc 100644 --- a/src/servers/apis/v1/context/whitelist/mod.rs +++ b/src/servers/apis/v1/context/whitelist/mod.rs @@ -11,12 +11,12 @@ //! torrents in the whitelist. The whitelist can be updated using the API. //! //! > **NOTICE**: the whitelist is only used when the tracker is configured to -//! in `listed` or `private_listed` modes. Refer to the -//! [configuration crate documentation](https://docs.rs/torrust-tracker-configuration) -//! to know how to enable the those modes. +//! > in `listed` or `private_listed` modes. Refer to the +//! > [configuration crate documentation](https://docs.rs/torrust-tracker-configuration) +//! > to know how to enable the those modes. //! //! > **NOTICE**: if the tracker is not running in `listed` or `private_listed` -//! modes the requests to the whitelist API will be ignored. +//! > modes the requests to the whitelist API will be ignored. //! //! # Endpoints //! diff --git a/src/servers/apis/v1/context/whitelist/routes.rs b/src/servers/apis/v1/context/whitelist/routes.rs index 65d511341..e4e85181f 100644 --- a/src/servers/apis/v1/context/whitelist/routes.rs +++ b/src/servers/apis/v1/context/whitelist/routes.rs @@ -11,7 +11,7 @@ use axum::routing::{delete, get, post}; use axum::Router; use super::handlers::{add_torrent_to_whitelist_handler, reload_whitelist_handler, remove_torrent_from_whitelist_handler}; -use crate::tracker::Tracker; +use crate::core::Tracker; /// It adds the routes to the router for the [`whitelist`](crate::servers::apis::v1::context::whitelist) API context. pub fn add(prefix: &str, router: Router, tracker: Arc) -> Router { diff --git a/src/servers/apis/v1/middlewares/auth.rs b/src/servers/apis/v1/middlewares/auth.rs index 3e8f74d0c..58219c7ca 100644 --- a/src/servers/apis/v1/middlewares/auth.rs +++ b/src/servers/apis/v1/middlewares/auth.rs @@ -23,12 +23,12 @@ //! identify the token. 
use std::sync::Arc; -use axum::extract::{Query, State}; +use axum::extract::{self}; use axum::http::Request; use axum::middleware::Next; use axum::response::{IntoResponse, Response}; use serde::Deserialize; -use torrust_tracker_configuration::{Configuration, HttpApi}; +use torrust_tracker_configuration::AccessTokens; use crate::servers::apis::v1::responses::unhandled_rejection_response; @@ -38,22 +38,24 @@ pub struct QueryParams { pub token: Option, } +#[derive(Clone, Debug)] +pub struct State { + pub access_tokens: Arc, +} + /// Middleware for authentication using a "token" GET param. /// The token must be one of the tokens in the tracker [HTTP API configuration](torrust_tracker_configuration::HttpApi). -pub async fn auth( - State(config): State>, - Query(params): Query, - request: Request, - next: Next, -) -> Response -where - B: Send, -{ +pub async fn auth( + extract::State(state): extract::State, + extract::Query(params): extract::Query, + request: Request, + next: Next, +) -> Response { let Some(token) = params.token else { return AuthError::Unauthorized.into_response(); }; - if !authenticate(&token, &config.http_api) { + if !authenticate(&token, &state.access_tokens) { return AuthError::TokenNotValid.into_response(); } @@ -76,8 +78,8 @@ impl IntoResponse for AuthError { } } -fn authenticate(token: &str, http_api_config: &HttpApi) -> bool { - http_api_config.contains_token(token) +fn authenticate(token: &str, tokens: &AccessTokens) -> bool { + tokens.values().any(|t| t == token) } /// `500` error response returned when the token is missing. diff --git a/src/servers/apis/v1/mod.rs b/src/servers/apis/v1/mod.rs index 213ee9335..372ae0ff9 100644 --- a/src/servers/apis/v1/mod.rs +++ b/src/servers/apis/v1/mod.rs @@ -12,7 +12,7 @@ //! > **NOTICE**: //! - The authentication keys are only used by the HTTP tracker. //! - The whitelist is only used when the tracker is running in `listed` or -//! `private_listed` mode. +//! `private_listed` mode. //! //! 
Refer to the [authentication middleware](crate::servers::apis::v1::middlewares::auth) //! for more information about the authentication process. diff --git a/src/servers/apis/v1/responses.rs b/src/servers/apis/v1/responses.rs index ecaf90098..d2c52ac40 100644 --- a/src/servers/apis/v1/responses.rs +++ b/src/servers/apis/v1/responses.rs @@ -61,7 +61,8 @@ pub fn invalid_auth_key_param_response(invalid_key: &str) -> Response { bad_request_response(&format!("Invalid auth key id param \"{invalid_key}\"")) } -fn bad_request_response(body: &str) -> Response { +#[must_use] +pub fn bad_request_response(body: &str) -> Response { ( StatusCode::BAD_REQUEST, [(header::CONTENT_TYPE, "text/plain; charset=utf-8")], diff --git a/src/servers/apis/v1/routes.rs b/src/servers/apis/v1/routes.rs index 74778ca14..3786b3532 100644 --- a/src/servers/apis/v1/routes.rs +++ b/src/servers/apis/v1/routes.rs @@ -4,13 +4,15 @@ use std::sync::Arc; use axum::Router; use super::context::{auth_key, stats, torrent, whitelist}; -use crate::tracker::Tracker; +use crate::core::Tracker; /// Add the routes for the v1 API. pub fn add(prefix: &str, router: Router, tracker: Arc) -> Router { let v1_prefix = format!("{prefix}/v1"); + let router = auth_key::routes::add(&v1_prefix, router, tracker.clone()); let router = stats::routes::add(&v1_prefix, router, tracker.clone()); let router = whitelist::routes::add(&v1_prefix, router, tracker.clone()); + torrent::routes::add(&v1_prefix, router, tracker) } diff --git a/src/servers/custom_axum_server.rs b/src/servers/custom_axum_server.rs new file mode 100644 index 000000000..5705ef24e --- /dev/null +++ b/src/servers/custom_axum_server.rs @@ -0,0 +1,275 @@ +//! Wrapper for Axum server to add timeouts. +//! +//! Copyright (c) Eray Karatay ([@programatik29](https://github.com/programatik29)). +//! +//! See: . +//! +//! If a client opens a HTTP connection and it does not send any requests, the +//! connection is closed after a timeout. You can test it with: +//! +//! 
```text +//! telnet 127.0.0.1 1212 +//! Trying 127.0.0.1... +//! Connected to 127.0.0.1. +//! Escape character is '^]'. +//! Connection closed by foreign host. +//! ``` +//! +//! If you want to know more about Axum and timeouts see . +use std::future::Ready; +use std::io::ErrorKind; +use std::net::TcpListener; +use std::pin::Pin; +use std::task::{Context, Poll}; +use std::time::Duration; + +use axum_server::accept::Accept; +use axum_server::tls_rustls::{RustlsAcceptor, RustlsConfig}; +use axum_server::Server; +use futures_util::{ready, Future}; +use http_body::{Body, Frame}; +use hyper::Response; +use hyper_util::rt::TokioTimer; +use pin_project_lite::pin_project; +use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; +use tokio::sync::mpsc::{self, UnboundedReceiver, UnboundedSender}; +use tokio::time::{Instant, Sleep}; +use tower::Service; + +const HTTP1_HEADER_READ_TIMEOUT: Duration = Duration::from_secs(5); +const HTTP2_KEEP_ALIVE_TIMEOUT: Duration = Duration::from_secs(5); +const HTTP2_KEEP_ALIVE_INTERVAL: Duration = Duration::from_secs(5); + +#[must_use] +pub fn from_tcp_with_timeouts(socket: TcpListener) -> Server { + add_timeouts(axum_server::from_tcp(socket)) +} + +#[must_use] +pub fn from_tcp_rustls_with_timeouts(socket: TcpListener, tls: RustlsConfig) -> Server { + add_timeouts(axum_server::from_tcp_rustls(socket, tls)) +} + +fn add_timeouts(mut server: Server) -> Server { + server.http_builder().http1().timer(TokioTimer::new()); + server.http_builder().http2().timer(TokioTimer::new()); + + server.http_builder().http1().header_read_timeout(HTTP1_HEADER_READ_TIMEOUT); + server + .http_builder() + .http2() + .keep_alive_timeout(HTTP2_KEEP_ALIVE_TIMEOUT) + .keep_alive_interval(HTTP2_KEEP_ALIVE_INTERVAL); + + server +} + +#[derive(Clone)] +pub struct TimeoutAcceptor; + +impl Accept for TimeoutAcceptor { + type Stream = TimeoutStream; + type Service = TimeoutService; + type Future = Ready>; + + fn accept(&self, stream: I, service: S) -> Self::Future { + let (tx, 
rx) = mpsc::unbounded_channel(); + + let stream = TimeoutStream::new(stream, HTTP1_HEADER_READ_TIMEOUT, rx); + let service = TimeoutService::new(service, tx); + + std::future::ready(Ok((stream, service))) + } +} + +#[derive(Clone)] +pub struct TimeoutService { + inner: S, + sender: UnboundedSender, +} + +impl TimeoutService { + fn new(inner: S, sender: UnboundedSender) -> Self { + Self { inner, sender } + } +} + +impl Service for TimeoutService +where + S: Service>, +{ + type Response = Response>; + type Error = S::Error; + type Future = TimeoutServiceFuture; + + fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll> { + self.inner.poll_ready(cx) + } + + fn call(&mut self, req: Request) -> Self::Future { + // send timer wait signal + let _ = self.sender.send(TimerSignal::Wait); + + TimeoutServiceFuture::new(self.inner.call(req), self.sender.clone()) + } +} + +pin_project! { + pub struct TimeoutServiceFuture { + #[pin] + inner: F, + sender: Option>, + } +} + +impl TimeoutServiceFuture { + fn new(inner: F, sender: UnboundedSender) -> Self { + Self { + inner, + sender: Some(sender), + } + } +} + +impl Future for TimeoutServiceFuture +where + F: Future, E>>, +{ + type Output = Result>, E>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.project(); + this.inner.poll(cx).map(|result| { + result.map(|response| { + response.map(|body| TimeoutBody::new(body, this.sender.take().expect("future polled after ready"))) + }) + }) + } +} + +enum TimerSignal { + Wait, + Reset, +} + +pin_project! 
{ + pub struct TimeoutBody { + #[pin] + inner: B, + sender: UnboundedSender, + } +} + +impl TimeoutBody { + fn new(inner: B, sender: UnboundedSender) -> Self { + Self { inner, sender } + } +} + +impl Body for TimeoutBody { + type Data = B::Data; + type Error = B::Error; + + fn poll_frame(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll, Self::Error>>> { + let this = self.project(); + let option = ready!(this.inner.poll_frame(cx)); + + if option.is_none() { + let _ = this.sender.send(TimerSignal::Reset); + } + + Poll::Ready(option) + } + + fn is_end_stream(&self) -> bool { + let is_end_stream = self.inner.is_end_stream(); + + if is_end_stream { + let _ = self.sender.send(TimerSignal::Reset); + } + + is_end_stream + } + + fn size_hint(&self) -> http_body::SizeHint { + self.inner.size_hint() + } +} + +pub struct TimeoutStream { + inner: IO, + // hyper requires unpin + sleep: Pin>, + duration: Duration, + waiting: bool, + receiver: UnboundedReceiver, + finished: bool, +} + +impl TimeoutStream { + fn new(inner: IO, duration: Duration, receiver: UnboundedReceiver) -> Self { + Self { + inner, + sleep: Box::pin(tokio::time::sleep(duration)), + duration, + waiting: false, + receiver, + finished: false, + } + } +} + +impl AsyncRead for TimeoutStream { + fn poll_read(mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut ReadBuf<'_>) -> Poll> { + if !self.finished { + match Pin::new(&mut self.receiver).poll_recv(cx) { + // reset the timer + Poll::Ready(Some(TimerSignal::Reset)) => { + self.waiting = false; + + let deadline = Instant::now() + self.duration; + self.sleep.as_mut().reset(deadline); + } + // enter waiting mode (for response body last chunk) + Poll::Ready(Some(TimerSignal::Wait)) => self.waiting = true, + Poll::Ready(None) => self.finished = true, + Poll::Pending => (), + } + } + + if !self.waiting { + // return error if timer is elapsed + if let Poll::Ready(()) = self.sleep.as_mut().poll(cx) { + return Poll::Ready(Err(std::io::Error::new(ErrorKind::TimedOut, 
"request header read timed out"))); + } + } + + Pin::new(&mut self.inner).poll_read(cx, buf) + } +} + +impl AsyncWrite for TimeoutStream { + fn poll_write(mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8]) -> Poll> { + Pin::new(&mut self.inner).poll_write(cx, buf) + } + + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut self.inner).poll_flush(cx) + } + + fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + Pin::new(&mut self.inner).poll_shutdown(cx) + } + + fn poll_write_vectored( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + bufs: &[std::io::IoSlice<'_>], + ) -> Poll> { + Pin::new(&mut self.inner).poll_write_vectored(cx, bufs) + } + + fn is_write_vectored(&self) -> bool { + self.inner.is_write_vectored() + } +} diff --git a/src/servers/health_check_api/handlers.rs b/src/servers/health_check_api/handlers.rs new file mode 100644 index 000000000..944e84a1d --- /dev/null +++ b/src/servers/health_check_api/handlers.rs @@ -0,0 +1,50 @@ +use std::collections::VecDeque; + +use axum::extract::State; +use axum::Json; + +use super::resources::{CheckReport, Report}; +use super::responses; +use crate::servers::registar::{ServiceHealthCheckJob, ServiceRegistration, ServiceRegistry}; + +/// Endpoint for container health check. +/// +/// Creates a vector [`CheckReport`] from the input set of [`CheckJob`], and then builds a report from the results. +/// +pub(crate) async fn health_check_handler(State(register): State) -> Json { + #[allow(unused_assignments)] + let mut checks: VecDeque = VecDeque::new(); + + { + let mutex = register.lock(); + + checks = mutex.await.values().map(ServiceRegistration::spawn_check).collect(); + } + + // if we do not have any checks, lets return a `none` result. 
+ if checks.is_empty() { + return responses::none(); + } + + let jobs = checks.drain(..).map(|c| { + tokio::spawn(async move { + CheckReport { + binding: c.binding, + info: c.info.clone(), + result: c.job.await.expect("it should be able to join into the checking function"), + } + }) + }); + + let results: Vec = futures::future::join_all(jobs) + .await + .drain(..) + .map(|r| r.expect("it should be able to connect to the job")) + .collect(); + + if results.iter().any(CheckReport::fail) { + responses::error("health check failed".to_string(), results) + } else { + responses::ok(results) + } +} diff --git a/src/servers/health_check_api/mod.rs b/src/servers/health_check_api/mod.rs new file mode 100644 index 000000000..24c5232c8 --- /dev/null +++ b/src/servers/health_check_api/mod.rs @@ -0,0 +1,6 @@ +pub mod handlers; +pub mod resources; +pub mod responses; +pub mod server; + +pub const HEALTH_CHECK_API_LOG_TARGET: &str = "HEALTH CHECK API"; diff --git a/src/servers/health_check_api/resources.rs b/src/servers/health_check_api/resources.rs new file mode 100644 index 000000000..3302fb966 --- /dev/null +++ b/src/servers/health_check_api/resources.rs @@ -0,0 +1,64 @@ +use std::net::SocketAddr; + +use serde::{Deserialize, Serialize}; + +#[derive(Copy, Clone, Serialize, Deserialize, Debug, PartialEq, Eq)] +pub enum Status { + Ok, + Error, + None, +} + +#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, Eq)] +pub struct CheckReport { + pub binding: SocketAddr, + pub info: String, + pub result: Result, +} + +impl CheckReport { + #[must_use] + pub fn pass(&self) -> bool { + self.result.is_ok() + } + #[must_use] + pub fn fail(&self) -> bool { + self.result.is_err() + } +} + +#[derive(Clone, Serialize, Deserialize, Debug, PartialEq, Eq)] +pub struct Report { + pub status: Status, + pub message: String, + pub details: Vec, +} + +impl Report { + #[must_use] + pub fn none() -> Report { + Self { + status: Status::None, + message: String::new(), + details: Vec::default(), + } + } 
+ + #[must_use] + pub fn ok(details: Vec) -> Report { + Self { + status: Status::Ok, + message: String::new(), + details, + } + } + + #[must_use] + pub fn error(message: String, details: Vec) -> Report { + Self { + status: Status::Error, + message, + details, + } + } +} diff --git a/src/servers/health_check_api/responses.rs b/src/servers/health_check_api/responses.rs new file mode 100644 index 000000000..3796d8be4 --- /dev/null +++ b/src/servers/health_check_api/responses.rs @@ -0,0 +1,15 @@ +use axum::Json; + +use super::resources::{CheckReport, Report}; + +pub fn ok(details: Vec) -> Json { + Json(Report::ok(details)) +} + +pub fn error(message: String, details: Vec) -> Json { + Json(Report::error(message, details)) +} + +pub fn none() -> Json { + Json(Report::none()) +} diff --git a/src/servers/health_check_api/server.rs b/src/servers/health_check_api/server.rs new file mode 100644 index 000000000..89fbafe45 --- /dev/null +++ b/src/servers/health_check_api/server.rs @@ -0,0 +1,100 @@ +//! Logic to run the Health Check HTTP API server. +//! +//! This API is intended to be used by the container infrastructure to check if +//! the whole application is healthy. 
+use std::net::SocketAddr; +use std::time::Duration; + +use axum::http::HeaderName; +use axum::response::Response; +use axum::routing::get; +use axum::{Json, Router}; +use axum_server::Handle; +use futures::Future; +use hyper::Request; +use serde_json::json; +use tokio::sync::oneshot::{Receiver, Sender}; +use tower_http::compression::CompressionLayer; +use tower_http::propagate_header::PropagateHeaderLayer; +use tower_http::request_id::{MakeRequestUuid, SetRequestIdLayer}; +use tower_http::trace::{DefaultMakeSpan, TraceLayer}; +use tracing::{debug, Level, Span}; + +use crate::bootstrap::jobs::Started; +use crate::servers::health_check_api::handlers::health_check_handler; +use crate::servers::health_check_api::HEALTH_CHECK_API_LOG_TARGET; +use crate::servers::registar::ServiceRegistry; +use crate::servers::signals::{graceful_shutdown, Halted}; + +/// Starts Health Check API server. +/// +/// # Panics +/// +/// Will panic if binding to the socket address fails. +pub fn start( + bind_to: SocketAddr, + tx: Sender, + rx_halt: Receiver, + register: ServiceRegistry, +) -> impl Future> { + let router = Router::new() + .route("/", get(|| async { Json(json!({})) })) + .route("/health_check", get(health_check_handler)) + .with_state(register) + .layer(CompressionLayer::new()) + .layer(SetRequestIdLayer::x_request_id(MakeRequestUuid)) + .layer(PropagateHeaderLayer::new(HeaderName::from_static("x-request-id"))) + .layer( + TraceLayer::new_for_http() + .make_span_with(DefaultMakeSpan::new().level(Level::INFO)) + .on_request(|request: &Request, _span: &Span| { + let method = request.method().to_string(); + let uri = request.uri().to_string(); + let request_id = request + .headers() + .get("x-request-id") + .map(|v| v.to_str().unwrap_or_default()) + .unwrap_or_default(); + + tracing::span!( + target: HEALTH_CHECK_API_LOG_TARGET, + tracing::Level::INFO, "request", method = %method, uri = %uri, request_id = %request_id); + }) + .on_response(|response: &Response, latency: Duration, 
_span: &Span| { + let status_code = response.status(); + let request_id = response + .headers() + .get("x-request-id") + .map(|v| v.to_str().unwrap_or_default()) + .unwrap_or_default(); + let latency_ms = latency.as_millis(); + + tracing::span!( + target: HEALTH_CHECK_API_LOG_TARGET, + tracing::Level::INFO, "response", latency = %latency_ms, status = %status_code, request_id = %request_id); + }), + ) + .layer(SetRequestIdLayer::x_request_id(MakeRequestUuid)); + + let socket = std::net::TcpListener::bind(bind_to).expect("Could not bind tcp_listener to address."); + let address = socket.local_addr().expect("Could not get local_addr from tcp_listener."); + + let handle = Handle::new(); + + debug!(target: HEALTH_CHECK_API_LOG_TARGET, "Starting service with graceful shutdown in a spawned task ..."); + + tokio::task::spawn(graceful_shutdown( + handle.clone(), + rx_halt, + format!("Shutting down http server on socket address: {address}"), + )); + + let running = axum_server::from_tcp(socket) + .handle(handle) + .serve(router.into_make_service_with_connect_info::()); + + tx.send(Started { address }) + .expect("the Health Check API server should not be dropped"); + + running +} diff --git a/src/servers/http/mod.rs b/src/servers/http/mod.rs index 067e88fdd..4ef5ca7ea 100644 --- a/src/servers/http/mod.rs +++ b/src/servers/http/mod.rs @@ -39,7 +39,7 @@ //! **Query parameters** //! //! > **NOTICE**: you can click on the parameter name to see a full description -//! after extracting and parsing the parameter from the URL query component. +//! > after extracting and parsing the parameter from the URL query component. //! //! Parameter | Type | Description | Required | Default | Example //! ---|---|---|---|---|--- @@ -58,40 +58,40 @@ //! request for more information about the parameters. //! //! > **NOTICE**: the [BEP 03](https://www.bittorrent.org/beps/bep_0003.html) -//! defines only the `ip` and `event` parameters as optional. However, the -//! 
tracker assigns default values to the optional parameters if they are not -//! provided. +//! > defines only the `ip` and `event` parameters as optional. However, the +//! > tracker assigns default values to the optional parameters if they are not +//! > provided. //! //! > **NOTICE**: the `peer_addr` parameter is not part of the original -//! specification. But the peer IP was added in the -//! [UDP Tracker protocol](https://www.bittorrent.org/beps/bep_0015.html). It is -//! used to provide the peer's IP address to the tracker, but it is ignored by -//! the tracker. The tracker uses the IP address of the peer that sent the -//! request or the right-most-ip in the `X-Forwarded-For` header if the tracker -//! is behind a reverse proxy. +//! > specification. But the peer IP was added in the +//! > [UDP Tracker protocol](https://www.bittorrent.org/beps/bep_0015.html). It is +//! > used to provide the peer's IP address to the tracker, but it is ignored by +//! > the tracker. The tracker uses the IP address of the peer that sent the +//! > request or the right-most-ip in the `X-Forwarded-For` header if the tracker +//! > is behind a reverse proxy. //! //! > **NOTICE**: the maximum number of peers that the tracker can return is -//! `74`. Defined with a hardcoded const [`MAX_SCRAPE_TORRENTS`](crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS). -//! Refer to [issue 262](https://github.com/torrust/torrust-tracker/issues/262) -//! for more information about this limitation. +//! > `74`. Defined with a hardcoded const [`TORRENT_PEERS_LIMIT`](torrust_tracker_configuration::TORRENT_PEERS_LIMIT). +//! > Refer to [issue 262](https://github.com/torrust/torrust-tracker/issues/262) +//! > for more information about this limitation. //! //! > **NOTICE**: the `info_hash` parameter is NOT a `URL` encoded string param. -//! It is percent encode of the raw `info_hash` bytes (40 bytes). URL `GET` params -//! can contain any bytes, not only well-formed UTF-8. 
The `info_hash` is a -//! 20-byte SHA1. Check the [`percent_encoding`](crate::servers::http::percent_encoding) -//! module to know more about the encoding. +//! > It is percent encode of the raw `info_hash` bytes (40 bytes). URL `GET` params +//! > can contain any bytes, not only well-formed UTF-8. The `info_hash` is a +//! > 20-byte SHA1. Check the [`percent_encoding`] +//! > module to know more about the encoding. //! //! > **NOTICE**: the `peer_id` parameter is NOT a `URL` encoded string param. -//! It is percent encode of the raw peer ID bytes (20 bytes). URL `GET` params -//! can contain any bytes, not only well-formed UTF-8. The `info_hash` is a -//! 20-byte SHA1. Check the [`percent_encoding`](crate::servers::http::percent_encoding) -//! module to know more about the encoding. +//! > It is percent encode of the raw peer ID bytes (20 bytes). URL `GET` params +//! > can contain any bytes, not only well-formed UTF-8. The `info_hash` is a +//! > 20-byte SHA1. Check the [`percent_encoding`] +//! > module to know more about the encoding. //! //! > **NOTICE**: by default, the tracker returns the non-compact peer list when -//! no `compact` parameter is provided or is empty. The -//! [BEP 23](https://www.bittorrent.org/beps/bep_0023.html) suggests to do the -//! opposite. The tracker should return the compact peer list by default and -//! return the non-compact peer list if the `compact` parameter is `0`. +//! > no `compact` parameter is provided or is empty. The +//! > [BEP 23](https://www.bittorrent.org/beps/bep_0023.html) suggests to do the +//! > opposite. The tracker should return the compact peer list by default and +//! > return the non-compact peer list if the `compact` parameter is `0`. //! //! **Sample announce URL** //! @@ -152,7 +152,7 @@ //! 000000f0: 65 e //! ``` //! -//! Refer to the [`NonCompact`](crate::servers::http::v1::responses::announce::NonCompact) +//! Refer to the [`Normal`](crate::servers::http::v1::responses::announce::Normal), i.e. 
`Non-Compact` //! response for more information about the response. //! //! **Sample compact response** @@ -206,15 +206,15 @@ //! //! ### Scrape //! -//! The `scrape` request allows a peer to get [swarm metadata](crate::tracker::torrent::SwarmMetadata) +//! The `scrape` request allows a peer to get [swarm metadata](torrust_tracker_primitives::swarm_metadata::SwarmMetadata) //! for multiple torrents at the same time. //! -//! The response contains the [swarm metadata](crate::tracker::torrent::SwarmMetadata) +//! The response contains the [swarm metadata](torrust_tracker_primitives::swarm_metadata::SwarmMetadata) //! for that torrent: //! -//! - [complete](crate::tracker::torrent::SwarmMetadata::complete) -//! - [downloaded](crate::tracker::torrent::SwarmMetadata::downloaded) -//! - [incomplete](crate::tracker::torrent::SwarmMetadata::incomplete) +//! - [complete](torrust_tracker_primitives::swarm_metadata::SwarmMetadata::complete) +//! - [downloaded](torrust_tracker_primitives::swarm_metadata::SwarmMetadata::downloaded) +//! - [incomplete](torrust_tracker_primitives::swarm_metadata::SwarmMetadata::incomplete) //! //! **Query parameters** //! @@ -223,7 +223,7 @@ //! [`info_hash`](crate::servers::http::v1::requests::scrape::Scrape::info_hashes) | percent encoded of 20-byte array | The `Info Hash` of the torrent. | Yes | No | `%81%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00` //! //! > **NOTICE**: you can scrape multiple torrents at the same time by passing -//! multiple `info_hash` parameters. +//! > multiple `info_hash` parameters. //! //! Refer to the [`Scrape`](crate::servers::http::v1::requests::scrape::Scrape) //! request for more information about the parameters. @@ -237,8 +237,8 @@ //! In order to scrape multiple torrents at the same time you can pass multiple //! `info_hash` parameters: `info_hash=%81%00%0...00%00%00&info_hash=%82%00%0...00%00%00` //! -//! > **NOTICE**: the maximum number of torrent you can scrape at the same time -//! is `74`. 
Defined with a hardcoded const [`MAX_SCRAPE_TORRENTS`](crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS). +//! > **NOTICE**: the maximum number of torrents you can scrape at the same time +//! > is `74`. Defined with a hardcoded const [`MAX_SCRAPE_TORRENTS`](crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS). //! //! **Sample response** //! @@ -266,7 +266,7 @@ //! Where the `files` key contains a dictionary of dictionaries. The first //! dictionary key is the `info_hash` of the torrent (`iiiiiiiiiiiiiiiiiiii` in //! the example). The second level dictionary contains the -//! [swarm metadata](crate::tracker::torrent::SwarmMetadata) for that torrent. +//! [swarm metadata](torrust_tracker_primitives::swarm_metadata::SwarmMetadata) for that torrent. //! //! If you save the response as a file and you open it with a program that //! can handle binary data you would see: @@ -309,6 +309,8 @@ pub mod percent_encoding; pub mod server; pub mod v1; +pub const HTTP_TRACKER_LOG_TARGET: &str = "HTTP TRACKER"; + /// The version of the HTTP tracker. #[derive(Serialize, Deserialize, Copy, Clone, PartialEq, Eq, Debug)] pub enum Version { diff --git a/src/servers/http/percent_encoding.rs b/src/servers/http/percent_encoding.rs index c8f0f7f12..90f4b9a43 100644 --- a/src/servers/http/percent_encoding.rs +++ b/src/servers/http/percent_encoding.rs @@ -15,11 +15,11 @@ //! - //! - //! - -use crate::shared::bit_torrent::info_hash::{ConversionError, InfoHash}; -use crate::tracker::peer::{self, IdConversionError}; +use torrust_tracker_primitives::info_hash::{self, InfoHash}; +use torrust_tracker_primitives::peer; /// Percent decodes a percent encoded infohash. Internally an -/// [`InfoHash`](crate::shared::bit_torrent::info_hash::InfoHash) is a 20-byte array. +/// [`InfoHash`] is a 20-byte array. /// /// For example, given the infohash `3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0`, /// it's percent encoded representation is `%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0`. 
@@ -27,8 +27,8 @@ use crate::tracker::peer::{self, IdConversionError}; /// ```rust /// use std::str::FromStr; /// use torrust_tracker::servers::http::percent_encoding::percent_decode_info_hash; -/// use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; -/// use torrust_tracker::tracker::peer; +/// use torrust_tracker_primitives::info_hash::InfoHash; +/// use torrust_tracker_primitives::peer; /// /// let encoded_infohash = "%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0"; /// @@ -43,13 +43,13 @@ use crate::tracker::peer::{self, IdConversionError}; /// # Errors /// /// Will return `Err` if the decoded bytes do not represent a valid -/// [`InfoHash`](crate::shared::bit_torrent::info_hash::InfoHash). -pub fn percent_decode_info_hash(raw_info_hash: &str) -> Result { +/// [`InfoHash`]. +pub fn percent_decode_info_hash(raw_info_hash: &str) -> Result { let bytes = percent_encoding::percent_decode_str(raw_info_hash).collect::>(); InfoHash::try_from(bytes) } -/// Percent decodes a percent encoded peer id. Internally a peer [`Id`](crate::tracker::peer::Id) +/// Percent decodes a percent encoded peer id. Internally a peer [`Id`](peer::Id) /// is a 20-byte array. /// /// For example, given the peer id `*b"-qB00000000000000000"`, @@ -58,8 +58,8 @@ pub fn percent_decode_info_hash(raw_info_hash: &str) -> Result Result Result { +/// Will return `Err` if the decoded bytes do not represent a valid [`peer::Id`].
+pub fn percent_decode_peer_id(raw_peer_id: &str) -> Result { let bytes = percent_encoding::percent_decode_str(raw_peer_id).collect::>(); peer::Id::try_from(bytes) } @@ -80,9 +80,10 @@ pub fn percent_decode_peer_id(raw_peer_id: &str) -> Result Self; - - fn start_with_graceful_shutdown( - &self, - cfg: torrust_tracker_configuration::HttpTracker, - tracker: Arc, - shutdown_signal: F, - ) -> (SocketAddr, BoxFuture<'static, ()>) - where - F: Future + Send + 'static; -} +use super::v1::routes::router; +use crate::bootstrap::jobs::Started; +use crate::core::Tracker; +use crate::servers::custom_axum_server::{self, TimeoutAcceptor}; +use crate::servers::http::HTTP_TRACKER_LOG_TARGET; +use crate::servers::logging::STARTED_ON; +use crate::servers::registar::{ServiceHealthCheckJob, ServiceRegistration, ServiceRegistrationForm}; +use crate::servers::signals::{graceful_shutdown, Halted}; /// Error that can occur when starting or stopping the HTTP server. /// @@ -37,20 +29,74 @@ pub trait HttpServerLauncher: Sync + Send { /// /// - The channel to send the shutdown signal to the server is closed. /// - The task to shutdown the server on the spawned server failed to execute to -/// completion. +/// completion. #[derive(Debug)] pub enum Error { - /// Any kind of error starting or stopping the server. - Error(String), // todo: refactor to use thiserror and add more variants for specific errors. 
+ Error(String), +} + +#[derive(Constructor, Debug)] +pub struct Launcher { + pub bind_to: SocketAddr, + pub tls: Option, +} + +impl Launcher { + fn start(&self, tracker: Arc, tx_start: Sender, rx_halt: Receiver) -> BoxFuture<'static, ()> { + let socket = std::net::TcpListener::bind(self.bind_to).expect("Could not bind tcp_listener to address."); + let address = socket.local_addr().expect("Could not get local_addr from tcp_listener."); + + let handle = Handle::new(); + + tokio::task::spawn(graceful_shutdown( + handle.clone(), + rx_halt, + format!("Shutting down HTTP server on socket address: {address}"), + )); + + let tls = self.tls.clone(); + let protocol = if tls.is_some() { "https" } else { "http" }; + + info!(target: HTTP_TRACKER_LOG_TARGET, "Starting on: {protocol}://{}", address); + + let app = router(tracker, address); + + let running = Box::pin(async { + match tls { + Some(tls) => custom_axum_server::from_tcp_rustls_with_timeouts(socket, tls) + .handle(handle) + // The TimeoutAcceptor is commented because TLS does not work with it. + // See: https://github.com/torrust/torrust-index/issues/204#issuecomment-2115529214 + //.acceptor(TimeoutAcceptor) + .serve(app.into_make_service_with_connect_info::()) + .await + .expect("Axum server crashed."), + None => custom_axum_server::from_tcp_with_timeouts(socket) + .handle(handle) + .acceptor(TimeoutAcceptor) + .serve(app.into_make_service_with_connect_info::()) + .await + .expect("Axum server crashed."), + } + }); + + info!(target: HTTP_TRACKER_LOG_TARGET, "{STARTED_ON}: {protocol}://{}", address); + + tx_start + .send(Started { address }) + .expect("the HTTP(s) Tracker service should not be dropped"); + + running + } } /// A HTTP server instance controller with no HTTP instance running. #[allow(clippy::module_name_repetitions)] -pub type StoppedHttpServer = HttpServer>; +pub type StoppedHttpServer = HttpServer; /// A HTTP server instance controller with a running HTTP instance.
#[allow(clippy::module_name_repetitions)] -pub type RunningHttpServer = HttpServer>; +pub type RunningHttpServer = HttpServer; /// A HTTP server instance controller. /// @@ -65,35 +111,32 @@ pub type RunningHttpServer = HttpServer>; /// server but always keeping the same configuration. /// /// > **NOTICE**: if the configurations changes after running the server it will -/// reset to the initial value after stopping the server. This struct is not -/// intended to persist configurations between runs. +/// > reset to the initial value after stopping the server. This struct is not +/// > intended to persist configurations between runs. #[allow(clippy::module_name_repetitions)] pub struct HttpServer { - /// The configuration of the server that will be used every time the server - /// is started. - pub cfg: torrust_tracker_configuration::HttpTracker, /// The state of the server: `running` or `stopped`. pub state: S, } /// A stopped HTTP server state. -pub struct Stopped { - launcher: I, +pub struct Stopped { + launcher: Launcher, } /// A running HTTP server state. -pub struct Running { +pub struct Running { /// The address where the server is bound. - pub bind_addr: SocketAddr, - task_killer: tokio::sync::oneshot::Sender, - task: tokio::task::JoinHandle, + pub binding: SocketAddr, + pub halt_task: tokio::sync::oneshot::Sender, + pub task: tokio::task::JoinHandle, } -impl HttpServer> { +impl HttpServer { /// It creates a new `HttpServer` controller in `stopped` state. - pub fn new(cfg: torrust_tracker_configuration::HttpTracker, launcher: I) -> Self { + #[must_use] + pub fn new(launcher: Launcher) -> Self { Self { - cfg, state: Stopped { launcher }, } } @@ -109,57 +152,111 @@ impl HttpServer> { /// /// It would panic spawned HTTP server launcher cannot send the bound `SocketAddr` /// back to the main thread. 
- pub async fn start(self, tracker: Arc) -> Result>, Error> { - let (shutdown_sender, shutdown_receiver) = tokio::sync::oneshot::channel::(); - let (addr_sender, addr_receiver) = tokio::sync::oneshot::channel::(); + pub async fn start(self, tracker: Arc, form: ServiceRegistrationForm) -> Result, Error> { + let (tx_start, rx_start) = tokio::sync::oneshot::channel::(); + let (tx_halt, rx_halt) = tokio::sync::oneshot::channel::(); - let configuration = self.cfg.clone(); let launcher = self.state.launcher; let task = tokio::spawn(async move { - let (bind_addr, server) = - launcher.start_with_graceful_shutdown(configuration, tracker, shutdown_signal(shutdown_receiver)); - - addr_sender.send(bind_addr).expect("Could not return SocketAddr."); + let server = launcher.start(tracker, tx_start, rx_halt); server.await; launcher }); - let bind_address = addr_receiver - .await - .map_err(|_| Error::Error("Could not receive bind_address.".to_string()))?; + let binding = rx_start.await.expect("it should be able to start the service").address; + + form.send(ServiceRegistration::new(binding, check_fn)) + .expect("it should be able to send service registration"); Ok(HttpServer { - cfg: self.cfg, state: Running { - bind_addr: bind_address, - task_killer: shutdown_sender, + binding, + halt_task: tx_halt, task, }, }) } } -impl HttpServer> { +impl HttpServer { /// It stops the server and returns a `HttpServer` controller in `stopped` /// state. /// /// # Errors /// /// It would return an error if the channel for the task killer signal was closed. 
- pub async fn stop(self) -> Result>, Error> { + pub async fn stop(self) -> Result, Error> { self.state - .task_killer - .send(0) + .halt_task + .send(Halted::Normal) .map_err(|_| Error::Error("Task killer channel was closed.".to_string()))?; let launcher = self.state.task.await.map_err(|e| Error::Error(e.to_string()))?; Ok(HttpServer { - cfg: self.cfg, state: Stopped { launcher }, }) } } + +/// Checks the Health by connecting to the HTTP tracker endpoint. +/// +/// # Errors +/// +/// This function will return an error if unable to connect. +/// Or if the request returns an error. +#[must_use] +pub fn check_fn(binding: &SocketAddr) -> ServiceHealthCheckJob { + let url = format!("http://{binding}/health_check"); // DevSkim: ignore DS137138 + + let info = format!("checking http tracker health check at: {url}"); + + let job = tokio::spawn(async move { + match reqwest::get(url).await { + Ok(response) => Ok(response.status().to_string()), + Err(err) => Err(err.to_string()), + } + }); + + ServiceHealthCheckJob::new(*binding, info, job) +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use torrust_tracker_test_helpers::configuration::ephemeral_public; + + use crate::bootstrap::app::initialize_with_configuration; + use crate::bootstrap::jobs::make_rust_tls; + use crate::servers::http::server::{HttpServer, Launcher}; + use crate::servers::registar::Registar; + + #[tokio::test] + async fn it_should_be_able_to_start_and_stop() { + let cfg = Arc::new(ephemeral_public()); + let tracker = initialize_with_configuration(&cfg); + let http_trackers = cfg.http_trackers.clone().expect("missing HTTP trackers configuration"); + let config = &http_trackers[0]; + + let bind_to = config.bind_address; + + let tls = make_rust_tls(&config.tsl_config) + .await + .map(|tls| tls.expect("tls config failed")); + + let register = &Registar::default(); + + let stopped = HttpServer::new(Launcher::new(bind_to, tls)); + let started = stopped + .start(tracker, register.give_form()) + .await + 
.expect("it should start the server"); + let stopped = started.stop().await.expect("it should stop the server"); + + assert_eq!(stopped.state.launcher.bind_to, bind_to); + } +} diff --git a/src/servers/http/v1/extractors/announce_request.rs b/src/servers/http/v1/extractors/announce_request.rs index 5d947ef91..d2612f79b 100644 --- a/src/servers/http/v1/extractors/announce_request.rs +++ b/src/servers/http/v1/extractors/announce_request.rs @@ -1,7 +1,7 @@ -//! Axum [`extractor`](axum::extract) for the [`Announce`](crate::servers::http::v1::requests::announce::Announce) +//! Axum [`extractor`](axum::extract) for the [`Announce`] //! request. //! -//! It parses the query parameters returning an [`Announce`](crate::servers::http::v1::requests::announce::Announce) +//! It parses the query parameters returning an [`Announce`] //! request. //! //! Refer to [`Announce`](crate::servers::http::v1::requests::announce) for more @@ -29,31 +29,43 @@ //! ``` use std::panic::Location; -use axum::async_trait; use axum::extract::FromRequestParts; use axum::http::request::Parts; use axum::response::{IntoResponse, Response}; +use futures::future::BoxFuture; +use futures::FutureExt; use crate::servers::http::v1::query::Query; use crate::servers::http::v1::requests::announce::{Announce, ParseAnnounceQueryError}; use crate::servers::http::v1::responses; -/// Extractor for the [`Announce`](crate::servers::http::v1::requests::announce::Announce) +/// Extractor for the [`Announce`] /// request. 
pub struct ExtractRequest(pub Announce); -#[async_trait] impl FromRequestParts for ExtractRequest where S: Send + Sync, { type Rejection = Response; - async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result { - match extract_announce_from(parts.uri.query()) { - Ok(announce_request) => Ok(ExtractRequest(announce_request)), - Err(error) => Err(error.into_response()), + #[must_use] + fn from_request_parts<'life0, 'life1, 'async_trait>( + parts: &'life0 mut Parts, + _state: &'life1 S, + ) -> BoxFuture<'async_trait, Result> + where + 'life0: 'async_trait, + 'life1: 'async_trait, + Self: 'async_trait, + { + async { + match extract_announce_from(parts.uri.query()) { + Ok(announce_request) => Ok(ExtractRequest(announce_request)), + Err(error) => Err(error.into_response()), + } } + .boxed() } } @@ -83,11 +95,12 @@ fn extract_announce_from(maybe_raw_query: Option<&str>) -> Result **NOTICE**: the returned HTTP status code is always `200` for authentication errors. -//! Neither [The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) -//! nor [The Private Torrents](https://www.bittorrent.org/beps/bep_0027.html) -//! specifications specify any HTTP status code for authentication errors. +//! > Neither [The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) +//! > nor [The Private Torrents](https://www.bittorrent.org/beps/bep_0027.html) +//! > specifications specify any HTTP status code for authentication errors. 
use std::panic::Location; -use axum::async_trait; use axum::extract::rejection::PathRejection; use axum::extract::{FromRequestParts, Path}; use axum::http::request::Parts; use axum::response::{IntoResponse, Response}; +use futures::future::BoxFuture; +use futures::FutureExt; use serde::Deserialize; +use crate::core::auth::Key; use crate::servers::http::v1::handlers::common::auth; use crate::servers::http::v1::responses; -use crate::tracker::auth::Key; -/// Extractor for the [`Key`](crate::tracker::auth::Key) struct. +/// Extractor for the [`Key`] struct. pub struct Extract(pub Key); #[derive(Deserialize)] @@ -68,21 +69,32 @@ impl KeyParam { } } -#[async_trait] impl FromRequestParts for Extract where S: Send + Sync, { type Rejection = Response; - async fn from_request_parts(parts: &mut Parts, state: &S) -> Result { - // Extract `key` from URL path with Axum `Path` extractor - let maybe_path_with_key = Path::::from_request_parts(parts, state).await; - - match extract_key(maybe_path_with_key) { - Ok(key) => Ok(Extract(key)), - Err(error) => Err(error.into_response()), + #[must_use] + fn from_request_parts<'life0, 'life1, 'async_trait>( + parts: &'life0 mut Parts, + state: &'life1 S, + ) -> BoxFuture<'async_trait, Result> + where + 'life0: 'async_trait, + 'life1: 'async_trait, + Self: 'async_trait, + { + async { + // Extract `key` from URL path with Axum `Path` extractor + let maybe_path_with_key = Path::::from_request_parts(parts, state).await; + + match extract_key(maybe_path_with_key) { + Ok(key) => Ok(Extract(key)), + Err(error) => Err(error.into_response()), + } } + .boxed() } } diff --git a/src/servers/http/v1/extractors/client_ip_sources.rs b/src/servers/http/v1/extractors/client_ip_sources.rs index f04300402..5b235fbe0 100644 --- a/src/servers/http/v1/extractors/client_ip_sources.rs +++ b/src/servers/http/v1/extractors/client_ip_sources.rs @@ -16,7 +16,7 @@ //! the tracker will use the `X-Forwarded-For` header to get the client IP //! address. //! -//! 
See [`torrust_tracker_configuration::Configuration::on_reverse_proxy`]. +//! See [`torrust_tracker_configuration::Configuration::core.on_reverse_proxy`]. //! //! The tracker can also be configured to run without a reverse proxy. In this //! case, the tracker will use the IP address from the connection info. @@ -37,39 +37,51 @@ //! ``` use std::net::SocketAddr; -use axum::async_trait; use axum::extract::{ConnectInfo, FromRequestParts}; use axum::http::request::Parts; use axum::response::Response; use axum_client_ip::RightmostXForwardedFor; +use futures::future::BoxFuture; +use futures::FutureExt; use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; -/// Extractor for the [`ClientIpSources`](crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources) +/// Extractor for the [`ClientIpSources`] /// struct. pub struct Extract(pub ClientIpSources); -#[async_trait] impl FromRequestParts for Extract where S: Send + Sync, { type Rejection = Response; - async fn from_request_parts(parts: &mut Parts, state: &S) -> Result { - let right_most_x_forwarded_for = match RightmostXForwardedFor::from_request_parts(parts, state).await { - Ok(right_most_x_forwarded_for) => Some(right_most_x_forwarded_for.0), - Err(_) => None, - }; + #[must_use] + fn from_request_parts<'life0, 'life1, 'async_trait>( + parts: &'life0 mut Parts, + state: &'life1 S, + ) -> BoxFuture<'async_trait, Result> + where + 'life0: 'async_trait, + 'life1: 'async_trait, + Self: 'async_trait, + { + async { + let right_most_x_forwarded_for = match RightmostXForwardedFor::from_request_parts(parts, state).await { + Ok(right_most_x_forwarded_for) => Some(right_most_x_forwarded_for.0), + Err(_) => None, + }; - let connection_info_ip = match ConnectInfo::::from_request_parts(parts, state).await { - Ok(connection_info_socket_addr) => Some(connection_info_socket_addr.0.ip()), - Err(_) => None, - }; + let connection_info_ip = match ConnectInfo::::from_request_parts(parts, state).await { + 
Ok(connection_info_socket_addr) => Some(connection_info_socket_addr.0.ip()), + Err(_) => None, + }; - Ok(Extract(ClientIpSources { - right_most_x_forwarded_for, - connection_info_ip, - })) + Ok(Extract(ClientIpSources { + right_most_x_forwarded_for, + connection_info_ip, + })) + } + .boxed() } } diff --git a/src/servers/http/v1/extractors/scrape_request.rs b/src/servers/http/v1/extractors/scrape_request.rs index 63c4dba69..07fa4ccb9 100644 --- a/src/servers/http/v1/extractors/scrape_request.rs +++ b/src/servers/http/v1/extractors/scrape_request.rs @@ -1,7 +1,7 @@ -//! Axum [`extractor`](axum::extract) for the [`Scrape`](crate::servers::http::v1::requests::scrape::Scrape) +//! Axum [`extractor`](axum::extract) for the [`Scrape`] //! request. //! -//! It parses the query parameters returning an [`Scrape`](crate::servers::http::v1::requests::scrape::Scrape) +//! It parses the query parameters returning an [`Scrape`] //! request. //! //! Refer to [`Scrape`](crate::servers::http::v1::requests::scrape) for more @@ -29,31 +29,43 @@ //! ``` use std::panic::Location; -use axum::async_trait; use axum::extract::FromRequestParts; use axum::http::request::Parts; use axum::response::{IntoResponse, Response}; +use futures::future::BoxFuture; +use futures::FutureExt; use crate::servers::http::v1::query::Query; use crate::servers::http::v1::requests::scrape::{ParseScrapeQueryError, Scrape}; use crate::servers::http::v1::responses; -/// Extractor for the [`Scrape`](crate::servers::http::v1::requests::scrape::Scrape) +/// Extractor for the [`Scrape`] /// request. 
pub struct ExtractRequest(pub Scrape); -#[async_trait] impl FromRequestParts for ExtractRequest where S: Send + Sync, { type Rejection = Response; - async fn from_request_parts(parts: &mut Parts, _state: &S) -> Result { - match extract_scrape_from(parts.uri.query()) { - Ok(scrape_request) => Ok(ExtractRequest(scrape_request)), - Err(error) => Err(error.into_response()), + #[must_use] + fn from_request_parts<'life0, 'life1, 'async_trait>( + parts: &'life0 mut Parts, + _state: &'life1 S, + ) -> BoxFuture<'async_trait, Result> + where + 'life0: 'async_trait, + 'life1: 'async_trait, + Self: 'async_trait, + { + async { + match extract_scrape_from(parts.uri.query()) { + Ok(scrape_request) => Ok(ExtractRequest(scrape_request)), + Err(error) => Err(error.into_response()), + } } + .boxed() } } @@ -83,10 +95,11 @@ fn extract_scrape_from(maybe_raw_query: Option<&str>) -> Result return Err(responses::error::Error::from(error)), } - let peer_ip = match peer_ip_resolver::invoke(tracker.config.on_reverse_proxy, client_ip_sources) { + let peer_ip = match peer_ip_resolver::invoke(tracker.is_behind_reverse_proxy(), client_ip_sources) { Ok(peer_ip) => peer_ip, Err(error) => return Err(responses::error::Error::from(error)), }; @@ -117,13 +118,12 @@ async fn handle_announce( } fn build_response(announce_request: &Announce, announce_data: AnnounceData) -> Response { - match &announce_request.compact { - Some(compact) => match compact { - Compact::Accepted => announce::Compact::from(announce_data).into_response(), - Compact::NotAccepted => announce::NonCompact::from(announce_data).into_response(), - }, - // Default response format non compact - None => announce::NonCompact::from(announce_data).into_response(), + if announce_request.compact.as_ref().is_some_and(|f| *f == Compact::Accepted) { + let response: responses::Announce = announce_data.into(); + response.into_response() + } else { + let response: responses::Announce = announce_data.into(); + response.into_response() } } @@ -131,19 
+131,20 @@ fn build_response(announce_request: &Announce, announce_data: AnnounceData) -> R /// /// It ignores the peer address in the announce request params. #[must_use] -fn peer_from_request(announce_request: &Announce, peer_ip: &IpAddr) -> Peer { - Peer { +fn peer_from_request(announce_request: &Announce, peer_ip: &IpAddr) -> peer::Peer { + peer::Peer { peer_id: announce_request.peer_id, peer_addr: SocketAddr::new(*peer_ip, announce_request.port), - updated: Current::now(), + updated: CurrentClock::now(), uploaded: NumberOfBytes(announce_request.uploaded.unwrap_or(0)), downloaded: NumberOfBytes(announce_request.downloaded.unwrap_or(0)), left: NumberOfBytes(announce_request.left.unwrap_or(0)), - event: map_to_aquatic_event(&announce_request.event), + event: map_to_torrust_event(&announce_request.event), } } -fn map_to_aquatic_event(event: &Option) -> AnnounceEvent { +#[must_use] +pub fn map_to_aquatic_event(event: &Option) -> aquatic_udp_protocol::AnnounceEvent { match event { Some(event) => match &event { Event::Started => aquatic_udp_protocol::AnnounceEvent::Started, @@ -154,32 +155,45 @@ fn map_to_aquatic_event(event: &Option) -> AnnounceEvent { } } +#[must_use] +pub fn map_to_torrust_event(event: &Option) -> AnnounceEvent { + match event { + Some(event) => match &event { + Event::Started => AnnounceEvent::Started, + Event::Stopped => AnnounceEvent::Stopped, + Event::Completed => AnnounceEvent::Completed, + }, + None => AnnounceEvent::None, + } +} + #[cfg(test)] mod tests { + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::peer; use torrust_tracker_test_helpers::configuration; + use crate::core::services::tracker_factory; + use crate::core::Tracker; use crate::servers::http::v1::requests::announce::Announce; use crate::servers::http::v1::responses; use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; - use crate::shared::bit_torrent::info_hash::InfoHash; - use 
crate::tracker::services::tracker_factory; - use crate::tracker::{peer, Tracker}; fn private_tracker() -> Tracker { - tracker_factory(configuration::ephemeral_mode_private().into()) + tracker_factory(&configuration::ephemeral_private()) } fn whitelisted_tracker() -> Tracker { - tracker_factory(configuration::ephemeral_mode_whitelisted().into()) + tracker_factory(&configuration::ephemeral_listed()) } fn tracker_on_reverse_proxy() -> Tracker { - tracker_factory(configuration::ephemeral_with_reverse_proxy().into()) + tracker_factory(&configuration::ephemeral_with_reverse_proxy()) } fn tracker_not_on_reverse_proxy() -> Tracker { - tracker_factory(configuration::ephemeral_without_reverse_proxy().into()) + tracker_factory(&configuration::ephemeral_without_reverse_proxy()) } fn sample_announce_request() -> Announce { @@ -215,9 +229,9 @@ mod tests { use std::sync::Arc; use super::{private_tracker, sample_announce_request, sample_client_ip_sources}; + use crate::core::auth; use crate::servers::http::v1::handlers::announce::handle_announce; use crate::servers::http::v1::handlers::announce::tests::assert_error_response; - use crate::tracker::auth; #[tokio::test] async fn it_should_fail_when_the_authentication_key_is_missing() { diff --git a/src/servers/http/v1/handlers/common/auth.rs b/src/servers/http/v1/handlers/common/auth.rs index f41635d69..f9a7796a4 100644 --- a/src/servers/http/v1/handlers/common/auth.rs +++ b/src/servers/http/v1/handlers/common/auth.rs @@ -1,12 +1,12 @@ //! HTTP server authentication error and conversion to -//! [`responses::error::Error`](crate::servers::http::v1::responses::error::Error) +//! [`responses::error::Error`] //! response. use std::panic::Location; use thiserror::Error; +use crate::core::auth; use crate::servers::http::v1::responses; -use crate::tracker::auth; /// Authentication error. 
/// diff --git a/src/servers/http/v1/handlers/common/peer_ip.rs b/src/servers/http/v1/handlers/common/peer_ip.rs index d65efbc79..5602bd26c 100644 --- a/src/servers/http/v1/handlers/common/peer_ip.rs +++ b/src/servers/http/v1/handlers/common/peer_ip.rs @@ -2,7 +2,7 @@ //! //! The HTTP tracker may fail to resolve the peer IP address. This module //! contains the logic to convert those -//! [`PeerIpResolutionError`](crate::servers::http::v1::services::peer_ip_resolver::PeerIpResolutionError) +//! [`PeerIpResolutionError`] //! errors into responses. use crate::servers::http::v1::responses; use crate::servers::http::v1::services::peer_ip_resolver::PeerIpResolutionError; diff --git a/src/servers/http/v1/handlers/health_check.rs b/src/servers/http/v1/handlers/health_check.rs new file mode 100644 index 000000000..b15af6255 --- /dev/null +++ b/src/servers/http/v1/handlers/health_check.rs @@ -0,0 +1,18 @@ +use axum::Json; +use serde::{Deserialize, Serialize}; + +#[allow(clippy::unused_async)] +pub async fn handler() -> Json { + Json(Report { status: Status::Ok }) +} + +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +pub enum Status { + Ok, + Error, +} + +#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)] +pub struct Report { + pub status: Status, +} diff --git a/src/servers/http/v1/handlers/mod.rs b/src/servers/http/v1/handlers/mod.rs index d78dee7d5..7b3a1e7c3 100644 --- a/src/servers/http/v1/handlers/mod.rs +++ b/src/servers/http/v1/handlers/mod.rs @@ -3,10 +3,11 @@ //! Refer to the generic [HTTP server documentation](crate::servers::http) for //! more information about the HTTP tracker. 
use super::responses; -use crate::tracker::error::Error; +use crate::core::error::Error; pub mod announce; pub mod common; +pub mod health_check; pub mod scrape; impl From for responses::error::Error { diff --git a/src/servers/http/v1/handlers/scrape.rs b/src/servers/http/v1/handlers/scrape.rs index 58b8aa84c..eb8875a58 100644 --- a/src/servers/http/v1/handlers/scrape.rs +++ b/src/servers/http/v1/handlers/scrape.rs @@ -9,16 +9,16 @@ use std::sync::Arc; use axum::extract::State; use axum::response::{IntoResponse, Response}; -use log::debug; +use tracing::debug; +use crate::core::auth::Key; +use crate::core::{ScrapeData, Tracker}; use crate::servers::http::v1::extractors::authentication_key::Extract as ExtractKey; use crate::servers::http::v1::extractors::client_ip_sources::Extract as ExtractClientIpSources; use crate::servers::http::v1::extractors::scrape_request::ExtractRequest; use crate::servers::http::v1::requests::scrape::Scrape; use crate::servers::http::v1::services::peer_ip_resolver::{self, ClientIpSources}; use crate::servers::http::v1::{responses, services}; -use crate::tracker::auth::Key; -use crate::tracker::{ScrapeData, Tracker}; /// It handles the `scrape` request when the HTTP tracker is configured /// to run in `public` mode. @@ -90,7 +90,7 @@ async fn handle_scrape( // Authorization for scrape requests is handled at the `Tracker` level // for each torrent. 
- let peer_ip = match peer_ip_resolver::invoke(tracker.config.on_reverse_proxy, client_ip_sources) { + let peer_ip = match peer_ip_resolver::invoke(tracker.is_behind_reverse_proxy(), client_ip_sources) { Ok(peer_ip) => peer_ip, Err(error) => return Err(responses::error::Error::from(error)), }; @@ -111,29 +111,29 @@ mod tests { use std::net::IpAddr; use std::str::FromStr; + use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; + use crate::core::services::tracker_factory; + use crate::core::Tracker; use crate::servers::http::v1::requests::scrape::Scrape; use crate::servers::http::v1::responses; use crate::servers::http::v1::services::peer_ip_resolver::ClientIpSources; - use crate::shared::bit_torrent::info_hash::InfoHash; - use crate::tracker::services::tracker_factory; - use crate::tracker::Tracker; fn private_tracker() -> Tracker { - tracker_factory(configuration::ephemeral_mode_private().into()) + tracker_factory(&configuration::ephemeral_private()) } fn whitelisted_tracker() -> Tracker { - tracker_factory(configuration::ephemeral_mode_whitelisted().into()) + tracker_factory(&configuration::ephemeral_listed()) } fn tracker_on_reverse_proxy() -> Tracker { - tracker_factory(configuration::ephemeral_with_reverse_proxy().into()) + tracker_factory(&configuration::ephemeral_with_reverse_proxy()) } fn tracker_not_on_reverse_proxy() -> Tracker { - tracker_factory(configuration::ephemeral_without_reverse_proxy().into()) + tracker_factory(&configuration::ephemeral_without_reverse_proxy()) } fn sample_scrape_request() -> Scrape { @@ -161,8 +161,8 @@ mod tests { use std::sync::Arc; use super::{private_tracker, sample_client_ip_sources, sample_scrape_request}; + use crate::core::{auth, ScrapeData}; use crate::servers::http::v1::handlers::scrape::handle_scrape; - use crate::tracker::{auth, ScrapeData}; #[tokio::test] async fn it_should_return_zeroed_swarm_metadata_when_the_authentication_key_is_missing() { @@ -203,8 +203,8 @@ mod 
tests { use std::sync::Arc; use super::{sample_client_ip_sources, sample_scrape_request, whitelisted_tracker}; + use crate::core::ScrapeData; use crate::servers::http::v1::handlers::scrape::handle_scrape; - use crate::tracker::ScrapeData; #[tokio::test] async fn it_should_return_zeroed_swarm_metadata_when_the_torrent_is_not_whitelisted() { diff --git a/src/servers/http/v1/launcher.rs b/src/servers/http/v1/launcher.rs deleted file mode 100644 index b5faf8d46..000000000 --- a/src/servers/http/v1/launcher.rs +++ /dev/null @@ -1,173 +0,0 @@ -//! Logic to start new HTTP server instances. -use std::future::Future; -use std::net::SocketAddr; -use std::str::FromStr; -use std::sync::Arc; - -use async_trait::async_trait; -use axum_server::tls_rustls::RustlsConfig; -use axum_server::Handle; -use futures::future::BoxFuture; -use log::info; - -use super::routes::router; -use crate::servers::http::server::HttpServerLauncher; -use crate::tracker::Tracker; - -#[derive(Debug)] -pub enum Error { - Error(String), -} - -pub struct Launcher; - -impl Launcher { - /// It starts a new HTTP server instance from a TCP listener with graceful shutdown. - /// - /// # Panics - /// - /// Will panic if: - /// - /// - The TCP listener could not be bound. - /// - The Axum server crashes. - pub fn start_from_tcp_listener_with_graceful_shutdown( - tcp_listener: std::net::TcpListener, - tracker: Arc, - shutdown_signal: F, - ) -> BoxFuture<'static, ()> - where - F: Future + Send + 'static, - { - let app = router(tracker); - - Box::pin(async { - axum::Server::from_tcp(tcp_listener) - .expect("Could not bind to tcp listener.") - .serve(app.into_make_service_with_connect_info::()) - .with_graceful_shutdown(shutdown_signal) - .await - .expect("Axum server crashed."); - }) - } - - /// It starts a new HTTPS server instance from a TCP listener with graceful shutdown. - /// - /// # Panics - /// - /// Will panic if: - /// - /// - The SSL certificate could not be read from the provided path or is invalid. 
- /// - The Axum server crashes. - pub fn start_tls_from_tcp_listener_with_graceful_shutdown( - tcp_listener: std::net::TcpListener, - (ssl_cert_path, ssl_key_path): (String, String), - tracker: Arc, - shutdown_signal: F, - ) -> BoxFuture<'static, ()> - where - F: Future + Send + 'static, - { - let app = router(tracker); - - let handle = Handle::new(); - - let cloned_handle = handle.clone(); - - tokio::task::spawn_local(async move { - shutdown_signal.await; - cloned_handle.shutdown(); - }); - - Box::pin(async { - let tls_config = RustlsConfig::from_pem_file(ssl_cert_path, ssl_key_path) - .await - .expect("Could not read tls cert."); - - axum_server::from_tcp_rustls(tcp_listener, tls_config) - .handle(handle) - .serve(app.into_make_service_with_connect_info::()) - .await - .expect("Axum server crashed."); - }) - } -} - -#[async_trait] -impl HttpServerLauncher for Launcher { - fn new() -> Self { - Self {} - } - - fn start_with_graceful_shutdown( - &self, - cfg: torrust_tracker_configuration::HttpTracker, - tracker: Arc, - shutdown_signal: F, - ) -> (SocketAddr, BoxFuture<'static, ()>) - where - F: Future + Send + 'static, - { - let addr = SocketAddr::from_str(&cfg.bind_address).expect("bind_address is not a valid SocketAddr."); - let tcp_listener = std::net::TcpListener::bind(addr).expect("Could not bind tcp_listener to address."); - let bind_addr = tcp_listener - .local_addr() - .expect("Could not get local_addr from tcp_listener."); - - if let (true, Some(ssl_cert_path), Some(ssl_key_path)) = (cfg.ssl_enabled, &cfg.ssl_cert_path, &cfg.ssl_key_path) { - let server = Self::start_tls_from_tcp_listener_with_graceful_shutdown( - tcp_listener, - (ssl_cert_path.to_string(), ssl_key_path.to_string()), - tracker, - shutdown_signal, - ); - - (bind_addr, server) - } else { - let server = Self::start_from_tcp_listener_with_graceful_shutdown(tcp_listener, tracker, shutdown_signal); - - (bind_addr, server) - } - } -} - -/// Starts a new HTTP server instance. 
-/// -/// # Panics -/// -/// Panics if the server could not listen to shutdown (ctrl+c) signal. -pub fn start(socket_addr: std::net::SocketAddr, tracker: Arc) -> impl Future> { - let app = router(tracker); - - let server = axum::Server::bind(&socket_addr).serve(app.into_make_service_with_connect_info::()); - - server.with_graceful_shutdown(async move { - tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); - info!("Stopping Torrust HTTP tracker server on http://{} ...", socket_addr); - }) -} - -/// Starts a new HTTPS server instance. -/// -/// # Panics -/// -/// Panics if the server could not listen to shutdown (ctrl+c) signal. -pub fn start_tls( - socket_addr: std::net::SocketAddr, - ssl_config: RustlsConfig, - tracker: Arc, -) -> impl Future> { - let app = router(tracker); - - let handle = Handle::new(); - let shutdown_handle = handle.clone(); - - tokio::spawn(async move { - tokio::signal::ctrl_c().await.expect("Failed to listen to shutdown signal."); - info!("Stopping Torrust HTTP tracker server on https://{} ...", socket_addr); - shutdown_handle.shutdown(); - }); - - axum_server::bind_rustls(socket_addr, ssl_config) - .handle(handle) - .serve(app.into_make_service_with_connect_info::()) -} diff --git a/src/servers/http/v1/mod.rs b/src/servers/http/v1/mod.rs index 464a7ee14..9d2745692 100644 --- a/src/servers/http/v1/mod.rs +++ b/src/servers/http/v1/mod.rs @@ -4,7 +4,6 @@ //! more information about the endpoints and their usage. pub mod extractors; pub mod handlers; -pub mod launcher; pub mod query; pub mod requests; pub mod responses; diff --git a/src/servers/http/v1/query.rs b/src/servers/http/v1/query.rs index 745796b61..3a078daae 100644 --- a/src/servers/http/v1/query.rs +++ b/src/servers/http/v1/query.rs @@ -93,7 +93,7 @@ impl Query { } } -/// This error can be returned when parsing a [`Query`](crate::servers::http::v1::query::Query) +/// This error can be returned when parsing a [`Query`] /// from a string. 
#[derive(Error, Debug)] pub enum ParseQueryError { diff --git a/src/servers/http/v1/requests/announce.rs b/src/servers/http/v1/requests/announce.rs index c330ca3bd..83cc7ddf9 100644 --- a/src/servers/http/v1/requests/announce.rs +++ b/src/servers/http/v1/requests/announce.rs @@ -7,12 +7,12 @@ use std::str::FromStr; use thiserror::Error; use torrust_tracker_located_error::{Located, LocatedError}; +use torrust_tracker_primitives::info_hash::{self, InfoHash}; +use torrust_tracker_primitives::peer; use crate::servers::http::percent_encoding::{percent_decode_info_hash, percent_decode_peer_id}; use crate::servers::http::v1::query::{ParseQueryError, Query}; use crate::servers::http::v1::responses; -use crate::shared::bit_torrent::info_hash::{ConversionError, InfoHash}; -use crate::tracker::peer::{self, IdConversionError}; /// The number of bytes `downloaded`, `uploaded` or `left`. It's used in the /// `Announce` request for parameters that represent a number of bytes. @@ -33,8 +33,8 @@ const COMPACT: &str = "compact"; /// /// ```rust /// use torrust_tracker::servers::http::v1::requests::announce::{Announce, Compact, Event}; -/// use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; -/// use torrust_tracker::tracker::peer; +/// use torrust_tracker_primitives::info_hash::InfoHash; +/// use torrust_tracker_primitives::peer; /// /// let request = Announce { /// // Mandatory params @@ -51,12 +51,12 @@ const COMPACT: &str = "compact"; /// ``` /// /// > **NOTICE**: The [BEP 03. The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) -/// specifies that only the peer `IP` and `event`are optional. However, the -/// tracker defines default values for some of the mandatory params. +/// > specifies that only the peer `IP` and `event`are optional. However, the +/// > tracker defines default values for some of the mandatory params. /// /// > **NOTICE**: The struct does not contain the `IP` of the peer. 
It's not -/// mandatory and it's not used by the tracker. The `IP` is obtained from the -/// request itself. +/// > mandatory and it's not used by the tracker. The `IP` is obtained from the +/// > request itself. #[derive(Debug, PartialEq)] pub struct Announce { // Mandatory params @@ -119,14 +119,14 @@ pub enum ParseAnnounceQueryError { InvalidInfoHashParam { param_name: String, param_value: String, - source: LocatedError<'static, ConversionError>, + source: LocatedError<'static, info_hash::ConversionError>, }, /// The `peer_id` is invalid. #[error("invalid param value {param_value} for {param_name} in {source}")] InvalidPeerIdParam { param_name: String, param_value: String, - source: LocatedError<'static, IdConversionError>, + source: LocatedError<'static, peer::IdConversionError>, }, } @@ -180,7 +180,7 @@ impl fmt::Display for Event { /// Depending on the value of this param, the tracker will return a different /// response: /// -/// - [`NonCompact`](crate::servers::http::v1::responses::announce::NonCompact) response. +/// - [`Normal`](crate::servers::http::v1::responses::announce::Normal), i.e. a `non-compact` response. /// - [`Compact`](crate::servers::http::v1::responses::announce::Compact) response. /// /// Refer to [BEP 23. 
Tracker Returns Compact Peer Lists](https://www.bittorrent.org/beps/bep_0023.html) @@ -355,12 +355,13 @@ mod tests { mod announce_request { + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::peer; + use crate::servers::http::v1::query::Query; use crate::servers::http::v1::requests::announce::{ Announce, Compact, Event, COMPACT, DOWNLOADED, EVENT, INFO_HASH, LEFT, PEER_ID, PORT, UPLOADED, }; - use crate::shared::bit_torrent::info_hash::InfoHash; - use crate::tracker::peer; #[test] fn should_be_instantiated_from_the_url_query_with_only_the_mandatory_params() { diff --git a/src/servers/http/v1/requests/scrape.rs b/src/servers/http/v1/requests/scrape.rs index 7c52b9fc4..19f6e35a6 100644 --- a/src/servers/http/v1/requests/scrape.rs +++ b/src/servers/http/v1/requests/scrape.rs @@ -5,11 +5,11 @@ use std::panic::Location; use thiserror::Error; use torrust_tracker_located_error::{Located, LocatedError}; +use torrust_tracker_primitives::info_hash::{self, InfoHash}; use crate::servers::http::percent_encoding::percent_decode_info_hash; use crate::servers::http::v1::query::Query; use crate::servers::http::v1::responses; -use crate::shared::bit_torrent::info_hash::{ConversionError, InfoHash}; pub type NumberOfBytes = i64; @@ -34,7 +34,7 @@ pub enum ParseScrapeQueryError { InvalidInfoHashParam { param_name: String, param_value: String, - source: LocatedError<'static, ConversionError>, + source: LocatedError<'static, info_hash::ConversionError>, }, } @@ -86,9 +86,10 @@ mod tests { mod scrape_request { + use torrust_tracker_primitives::info_hash::InfoHash; + use crate::servers::http::v1::query::Query; use crate::servers::http::v1::requests::scrape::{Scrape, INFO_HASH}; - use crate::shared::bit_torrent::info_hash::InfoHash; #[test] fn should_be_instantiated_from_the_url_query_with_only_one_infohash() { diff --git a/src/servers/http/v1/responses/announce.rs b/src/servers/http/v1/responses/announce.rs index 3596275f4..134da919e 100644 --- 
a/src/servers/http/v1/responses/announce.rs +++ b/src/servers/http/v1/responses/announce.rs @@ -2,269 +2,202 @@ //! //! Data structures and logic to build the `announce` response. use std::io::Write; -use std::net::IpAddr; -use std::panic::Location; +use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use axum::http::StatusCode; -use axum::response::{IntoResponse, Response}; -use serde::{self, Deserialize, Serialize}; -use thiserror::Error; +use derive_more::{AsRef, Constructor, From}; use torrust_tracker_contrib_bencode::{ben_bytes, ben_int, ben_list, ben_map, BMutAccess, BencodeMut}; +use torrust_tracker_primitives::peer; +use super::Response; +use crate::core::AnnounceData; use crate::servers::http::v1::responses; -use crate::tracker::{self, AnnounceData}; -/// Normal (non compact) `announce` response. +/// An [`Announce`] response, that can be anything that is convertible from [`AnnounceData`]. /// -/// It's a bencoded dictionary. +/// The [`Announce`] can built from any data that implements: [`From`] and [`Into>`]. /// -/// ```rust -/// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; -/// use torrust_tracker::servers::http::v1::responses::announce::{NonCompact, Peer}; -/// -/// let response = NonCompact { -/// interval: 111, -/// interval_min: 222, -/// complete: 333, -/// incomplete: 444, -/// peers: vec![ -/// // IPV4 -/// Peer { -/// peer_id: *b"-qB00000000000000001", -/// ip: IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), // 105.105.105.105 -/// port: 0x7070, // 28784 -/// }, -/// // IPV6 -/// Peer { -/// peer_id: *b"-qB00000000000000002", -/// ip: IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), -/// port: 0x7070, // 28784 -/// }, -/// ], -/// }; +/// The two standard forms of an announce response are: [`Normal`] and [`Compact`]. /// -/// let bytes = response.body(); /// -/// // The expected bencoded response. 
-/// let expected_bytes = b"d8:completei333e10:incompletei444e8:intervali111e12:min intervali222e5:peersld2:ip15:105.105.105.1057:peer id20:-qB000000000000000014:porti28784eed2:ip39:6969:6969:6969:6969:6969:6969:6969:69697:peer id20:-qB000000000000000024:porti28784eeee"; +/// _"To reduce the size of tracker responses and to reduce memory and +/// computational requirements in trackers, trackers may return peers as a +/// packed string rather than as a bencoded list."_ /// -/// assert_eq!( -/// String::from_utf8(bytes).unwrap(), -/// String::from_utf8(expected_bytes.to_vec()).unwrap() -/// ); -/// ``` +/// Refer to the official BEPs for more information: /// -/// Refer to [BEP 03: The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) -/// for more information. -#[derive(Serialize, Deserialize, Debug, PartialEq)] -pub struct NonCompact { - /// Interval in seconds that the client should wait between sending regular - /// announce requests to the tracker. - /// - /// It's a **recommended** wait time between announcements. - /// - /// This is the standard amount of time that clients should wait between - /// sending consecutive announcements to the tracker. This value is set by - /// the tracker and is typically provided in the tracker's response to a - /// client's initial request. It serves as a guideline for clients to know - /// how often they should contact the tracker for updates on the peer list, - /// while ensuring that the tracker is not overwhelmed with requests. - pub interval: u32, - /// Minimum announce interval. Clients must not reannounce more frequently - /// than this. - /// - /// It establishes the shortest allowed wait time. - /// - /// This is an optional parameter in the protocol that the tracker may - /// provide in its response. It sets a lower limit on the frequency at which - /// clients are allowed to send announcements. 
Clients should respect this - /// value to prevent sending too many requests in a short period, which - /// could lead to excessive load on the tracker or even getting banned by - /// the tracker for not adhering to the rules. - #[serde(rename = "min interval")] - pub interval_min: u32, - /// Number of peers with the entire file, i.e. seeders. - pub complete: u32, - /// Number of non-seeder peers, aka "leechers". - pub incomplete: u32, - /// A list of peers. The value is a list of dictionaries. - pub peers: Vec, +/// - [BEP 03: The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) +/// - [BEP 23: Tracker Returns Compact Peer Lists](https://www.bittorrent.org/beps/bep_0023.html) +/// - [BEP 07: IPv6 Tracker Extension](https://www.bittorrent.org/beps/bep_0007.html) + +#[derive(Debug, AsRef, PartialEq, Constructor)] +pub struct Announce +where + E: From + Into>, +{ + data: E, } -/// Peer information in the [`NonCompact`](crate::servers::http::v1::responses::announce::NonCompact) -/// response. -/// -/// ```rust -/// use std::net::{IpAddr, Ipv4Addr}; -/// use torrust_tracker::servers::http::v1::responses::announce::{NonCompact, Peer}; -/// -/// let peer = Peer { -/// peer_id: *b"-qB00000000000000001", -/// ip: IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), // 105.105.105.105 -/// port: 0x7070, // 28784 -/// }; -/// ``` -#[derive(Serialize, Deserialize, Debug, PartialEq)] -pub struct Peer { - /// The peer's ID. - pub peer_id: [u8; 20], - /// The peer's IP address. - pub ip: IpAddr, - /// The peer's port number. - pub port: u16, +/// Build any [`Announce`] from an [`AnnounceData`]. +impl + Into>> From for Announce { + fn from(data: AnnounceData) -> Self { + Self::new(data.into()) + } } -impl Peer { - #[must_use] - pub fn ben_map(&self) -> BencodeMut<'_> { - ben_map! 
{ - "peer id" => ben_bytes!(self.peer_id.clone().to_vec()), - "ip" => ben_bytes!(self.ip.to_string()), - "port" => ben_int!(i64::from(self.port)) - } +/// Convert any Announce [`Announce`] into a [`axum::response::Response`] +impl + Into>> axum::response::IntoResponse for Announce +where + Announce: Response, +{ + fn into_response(self) -> axum::response::Response { + axum::response::IntoResponse::into_response(self.body().map(|bytes| (StatusCode::OK, bytes))) } } -impl From for Peer { - fn from(peer: tracker::peer::Peer) -> Self { - Peer { - peer_id: peer.peer_id.to_bytes(), - ip: peer.peer_addr.ip(), - port: peer.peer_addr.port(), +/// Implement the [`Response`] for the [`Announce`]. +/// +impl + Into>> Response for Announce { + fn body(self) -> Result, responses::error::Error> { + Ok(self.data.into()) + } +} + +/// Format of the [`Normal`] (Non-Compact) Encoding +pub struct Normal { + complete: i64, + incomplete: i64, + interval: i64, + min_interval: i64, + peers: Vec, +} + +impl From for Normal { + fn from(data: AnnounceData) -> Self { + Self { + complete: data.stats.complete.into(), + incomplete: data.stats.incomplete.into(), + interval: data.policy.interval.into(), + min_interval: data.policy.interval_min.into(), + peers: data.peers.iter().map(AsRef::as_ref).copied().collect(), } } } -impl NonCompact { - /// Returns the bencoded body of the non-compact response. - /// - /// # Panics - /// - /// Will return an error if it can't access the bencode as a mutable `BListAccess`. - #[must_use] - pub fn body(&self) -> Vec { +#[allow(clippy::from_over_into)] +impl Into> for Normal { + fn into(self) -> Vec { let mut peers_list = ben_list!(); let peers_list_mut = peers_list.list_mut().unwrap(); for peer in &self.peers { - peers_list_mut.push(peer.ben_map()); + peers_list_mut.push(peer.into()); } (ben_map! 
{ - "complete" => ben_int!(i64::from(self.complete)), - "incomplete" => ben_int!(i64::from(self.incomplete)), - "interval" => ben_int!(i64::from(self.interval)), - "min interval" => ben_int!(i64::from(self.interval_min)), + "complete" => ben_int!(self.complete), + "incomplete" => ben_int!(self.incomplete), + "interval" => ben_int!(self.interval), + "min interval" => ben_int!(self.min_interval), "peers" => peers_list.clone() }) .encode() } } -impl IntoResponse for NonCompact { - fn into_response(self) -> Response { - (StatusCode::OK, self.body()).into_response() - } +/// Format of the [`Compact`] Encoding +pub struct Compact { + complete: i64, + incomplete: i64, + interval: i64, + min_interval: i64, + peers: Vec, + peers6: Vec, } -impl From for NonCompact { - fn from(domain_announce_response: AnnounceData) -> Self { - let peers: Vec = domain_announce_response.peers.iter().map(|peer| Peer::from(*peer)).collect(); +impl From for Compact { + fn from(data: AnnounceData) -> Self { + let compact_peers: Vec = data.peers.iter().map(AsRef::as_ref).copied().collect(); + + let (peers, peers6): (Vec>, Vec>) = + compact_peers.into_iter().collect(); + + let peers_encoded: CompactPeersEncoded = peers.into_iter().collect(); + let peers_encoded_6: CompactPeersEncoded = peers6.into_iter().collect(); Self { - interval: domain_announce_response.interval, - interval_min: domain_announce_response.interval_min, - complete: domain_announce_response.swarm_stats.seeders, - incomplete: domain_announce_response.swarm_stats.leechers, - peers, + complete: data.stats.complete.into(), + incomplete: data.stats.incomplete.into(), + interval: data.policy.interval.into(), + min_interval: data.policy.interval_min.into(), + peers: peers_encoded.0, + peers6: peers_encoded_6.0, } } } -/// Compact `announce` response. 
-/// -/// _"To reduce the size of tracker responses and to reduce memory and -/// computational requirements in trackers, trackers may return peers as a -/// packed string rather than as a bencoded list."_ +#[allow(clippy::from_over_into)] +impl Into> for Compact { + fn into(self) -> Vec { + (ben_map! { + "complete" => ben_int!(self.complete), + "incomplete" => ben_int!(self.incomplete), + "interval" => ben_int!(self.interval), + "min interval" => ben_int!(self.min_interval), + "peers" => ben_bytes!(self.peers), + "peers6" => ben_bytes!(self.peers6) + }) + .encode() + } +} + +/// A [`NormalPeer`], for the [`Normal`] form. /// /// ```rust -/// use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; -/// use torrust_tracker::servers::http::v1::responses::announce::{Compact, CompactPeer}; +/// use std::net::{IpAddr, Ipv4Addr}; +/// use torrust_tracker::servers::http::v1::responses::announce::{Normal, NormalPeer}; /// -/// let response = Compact { -/// interval: 111, -/// interval_min: 222, -/// complete: 333, -/// incomplete: 444, -/// peers: vec![ -/// // IPV4 -/// CompactPeer { -/// ip: IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), // 105.105.105.105 -/// port: 0x7070, // 28784 -/// }, -/// // IPV6 -/// CompactPeer { -/// ip: IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), -/// port: 0x7070, // 28784 -/// }, -/// ], +/// let peer = NormalPeer { +/// peer_id: *b"-qB00000000000000001", +/// ip: IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), // 105.105.105.105 +/// port: 0x7070, // 28784 /// }; /// -/// let bytes = response.body().unwrap(); -/// -/// // The expected bencoded response. 
-/// let expected_bytes = -/// // cspell:disable-next-line -/// b"d8:completei333e10:incompletei444e8:intervali111e12:min intervali222e5:peers6:iiiipp6:peers618:iiiiiiiiiiiiiiiippe"; -/// -/// assert_eq!( -/// String::from_utf8(bytes).unwrap(), -/// String::from_utf8(expected_bytes.to_vec()).unwrap() -/// ); -/// ``` -/// -/// Refer to the official BEPs for more information: -/// -/// - [BEP 23: Tracker Returns Compact Peer Lists](https://www.bittorrent.org/beps/bep_0023.html) -/// - [BEP 07: IPv6 Tracker Extension](https://www.bittorrent.org/beps/bep_0007.html) -#[derive(Serialize, Deserialize, Debug, PartialEq)] -pub struct Compact { - /// Interval in seconds that the client should wait between sending regular - /// announce requests to the tracker. - /// - /// It's a **recommended** wait time between announcements. - /// - /// This is the standard amount of time that clients should wait between - /// sending consecutive announcements to the tracker. This value is set by - /// the tracker and is typically provided in the tracker's response to a - /// client's initial request. It serves as a guideline for clients to know - /// how often they should contact the tracker for updates on the peer list, - /// while ensuring that the tracker is not overwhelmed with requests. - pub interval: u32, - /// Minimum announce interval. Clients must not reannounce more frequently - /// than this. - /// - /// It establishes the shortest allowed wait time. - /// - /// This is an optional parameter in the protocol that the tracker may - /// provide in its response. It sets a lower limit on the frequency at which - /// clients are allowed to send announcements. Clients should respect this - /// value to prevent sending too many requests in a short period, which - /// could lead to excessive load on the tracker or even getting banned by - /// the tracker for not adhering to the rules. 
- #[serde(rename = "min interval")] - pub interval_min: u32, - /// Number of seeders, aka "completed". - pub complete: u32, - /// Number of non-seeder peers, aka "incomplete". - pub incomplete: u32, - /// Compact peer list. - pub peers: Vec, +/// ``` +#[derive(Debug, PartialEq)] +pub struct NormalPeer { + /// The peer's ID. + pub peer_id: [u8; 20], + /// The peer's IP address. + pub ip: IpAddr, + /// The peer's port number. + pub port: u16, +} + +impl peer::Encoding for NormalPeer {} + +impl From for NormalPeer { + fn from(peer: peer::Peer) -> Self { + NormalPeer { + peer_id: peer.peer_id.to_bytes(), + ip: peer.peer_addr.ip(), + port: peer.peer_addr.port(), + } + } } -/// Compact peer. It's used in the [`Compact`](crate::servers::http::v1::responses::announce::Compact) -/// response. +impl From<&NormalPeer> for BencodeMut<'_> { + fn from(value: &NormalPeer) -> Self { + ben_map! { + "peer id" => ben_bytes!(value.peer_id.clone().to_vec()), + "ip" => ben_bytes!(value.ip.to_string()), + "port" => ben_int!(i64::from(value.port)) + } + } +} + +/// A [`CompactPeer`], for the [`Compact`] form. /// -/// _"To reduce the size of tracker responses and to reduce memory and +/// _"To reduce the size of tracker responses and to reduce memory and /// computational requirements in trackers, trackers may return peers as a /// packed string rather than as a bencoded list."_ /// @@ -272,160 +205,108 @@ pub struct Compact { /// the peer's ID. 
/// /// ```rust -/// use std::net::{IpAddr, Ipv4Addr}; -/// use torrust_tracker::servers::http::v1::responses::announce::CompactPeer; +/// use std::net::{IpAddr, Ipv4Addr}; +/// use torrust_tracker::servers::http::v1::responses::announce::{Compact, CompactPeer, CompactPeerData}; /// -/// let compact_peer = CompactPeer { -/// ip: IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), // 105.105.105.105 -/// port: 0x7070 // 28784 -/// }; -/// ``` +/// let peer = CompactPeer::V4(CompactPeerData { +/// ip: Ipv4Addr::new(0x69, 0x69, 0x69, 0x69), // 105.105.105.105 +/// port: 0x7070, // 28784 +/// }); +/// +/// ``` /// /// Refer to [BEP 23: Tracker Returns Compact Peer Lists](https://www.bittorrent.org/beps/bep_0023.html) /// for more information. -#[derive(Serialize, Deserialize, Debug, PartialEq)] -pub struct CompactPeer { +#[derive(Clone, Debug, PartialEq)] +pub enum CompactPeer { /// The peer's IP address. - pub ip: IpAddr, + V4(CompactPeerData), /// The peer's port number. - pub port: u16, + V6(CompactPeerData), } -impl CompactPeer { - /// Returns the compact peer as a byte vector. - /// - /// # Errors - /// - /// Will return `Err` if internally interrupted. 
- pub fn bytes(&self) -> Result, Box> { - let mut bytes: Vec = Vec::new(); - match self.ip { - IpAddr::V4(ip) => { - bytes.write_all(&u32::from(ip).to_be_bytes())?; - } - IpAddr::V6(ip) => { - bytes.write_all(&u128::from(ip).to_be_bytes())?; - } - } - bytes.write_all(&self.port.to_be_bytes())?; - Ok(bytes) - } -} +impl peer::Encoding for CompactPeer {} -impl From for CompactPeer { - fn from(peer: tracker::peer::Peer) -> Self { - CompactPeer { - ip: peer.peer_addr.ip(), - port: peer.peer_addr.port(), +impl From for CompactPeer { + fn from(peer: peer::Peer) -> Self { + match (peer.peer_addr.ip(), peer.peer_addr.port()) { + (IpAddr::V4(ip), port) => Self::V4(CompactPeerData { ip, port }), + (IpAddr::V6(ip), port) => Self::V6(CompactPeerData { ip, port }), } } } -impl Compact { - /// Returns the bencoded compact response as a byte vector. - /// - /// # Errors - /// - /// Will return `Err` if internally interrupted. - pub fn body(&self) -> Result, Box> { - let bytes = (ben_map! { - "complete" => ben_int!(i64::from(self.complete)), - "incomplete" => ben_int!(i64::from(self.incomplete)), - "interval" => ben_int!(i64::from(self.interval)), - "min interval" => ben_int!(i64::from(self.interval_min)), - "peers" => ben_bytes!(self.peers_v4_bytes()?), - "peers6" => ben_bytes!(self.peers_v6_bytes()?) - }) - .encode(); +/// The [`CompactPeerData`], that made with either a [`Ipv4Addr`], or [`Ipv6Addr`] along with a `port`. +/// +#[derive(Clone, Debug, PartialEq)] +pub struct CompactPeerData { + /// The peer's IP address. + pub ip: V, + /// The peer's port number. 
+ pub port: u16, +} - Ok(bytes) - } +impl FromIterator for (Vec>, Vec>) { + fn from_iter>(iter: T) -> Self { + let mut peers_v4: Vec> = vec![]; + let mut peers_v6: Vec> = vec![]; - fn peers_v4_bytes(&self) -> Result, Box> { - let mut bytes: Vec = Vec::new(); - for compact_peer in &self.peers { - match compact_peer.ip { - IpAddr::V4(_ip) => { - let peer_bytes = compact_peer.bytes()?; - bytes.write_all(&peer_bytes)?; - } - IpAddr::V6(_) => {} + for peer in iter { + match peer { + CompactPeer::V4(peer) => peers_v4.push(peer), + CompactPeer::V6(peer6) => peers_v6.push(peer6), } } - Ok(bytes) - } - fn peers_v6_bytes(&self) -> Result, Box> { - let mut bytes: Vec = Vec::new(); - for compact_peer in &self.peers { - match compact_peer.ip { - IpAddr::V6(_ip) => { - let peer_bytes = compact_peer.bytes()?; - bytes.write_all(&peer_bytes)?; - } - IpAddr::V4(_) => {} - } - } - Ok(bytes) + (peers_v4, peers_v6) } } -/// `Compact` response serialization error. -#[derive(Error, Debug)] -pub enum CompactSerializationError { - #[error("cannot write bytes: {inner_error} in {location}")] - CannotWriteBytes { - location: &'static Location<'static>, - inner_error: String, - }, -} +#[derive(From, PartialEq)] +struct CompactPeersEncoded(Vec); -impl From for responses::error::Error { - fn from(err: CompactSerializationError) -> Self { - responses::error::Error { - failure_reason: format!("{err}"), - } - } -} +impl FromIterator> for CompactPeersEncoded { + fn from_iter>>(iter: T) -> Self { + let mut bytes: Vec = vec![]; -impl IntoResponse for Compact { - fn into_response(self) -> Response { - match self.body() { - Ok(bytes) => (StatusCode::OK, bytes).into_response(), - Err(err) => responses::error::Error::from(CompactSerializationError::CannotWriteBytes { - location: Location::caller(), - inner_error: format!("{err}"), - }) - .into_response(), + for peer in iter { + bytes + .write_all(&u32::from(peer.ip).to_be_bytes()) + .expect("it should write peer ip"); + 
bytes.write_all(&peer.port.to_be_bytes()).expect("it should write peer port"); } + + bytes.into() } } -impl From for Compact { - fn from(domain_announce_response: AnnounceData) -> Self { - let peers: Vec = domain_announce_response - .peers - .iter() - .map(|peer| CompactPeer::from(*peer)) - .collect(); +impl FromIterator> for CompactPeersEncoded { + fn from_iter>>(iter: T) -> Self { + let mut bytes: Vec = Vec::new(); - Self { - interval: domain_announce_response.interval, - interval_min: domain_announce_response.interval_min, - complete: domain_announce_response.swarm_stats.seeders, - incomplete: domain_announce_response.swarm_stats.leechers, - peers, + for peer in iter { + bytes + .write_all(&u128::from(peer.ip).to_be_bytes()) + .expect("it should write peer ip"); + bytes.write_all(&peer.port.to_be_bytes()).expect("it should write peer port"); } + bytes.into() } } #[cfg(test)] mod tests { - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::sync::Arc; - use super::{NonCompact, Peer}; - use crate::servers::http::v1::responses::announce::{Compact, CompactPeer}; + use torrust_tracker_configuration::AnnouncePolicy; + use torrust_tracker_primitives::peer; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; + use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; + + use crate::core::AnnounceData; + use crate::servers::http::v1::responses::announce::{Announce, Compact, Normal, Response}; // Some ascii values used in tests: // @@ -439,30 +320,32 @@ mod tests { // IP addresses and port numbers used in tests are chosen so that their bencoded representation // is also a valid string which makes asserts more readable. 
+ fn setup_announce_data() -> AnnounceData { + let policy = AnnouncePolicy::new(111, 222); + + let peer_ipv4 = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), 0x7070)) + .build(); + + let peer_ipv6 = PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000002")) + .with_peer_addr(&SocketAddr::new( + IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), + 0x7070, + )) + .build(); + + let peers = vec![Arc::new(peer_ipv4), Arc::new(peer_ipv6)]; + let stats = SwarmMetadata::new(333, 333, 444); + + AnnounceData::new(peers, stats, policy) + } + #[test] fn non_compact_announce_response_can_be_bencoded() { - let response = NonCompact { - interval: 111, - interval_min: 222, - complete: 333, - incomplete: 444, - peers: vec![ - // IPV4 - Peer { - peer_id: *b"-qB00000000000000001", - ip: IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), // 105.105.105.105 - port: 0x7070, // 28784 - }, - // IPV6 - Peer { - peer_id: *b"-qB00000000000000002", - ip: IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), - port: 0x7070, // 28784 - }, - ], - }; - - let bytes = response.body(); + let response: Announce = setup_announce_data().into(); + let bytes = response.body().expect("it should encode the response"); // cspell:disable-next-line let expected_bytes = b"d8:completei333e10:incompletei444e8:intervali111e12:min intervali222e5:peersld2:ip15:105.105.105.1057:peer id20:-qB000000000000000014:porti28784eed2:ip39:6969:6969:6969:6969:6969:6969:6969:69697:peer id20:-qB000000000000000024:porti28784eeee"; @@ -475,26 +358,8 @@ mod tests { #[test] fn compact_announce_response_can_be_bencoded() { - let response = Compact { - interval: 111, - interval_min: 222, - complete: 333, - incomplete: 444, - peers: vec![ - // IPV4 - CompactPeer { - ip: IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), // 
105.105.105.105 - port: 0x7070, // 28784 - }, - // IPV6 - CompactPeer { - ip: IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)), - port: 0x7070, // 28784 - }, - ], - }; - - let bytes = response.body().unwrap(); + let response: Announce = setup_announce_data().into(); + let bytes = response.body().expect("it should encode the response"); let expected_bytes = // cspell:disable-next-line diff --git a/src/servers/http/v1/responses/error.rs b/src/servers/http/v1/responses/error.rs index 606ead3b2..c406c797a 100644 --- a/src/servers/http/v1/responses/error.rs +++ b/src/servers/http/v1/responses/error.rs @@ -9,11 +9,11 @@ //! why the query failed, and no other keys are required."_ //! //! > **NOTICE**: error responses are bencoded and always have a `200 OK` status -//! code. The official `BitTorrent` specification does not specify the status -//! code. +//! > code. The official `BitTorrent` specification does not specify the status +//! > code. use axum::http::StatusCode; use axum::response::{IntoResponse, Response}; -use serde::{self, Serialize}; +use serde::Serialize; /// `Error` response for the [`HTTP tracker`](crate::servers::http). #[derive(Serialize, Debug, PartialEq)] diff --git a/src/servers/http/v1/responses/mod.rs b/src/servers/http/v1/responses/mod.rs index 3c6632fed..e22879c6d 100644 --- a/src/servers/http/v1/responses/mod.rs +++ b/src/servers/http/v1/responses/mod.rs @@ -5,3 +5,15 @@ pub mod announce; pub mod error; pub mod scrape; + +pub use announce::{Announce, Compact, Normal}; + +/// Trait that defines the Announce Response Format +pub trait Response: axum::response::IntoResponse { + /// Returns the Body of the Announce Response + /// + /// # Errors + /// + /// If unable to generate the response, it will return an error. 
+ fn body(self) -> Result, error::Error>; +} diff --git a/src/servers/http/v1/responses/scrape.rs b/src/servers/http/v1/responses/scrape.rs index 9cd88b9ab..11f361028 100644 --- a/src/servers/http/v1/responses/scrape.rs +++ b/src/servers/http/v1/responses/scrape.rs @@ -7,15 +7,15 @@ use axum::http::StatusCode; use axum::response::{IntoResponse, Response}; use torrust_tracker_contrib_bencode::{ben_int, ben_map, BMutAccess}; -use crate::tracker::ScrapeData; +use crate::core::ScrapeData; /// The `Scrape` response for the HTTP tracker. /// /// ```rust /// use torrust_tracker::servers::http::v1::responses::scrape::Bencoded; -/// use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; -/// use torrust_tracker::tracker::torrent::SwarmMetadata; -/// use torrust_tracker::tracker::ScrapeData; +/// use torrust_tracker_primitives::info_hash::InfoHash; +/// use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; +/// use torrust_tracker::core::ScrapeData; /// /// let info_hash = InfoHash([0x69; 20]); /// let mut scrape_data = ScrapeData::empty(); @@ -92,10 +92,11 @@ impl IntoResponse for Bencoded { mod tests { mod scrape_response { + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; + + use crate::core::ScrapeData; use crate::servers::http::v1::responses::scrape::Bencoded; - use crate::shared::bit_torrent::info_hash::InfoHash; - use crate::tracker::torrent::SwarmMetadata; - use crate::tracker::ScrapeData; fn sample_scrape_data() -> ScrapeData { let info_hash = InfoHash([0x69; 20]); diff --git a/src/servers/http/v1/routes.rs b/src/servers/http/v1/routes.rs index 6546dcbb8..c24797c4a 100644 --- a/src/servers/http/v1/routes.rs +++ b/src/servers/http/v1/routes.rs @@ -1,21 +1,37 @@ //! HTTP server routes for version `v1`. 
+use std::net::SocketAddr; use std::sync::Arc; +use std::time::Duration; +use axum::error_handling::HandleErrorLayer; +use axum::http::HeaderName; +use axum::response::Response; use axum::routing::get; -use axum::Router; +use axum::{BoxError, Router}; use axum_client_ip::SecureClientIpSource; +use hyper::{Request, StatusCode}; +use torrust_tracker_configuration::DEFAULT_TIMEOUT; +use tower::timeout::TimeoutLayer; +use tower::ServiceBuilder; use tower_http::compression::CompressionLayer; +use tower_http::propagate_header::PropagateHeaderLayer; +use tower_http::request_id::{MakeRequestUuid, SetRequestIdLayer}; +use tower_http::trace::{DefaultMakeSpan, TraceLayer}; +use tracing::{Level, Span}; -use super::handlers::{announce, scrape}; -use crate::tracker::Tracker; +use super::handlers::{announce, health_check, scrape}; +use crate::core::Tracker; +use crate::servers::http::HTTP_TRACKER_LOG_TARGET; /// It adds the routes to the router. /// /// > **NOTICE**: it's added a layer to get the client IP from the connection -/// info. The tracker could use the connection info to get the client IP. +/// > info. The tracker could use the connection info to get the client IP. 
#[allow(clippy::needless_pass_by_value)] -pub fn router(tracker: Arc) -> Router { +pub fn router(tracker: Arc, server_socket_addr: SocketAddr) -> Router { Router::new() + // Health check + .route("/health_check", get(health_check::handler)) // Announce request .route("/announce", get(announce::handle_without_key).with_state(tracker.clone())) .route("/announce/:key", get(announce::handle_with_key).with_state(tracker.clone())) @@ -25,4 +41,44 @@ pub fn router(tracker: Arc) -> Router { // Add extension to get the client IP from the connection info .layer(SecureClientIpSource::ConnectInfo.into_extension()) .layer(CompressionLayer::new()) + .layer(SetRequestIdLayer::x_request_id(MakeRequestUuid)) + .layer(PropagateHeaderLayer::new(HeaderName::from_static("x-request-id"))) + .layer( + TraceLayer::new_for_http() + .make_span_with(DefaultMakeSpan::new().level(Level::INFO)) + .on_request(move |request: &Request, _span: &Span| { + let method = request.method().to_string(); + let uri = request.uri().to_string(); + let request_id = request + .headers() + .get("x-request-id") + .map(|v| v.to_str().unwrap_or_default()) + .unwrap_or_default(); + + tracing::span!( + target: HTTP_TRACKER_LOG_TARGET, + tracing::Level::INFO, "request", server_socket_addr= %server_socket_addr, method = %method, uri = %uri, request_id = %request_id); + }) + .on_response(move |response: &Response, latency: Duration, _span: &Span| { + let status_code = response.status(); + let request_id = response + .headers() + .get("x-request-id") + .map(|v| v.to_str().unwrap_or_default()) + .unwrap_or_default(); + let latency_ms = latency.as_millis(); + + tracing::span!( + target: HTTP_TRACKER_LOG_TARGET, + tracing::Level::INFO, "response", server_socket_addr= %server_socket_addr, latency = %latency_ms, status = %status_code, request_id = %request_id); + }), + ) + .layer(SetRequestIdLayer::x_request_id(MakeRequestUuid)) + .layer( + ServiceBuilder::new() + // this middleware goes above `TimeoutLayer` because it will 
receive + // errors returned by `TimeoutLayer` + .layer(HandleErrorLayer::new(|_: BoxError| async { StatusCode::REQUEST_TIMEOUT })) + .layer(TimeoutLayer::new(DEFAULT_TIMEOUT)), + ) } diff --git a/src/servers/http/v1/services/announce.rs b/src/servers/http/v1/services/announce.rs index 4c1b262ba..f5f730ae2 100644 --- a/src/servers/http/v1/services/announce.rs +++ b/src/servers/http/v1/services/announce.rs @@ -2,18 +2,19 @@ //! //! The service is responsible for handling the `announce` requests. //! -//! It delegates the `announce` logic to the [`Tracker`](crate::tracker::Tracker::announce) -//! and it returns the [`AnnounceData`](crate::tracker::AnnounceData) returned -//! by the [`Tracker`](crate::tracker::Tracker). +//! It delegates the `announce` logic to the [`Tracker`](crate::core::Tracker::announce) +//! and it returns the [`AnnounceData`] returned +//! by the [`Tracker`]. //! -//! It also sends an [`statistics::Event`](crate::tracker::statistics::Event) +//! It also sends an [`statistics::Event`] //! because events are specific for the HTTP tracker. use std::net::IpAddr; use std::sync::Arc; -use crate::shared::bit_torrent::info_hash::InfoHash; -use crate::tracker::peer::Peer; -use crate::tracker::{statistics, AnnounceData, Tracker}; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::peer; + +use crate::core::{statistics, AnnounceData, Tracker}; /// The HTTP tracker `announce` service. /// @@ -23,13 +24,13 @@ use crate::tracker::{statistics, AnnounceData, Tracker}; /// - The number of TCP `announce` requests handled by the HTTP tracker. /// /// > **NOTICE**: as the HTTP tracker does not requires a connection request -/// like the UDP tracker, the number of TCP connections is incremented for -/// each `announce` request. -pub async fn invoke(tracker: Arc, info_hash: InfoHash, peer: &mut Peer) -> AnnounceData { +/// > like the UDP tracker, the number of TCP connections is incremented for +/// > each `announce` request. 
+pub async fn invoke(tracker: Arc, info_hash: InfoHash, peer: &mut peer::Peer) -> AnnounceData { let original_peer_ip = peer.peer_addr.ip(); // The tracker could change the original peer ip - let announce_data = tracker.announce(&info_hash, peer, &original_peer_ip).await; + let announce_data = tracker.announce(&info_hash, peer, &original_peer_ip); match original_peer_ip { IpAddr::V4(_) => { @@ -47,16 +48,16 @@ pub async fn invoke(tracker: Arc, info_hash: InfoHash, peer: &mut Peer) mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use torrust_tracker_primitives::announce_event::AnnounceEvent; + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfBytes}; use torrust_tracker_test_helpers::configuration; - use crate::shared::bit_torrent::info_hash::InfoHash; - use crate::shared::clock::DurationSinceUnixEpoch; - use crate::tracker::services::tracker_factory; - use crate::tracker::{peer, Tracker}; + use crate::core::services::tracker_factory; + use crate::core::Tracker; fn public_tracker() -> Tracker { - tracker_factory(configuration::ephemeral_mode_public().into()) + tracker_factory(&configuration::ephemeral_public()) } fn sample_info_hash() -> InfoHash { @@ -94,14 +95,14 @@ mod tests { use std::sync::Arc; use mockall::predicate::eq; + use torrust_tracker_primitives::peer; + use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_test_helpers::configuration; use super::{sample_peer_using_ipv4, sample_peer_using_ipv6}; + use crate::core::{statistics, AnnounceData, Tracker}; use crate::servers::http::v1::services::announce::invoke; use crate::servers::http::v1::services::announce::tests::{public_tracker, sample_info_hash, sample_peer}; - use crate::tracker::peer::Peer; - use crate::tracker::torrent::SwarmStats; - use crate::tracker::{statistics, AnnounceData, Tracker}; #[tokio::test] async fn 
it_should_return_the_announce_data() { @@ -113,13 +114,12 @@ mod tests { let expected_announce_data = AnnounceData { peers: vec![], - swarm_stats: SwarmStats { - completed: 0, - seeders: 1, - leechers: 0, + stats: SwarmMetadata { + downloaded: 0, + complete: 1, + incomplete: 0, }, - interval: tracker.config.announce_interval, - interval_min: tracker.config.min_announce_interval, + policy: tracker.get_announce_policy(), }; assert_eq!(announce_data, expected_announce_data); @@ -137,7 +137,7 @@ mod tests { let tracker = Arc::new( Tracker::new( - Arc::new(configuration::ephemeral()), + &configuration::ephemeral().core, Some(stats_event_sender), statistics::Repo::new(), ) @@ -151,13 +151,14 @@ mod tests { fn tracker_with_an_ipv6_external_ip(stats_event_sender: Box) -> Tracker { let mut configuration = configuration::ephemeral(); - configuration.external_ip = - Some(IpAddr::V6(Ipv6Addr::new(0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969)).to_string()); + configuration.core.net.external_ip = Some(IpAddr::V6(Ipv6Addr::new( + 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, 0x6969, + ))); - Tracker::new(Arc::new(configuration), Some(stats_event_sender), statistics::Repo::new()).unwrap() + Tracker::new(&configuration.core, Some(stats_event_sender), statistics::Repo::new()).unwrap() } - fn peer_with_the_ipv4_loopback_ip() -> Peer { + fn peer_with_the_ipv4_loopback_ip() -> peer::Peer { let loopback_ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); let mut peer = sample_peer(); peer.peer_addr = SocketAddr::new(loopback_ip, 8080); @@ -201,7 +202,7 @@ mod tests { let tracker = Arc::new( Tracker::new( - Arc::new(configuration::ephemeral()), + &configuration::ephemeral().core, Some(stats_event_sender), statistics::Repo::new(), ) diff --git a/src/servers/http/v1/services/scrape.rs b/src/servers/http/v1/services/scrape.rs index 240680ca3..b83abb321 100644 --- a/src/servers/http/v1/services/scrape.rs +++ b/src/servers/http/v1/services/scrape.rs @@ -2,17 +2,18 @@ //! 
//! The service is responsible for handling the `scrape` requests. //! -//! It delegates the `scrape` logic to the [`Tracker`](crate::tracker::Tracker::scrape) -//! and it returns the [`ScrapeData`](crate::tracker::ScrapeData) returned -//! by the [`Tracker`](crate::tracker::Tracker). +//! It delegates the `scrape` logic to the [`Tracker`](crate::core::Tracker::scrape) +//! and it returns the [`ScrapeData`] returned +//! by the [`Tracker`]. //! -//! It also sends an [`statistics::Event`](crate::tracker::statistics::Event) +//! It also sends an [`statistics::Event`] //! because events are specific for the HTTP tracker. use std::net::IpAddr; use std::sync::Arc; -use crate::shared::bit_torrent::info_hash::InfoHash; -use crate::tracker::{statistics, ScrapeData, Tracker}; +use torrust_tracker_primitives::info_hash::InfoHash; + +use crate::core::{statistics, ScrapeData, Tracker}; /// The HTTP tracker `scrape` service. /// @@ -22,8 +23,8 @@ use crate::tracker::{statistics, ScrapeData, Tracker}; /// - The number of TCP `scrape` requests handled by the HTTP tracker. /// /// > **NOTICE**: as the HTTP tracker does not requires a connection request -/// like the UDP tracker, the number of TCP connections is incremented for -/// each `scrape` request. +/// > like the UDP tracker, the number of TCP connections is incremented for +/// > each `scrape` request. 
pub async fn invoke(tracker: &Arc, info_hashes: &Vec, original_peer_ip: &IpAddr) -> ScrapeData { let scrape_data = tracker.scrape(info_hashes).await; @@ -60,16 +61,16 @@ mod tests { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use torrust_tracker_primitives::announce_event::AnnounceEvent; + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::{peer, DurationSinceUnixEpoch, NumberOfBytes}; use torrust_tracker_test_helpers::configuration; - use crate::shared::bit_torrent::info_hash::InfoHash; - use crate::shared::clock::DurationSinceUnixEpoch; - use crate::tracker::services::tracker_factory; - use crate::tracker::{peer, Tracker}; + use crate::core::services::tracker_factory; + use crate::core::Tracker; fn public_tracker() -> Tracker { - tracker_factory(configuration::ephemeral_mode_public().into()) + tracker_factory(&configuration::ephemeral_public()) } fn sample_info_hashes() -> Vec { @@ -99,14 +100,14 @@ mod tests { use std::sync::Arc; use mockall::predicate::eq; + use torrust_tracker_primitives::swarm_metadata::SwarmMetadata; use torrust_tracker_test_helpers::configuration; + use crate::core::{statistics, ScrapeData, Tracker}; use crate::servers::http::v1::services::scrape::invoke; use crate::servers::http::v1::services::scrape::tests::{ public_tracker, sample_info_hash, sample_info_hashes, sample_peer, }; - use crate::tracker::torrent::SwarmMetadata; - use crate::tracker::{statistics, ScrapeData, Tracker}; #[tokio::test] async fn it_should_return_the_scrape_data_for_a_torrent() { @@ -118,7 +119,7 @@ mod tests { // Announce a new peer to force scrape data to contain not zeroed data let mut peer = sample_peer(); let original_peer_ip = peer.ip(); - tracker.announce(&info_hash, &mut peer, &original_peer_ip).await; + tracker.announce(&info_hash, &mut peer, &original_peer_ip); let scrape_data = invoke(&tracker, &info_hashes, &original_peer_ip).await; @@ -147,7 +148,7 @@ mod 
tests { let tracker = Arc::new( Tracker::new( - Arc::new(configuration::ephemeral()), + &configuration::ephemeral().core, Some(stats_event_sender), statistics::Repo::new(), ) @@ -171,7 +172,7 @@ mod tests { let tracker = Arc::new( Tracker::new( - Arc::new(configuration::ephemeral()), + &configuration::ephemeral().core, Some(stats_event_sender), statistics::Repo::new(), ) @@ -193,11 +194,11 @@ mod tests { use mockall::predicate::eq; use torrust_tracker_test_helpers::configuration; + use crate::core::{statistics, ScrapeData, Tracker}; use crate::servers::http::v1::services::scrape::fake; use crate::servers::http::v1::services::scrape::tests::{ public_tracker, sample_info_hash, sample_info_hashes, sample_peer, }; - use crate::tracker::{statistics, ScrapeData, Tracker}; #[tokio::test] async fn it_should_always_return_the_zeroed_scrape_data_for_a_torrent() { @@ -209,7 +210,7 @@ mod tests { // Announce a new peer to force scrape data to contain not zeroed data let mut peer = sample_peer(); let original_peer_ip = peer.ip(); - tracker.announce(&info_hash, &mut peer, &original_peer_ip).await; + tracker.announce(&info_hash, &mut peer, &original_peer_ip); let scrape_data = fake(&tracker, &info_hashes, &original_peer_ip).await; @@ -230,7 +231,7 @@ mod tests { let tracker = Arc::new( Tracker::new( - Arc::new(configuration::ephemeral()), + &configuration::ephemeral().core, Some(stats_event_sender), statistics::Repo::new(), ) @@ -254,7 +255,7 @@ mod tests { let tracker = Arc::new( Tracker::new( - Arc::new(configuration::ephemeral()), + &configuration::ephemeral().core, Some(stats_event_sender), statistics::Repo::new(), ) diff --git a/src/servers/logging.rs b/src/servers/logging.rs new file mode 100644 index 000000000..ad9ccbbcc --- /dev/null +++ b/src/servers/logging.rs @@ -0,0 +1,29 @@ +/// This is the prefix used in logs to identify a started service. 
+/// +/// For example: +/// +/// ```text +/// 2024-06-25T12:36:25.025312Z INFO UDP TRACKER: Started on: udp://0.0.0.0:6969 +/// 2024-06-25T12:36:25.025445Z INFO HTTP TRACKER: Started on: http://0.0.0.0:7070 +/// 2024-06-25T12:36:25.025527Z INFO API: Started on http://0.0.0.0:1212 +/// 2024-06-25T12:36:25.025580Z INFO HEALTH CHECK API: Started on: http://127.0.0.1:1313 +/// ``` +pub const STARTED_ON: &str = "Started on"; + +/* + +todo: we should use a field fot the URL. + +For example, instead of: + +``` +2024-06-25T12:36:25.025312Z INFO UDP TRACKER: Started on: udp://0.0.0.0:6969 +``` + +We should use something like: + +``` +2024-06-25T12:36:25.025312Z INFO UDP TRACKER started_at_url=udp://0.0.0.0:6969 +``` + +*/ diff --git a/src/servers/mod.rs b/src/servers/mod.rs index 38b4b70cd..705a4728e 100644 --- a/src/servers/mod.rs +++ b/src/servers/mod.rs @@ -1,5 +1,9 @@ //! Servers. Services that can be started and stopped. pub mod apis; +pub mod custom_axum_server; +pub mod health_check_api; pub mod http; +pub mod logging; +pub mod registar; pub mod signals; pub mod udp; diff --git a/src/servers/registar.rs b/src/servers/registar.rs new file mode 100644 index 000000000..6058595ba --- /dev/null +++ b/src/servers/registar.rs @@ -0,0 +1,101 @@ +//! Registar. Registers Services for Health Check. + +use std::collections::HashMap; +use std::net::SocketAddr; +use std::sync::Arc; + +use derive_more::Constructor; +use tokio::sync::Mutex; +use tokio::task::JoinHandle; +use tracing::debug; + +/// A [`ServiceHeathCheckResult`] is returned by a completed health check. +pub type ServiceHeathCheckResult = Result; + +/// The [`ServiceHealthCheckJob`] has a health check job with it's metadata +/// +/// The `job` awaits a [`ServiceHeathCheckResult`]. +#[derive(Debug, Constructor)] +pub struct ServiceHealthCheckJob { + pub binding: SocketAddr, + pub info: String, + pub job: JoinHandle, +} + +/// The function specification [`FnSpawnServiceHeathCheck`]. 
+/// +/// A function fulfilling this specification will spawn a new [`ServiceHealthCheckJob`]. +pub type FnSpawnServiceHeathCheck = fn(&SocketAddr) -> ServiceHealthCheckJob; + +/// A [`ServiceRegistration`] is provided to the [`Registar`] for registration. +/// +/// Each registration includes a function that fulfils the [`FnSpawnServiceHeathCheck`] specification. +#[derive(Clone, Debug, Constructor)] +pub struct ServiceRegistration { + binding: SocketAddr, + check_fn: FnSpawnServiceHeathCheck, +} + +impl ServiceRegistration { + #[must_use] + pub fn spawn_check(&self) -> ServiceHealthCheckJob { + (self.check_fn)(&self.binding) + } +} + +/// A [`ServiceRegistrationForm`] will return a completed [`ServiceRegistration`] to the [`Registar`]. +pub type ServiceRegistrationForm = tokio::sync::oneshot::Sender; + +/// The [`ServiceRegistry`] contains each unique [`ServiceRegistration`] by it's [`SocketAddr`]. +pub type ServiceRegistry = Arc>>; + +/// The [`Registar`] manages the [`ServiceRegistry`]. +#[derive(Clone, Debug)] +pub struct Registar { + registry: ServiceRegistry, +} + +#[allow(clippy::derivable_impls)] +impl Default for Registar { + fn default() -> Self { + Self { + registry: ServiceRegistry::default(), + } + } +} + +impl Registar { + pub fn new(register: ServiceRegistry) -> Self { + Self { registry: register } + } + + /// Registers a Service + #[must_use] + pub fn give_form(&self) -> ServiceRegistrationForm { + let (tx, rx) = tokio::sync::oneshot::channel::(); + let register = self.clone(); + tokio::spawn(async move { + register.insert(rx).await; + }); + tx + } + + /// Inserts a listing into the registry. 
+ async fn insert(&self, rx: tokio::sync::oneshot::Receiver) { + debug!("Waiting for the started service to send registration data ..."); + + let service_registration = rx + .await + .expect("it should receive the service registration from the started service"); + + let mut mutex = self.registry.lock().await; + + mutex.insert(service_registration.binding, service_registration); + } + + /// Returns the [`ServiceRegistry`] of services + #[must_use] + pub fn entries(&self) -> ServiceRegistry { + self.registry.clone() + } +} diff --git a/src/servers/signals.rs b/src/servers/signals.rs index 51f53738d..0a1a06312 100644 --- a/src/servers/signals.rs +++ b/src/servers/signals.rs @@ -1,5 +1,17 @@ //! This module contains functions to handle signals. -use log::info; +use std::time::Duration; + +use derive_more::Display; +use tokio::time::sleep; +use tracing::info; + +/// This is the message that the "launcher" spawned task receives from the main +/// application process to notify the service to shutdown. +/// +#[derive(Copy, Clone, Debug, Display)] +pub enum Halted { + Normal, +} /// Resolves on `ctrl_c` or the `terminate` signal. /// @@ -33,18 +45,38 @@ pub async fn global_shutdown_signal() { /// # Panics /// /// Will panic if the `stop_receiver` resolves with an error. -pub async fn shutdown_signal(stop_receiver: tokio::sync::oneshot::Receiver) { - let stop = async { stop_receiver.await.expect("Failed to install stop signal.") }; +pub async fn shutdown_signal(rx_halt: tokio::sync::oneshot::Receiver) { + let halt = async { + match rx_halt.await { + Ok(signal) => signal, + Err(err) => panic!("Failed to install stop signal: {err}"), + } + }; tokio::select! { - _ = stop => {}, - () = global_shutdown_signal() => {} + signal = halt => { info!("Halt signal processed: {}", signal) }, + () = global_shutdown_signal() => { info!("Global shutdown signal processed") } } } /// Same as `shutdown_signal()`, but shows a message when it resolves. 
-pub async fn shutdown_signal_with_message(stop_receiver: tokio::sync::oneshot::Receiver, message: String) { - shutdown_signal(stop_receiver).await; +pub async fn shutdown_signal_with_message(rx_halt: tokio::sync::oneshot::Receiver, message: String) { + shutdown_signal(rx_halt).await; info!("{message}"); } + +pub async fn graceful_shutdown(handle: axum_server::Handle, rx_halt: tokio::sync::oneshot::Receiver, message: String) { + shutdown_signal_with_message(rx_halt, message).await; + + info!("Sending graceful shutdown signal"); + handle.graceful_shutdown(Some(Duration::from_secs(90))); + + println!("!! shuting down in 90 seconds !!"); + + loop { + sleep(Duration::from_secs(1)).await; + + info!("remaining alive connections: {}", handle.connection_count()); + } +} diff --git a/src/servers/udp/connection_cookie.rs b/src/servers/udp/connection_cookie.rs index 4dc9896ab..c15ad114c 100644 --- a/src/servers/udp/connection_cookie.rs +++ b/src/servers/udp/connection_cookie.rs @@ -46,10 +46,10 @@ //! Peer C connects at timestamp 180 slot 1 -> connection ID will be valid from timestamp 180 to 360 //! ``` //! > **NOTICE**: connection ID is always the same for a given peer -//! (socket address) and time slot. +//! > (socket address) and time slot. //! //! > **NOTICE**: connection ID will be valid for two time extents, **not two -//! minutes**. It'll be valid for the the current time extent and the next one. +//! > minutes**. It'll be valid for the the current time extent and the next one. //! //! Refer to [`Connect`](crate::servers::udp#connect) for more information about //! the connection process. @@ -62,17 +62,17 @@ //! //! ## Disadvantages //! -//! - It's not very flexible. The connection ID is only valid for a certain -//! amount of time. -//! - It's not very accurate. The connection ID is valid for more than two -//! minutes. +//! - It's not very flexible. The connection ID is only valid for a certain amount of time. +//! - It's not very accurate. 
The connection ID is valid for more than two minutes. use std::net::SocketAddr; use std::panic::Location; use aquatic_udp_protocol::ConnectionId; +use torrust_tracker_clock::time_extent::{Extent, TimeExtent}; +use zerocopy::network_endian::I64; +use zerocopy::AsBytes; use super::error::Error; -use crate::shared::clock::time_extent::{Extent, TimeExtent}; pub type Cookie = [u8; 8]; @@ -83,13 +83,15 @@ pub const COOKIE_LIFETIME: TimeExtent = TimeExtent::from_sec(2, &60); /// Converts a connection ID into a connection cookie. #[must_use] pub fn from_connection_id(connection_id: &ConnectionId) -> Cookie { - connection_id.0.to_le_bytes() + let mut cookie = [0u8; 8]; + connection_id.write_to(&mut cookie); + cookie } /// Converts a connection cookie into a connection ID. #[must_use] pub fn into_connection_id(connection_cookie: &Cookie) -> ConnectionId { - ConnectionId(i64::from_le_bytes(*connection_cookie)) + ConnectionId(I64::new(i64::from_be_bytes(*connection_cookie))) } /// Generates a new connection cookie. 
@@ -133,9 +135,11 @@ mod cookie_builder { use std::hash::{Hash, Hasher}; use std::net::SocketAddr; + use torrust_tracker_clock::time_extent::{Extent, Make, TimeExtent}; + use super::{Cookie, SinceUnixEpochTimeExtent, COOKIE_LIFETIME}; - use crate::shared::clock::time_extent::{DefaultTimeExtentMaker, Extent, Make, TimeExtent}; use crate::shared::crypto::keys::seeds::{Current, Keeper}; + use crate::DefaultTimeExtentMaker; pub(super) fn get_last_time_extent() -> SinceUnixEpochTimeExtent { DefaultTimeExtentMaker::now(&COOKIE_LIFETIME.increment) @@ -162,10 +166,12 @@ mod cookie_builder { mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use torrust_tracker_clock::clock::stopped::Stopped as _; + use torrust_tracker_clock::clock::{self}; + use torrust_tracker_clock::time_extent::{self, Extent}; + use super::cookie_builder::{self}; use crate::servers::udp::connection_cookie::{check, make, Cookie, COOKIE_LIFETIME}; - use crate::shared::clock::time_extent::{self, Extent}; - use crate::shared::clock::{Stopped, StoppedTime}; // #![feature(const_socketaddr)] // const REMOTE_ADDRESS_IPV4_ZERO: SocketAddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); @@ -173,11 +179,14 @@ mod tests { #[test] fn it_should_make_a_connection_cookie() { // Note: This constant may need to be updated in the future as the hash is not guaranteed to to be stable between versions. 
- const ID_COOKIE: Cookie = [23, 204, 198, 29, 48, 180, 62, 19]; + const ID_COOKIE_OLD: Cookie = [23, 204, 198, 29, 48, 180, 62, 19]; + const ID_COOKIE_NEW: Cookie = [41, 166, 45, 246, 249, 24, 108, 203]; + + clock::Stopped::local_set_to_unix_epoch(); let cookie = make(&SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0)); - assert_eq!(cookie, ID_COOKIE); + assert!(cookie == ID_COOKIE_OLD || cookie == ID_COOKIE_NEW); } #[test] @@ -275,7 +284,7 @@ mod tests { let cookie = make(&remote_address); - Stopped::local_add(&COOKIE_LIFETIME.increment).unwrap(); + clock::Stopped::local_add(&COOKIE_LIFETIME.increment).unwrap(); let cookie_next = make(&remote_address); @@ -297,7 +306,7 @@ mod tests { let cookie = make(&remote_address); - Stopped::local_add(&COOKIE_LIFETIME.increment).unwrap(); + clock::Stopped::local_add(&COOKIE_LIFETIME.increment).unwrap(); check(&remote_address, &cookie).unwrap(); } @@ -306,9 +315,11 @@ mod tests { fn it_should_be_valid_for_the_last_time_extent() { let remote_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0); + clock::Stopped::local_set_to_unix_epoch(); + let cookie = make(&remote_address); - Stopped::local_set(&COOKIE_LIFETIME.total().unwrap().unwrap()); + clock::Stopped::local_set(&COOKIE_LIFETIME.total().unwrap().unwrap()); check(&remote_address, &cookie).unwrap(); } @@ -320,7 +331,7 @@ mod tests { let cookie = make(&remote_address); - Stopped::local_set(&COOKIE_LIFETIME.total_next().unwrap().unwrap()); + clock::Stopped::local_set(&COOKIE_LIFETIME.total_next().unwrap().unwrap()); check(&remote_address, &cookie).unwrap(); } diff --git a/src/servers/udp/error.rs b/src/servers/udp/error.rs index ce59cd015..315c9d1cf 100644 --- a/src/servers/udp/error.rs +++ b/src/servers/udp/error.rs @@ -13,7 +13,7 @@ pub enum Error { source: LocatedError<'static, dyn std::error::Error + Send + Sync>, }, - /// Error returned from a third-party library (aquatic_udp_protocol). 
+ /// Error returned from a third-party library (`aquatic_udp_protocol`). #[error("internal server error: {message}, {location}")] InternalServer { location: &'static Location<'static>, @@ -29,4 +29,8 @@ pub enum Error { BadRequest { source: LocatedError<'static, dyn std::error::Error + Send + Sync>, }, + + /// Error returned when tracker requires authentication. + #[error("domain tracker requires authentication but is not supported in current UDP implementation. Location: {location}")] + TrackerAuthenticationRequired { location: &'static Location<'static> }, } diff --git a/src/servers/udp/handlers.rs b/src/servers/udp/handlers.rs index 64d60e549..53683fbb9 100644 --- a/src/servers/udp/handlers.rs +++ b/src/servers/udp/handlers.rs @@ -1,55 +1,85 @@ //! Handlers for the UDP server. -use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; +use std::fmt; +use std::net::{IpAddr, SocketAddr}; use std::panic::Location; use std::sync::Arc; +use std::time::Instant; use aquatic_udp_protocol::{ - AnnounceInterval, AnnounceRequest, AnnounceResponse, ConnectRequest, ConnectResponse, ErrorResponse, NumberOfDownloads, - NumberOfPeers, Port, Request, Response, ResponsePeer, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId, + AnnounceInterval, AnnounceRequest, AnnounceResponse, AnnounceResponseFixedData, ConnectRequest, ConnectResponse, + ErrorResponse, Ipv4AddrBytes, Ipv6AddrBytes, NumberOfDownloads, NumberOfPeers, Port, Request, Response, ResponsePeer, + ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId, }; -use log::debug; +use torrust_tracker_located_error::DynError; +use torrust_tracker_primitives::info_hash::InfoHash; +use tracing::debug; +use uuid::Uuid; +use zerocopy::network_endian::I32; use super::connection_cookie::{check, from_connection_id, into_connection_id, make}; +use super::RawRequest; +use crate::core::{statistics, ScrapeData, Tracker}; use crate::servers::udp::error::Error; +use 
crate::servers::udp::logging::{log_bad_request, log_error_response, log_request, log_response}; use crate::servers::udp::peer_builder; use crate::servers::udp::request::AnnounceWrapper; use crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS; -use crate::shared::bit_torrent::info_hash::InfoHash; -use crate::tracker::{statistics, Tracker}; /// It handles the incoming UDP packets. /// /// It's responsible for: /// /// - Parsing the incoming packet. -/// - Delegating the request to the correct handler depending on the request -/// type. +/// - Delegating the request to the correct handler depending on the request type. /// /// It will return an `Error` response if the request is invalid. -pub async fn handle_packet(remote_addr: SocketAddr, payload: Vec, tracker: &Tracker) -> Response { - match Request::from_bytes(&payload[..payload.len()], MAX_SCRAPE_TORRENTS).map_err(|e| Error::InternalServer { - message: format!("{e:?}"), - location: Location::caller(), +pub(crate) async fn handle_packet(udp_request: RawRequest, tracker: &Tracker, local_addr: SocketAddr) -> Response { + debug!("Handling Packets: {udp_request:?}"); + + let start_time = Instant::now(); + + let request_id = RequestId::make(&udp_request); + + match Request::parse_bytes(&udp_request.payload[..udp_request.payload.len()], MAX_SCRAPE_TORRENTS).map_err(|e| { + Error::InternalServer { + message: format!("{e:?}"), + location: Location::caller(), + } }) { Ok(request) => { + log_request(&request, &request_id, &local_addr); + let transaction_id = match &request { Request::Connect(connect_request) => connect_request.transaction_id, Request::Announce(announce_request) => announce_request.transaction_id, Request::Scrape(scrape_request) => scrape_request.transaction_id, }; - match handle_request(request, remote_addr, tracker).await { + let response = match handle_request(request, udp_request.from, tracker).await { Ok(response) => response, Err(e) => handle_error(&e, transaction_id), - } + }; + + let latency = 
start_time.elapsed(); + + log_response(&response, &transaction_id, &request_id, &local_addr, latency); + + response + } + Err(e) => { + log_bad_request(&request_id); + + let response = handle_error( + &Error::BadRequest { + source: (Arc::new(e) as DynError).into(), + }, + TransactionId(I32::new(0)), + ); + + log_error_response(&request_id); + + response } - // bad request - Err(e) => handle_error( - &Error::BadRequest { - source: (Arc::new(e) as Arc).into(), - }, - TransactionId(0), - ), } } @@ -59,6 +89,8 @@ pub async fn handle_packet(remote_addr: SocketAddr, payload: Vec, tracker: & /// /// If a error happens in the `handle_request` function, it will just return the `ServerError`. pub async fn handle_request(request: Request, remote_addr: SocketAddr, tracker: &Tracker) -> Result { + debug!("Handling Request: {request:?} to: {remote_addr:?}"); + match request { Request::Connect(connect_request) => handle_connect(remote_addr, &connect_request, tracker).await, Request::Announce(announce_request) => handle_announce(remote_addr, &announce_request, tracker).await, @@ -98,22 +130,6 @@ pub async fn handle_connect(remote_addr: SocketAddr, request: &ConnectRequest, t Ok(Response::from(response)) } -/// It authenticates the request. It returns an error if the peer is not allowed -/// to make the request. -/// -/// # Errors -/// -/// Will return `Error` if unable to `authenticate_request`. -#[allow(deprecated)] -pub async fn authenticate(info_hash: &InfoHash, tracker: &Tracker) -> Result<(), Error> { - tracker - .authenticate_request(info_hash, &None) - .await - .map_err(|e| Error::TrackerError { - source: (Arc::new(e) as Arc).into(), - }) -} - /// It handles the `Announce` request. Refer to [`Announce`](crate::servers::udp#announce) /// request for more information. 
/// @@ -127,6 +143,13 @@ pub async fn handle_announce( ) -> Result { debug!("udp announce request: {:#?}", announce_request); + // Authentication + if tracker.requires_authentication() { + return Err(Error::TrackerAuthenticationRequired { + location: Location::caller(), + }); + } + check(&remote_addr, &from_connection_id(&announce_request.connection_id))?; let wrapped_announce_request = AnnounceWrapper::new(announce_request); @@ -134,11 +157,14 @@ pub async fn handle_announce( let info_hash = wrapped_announce_request.info_hash; let remote_client_ip = remote_addr.ip(); - authenticate(&info_hash, tracker).await?; + // Authorization + tracker.authorize(&info_hash).await.map_err(|e| Error::TrackerError { + source: (Arc::new(e) as Arc).into(), + })?; let mut peer = peer_builder::from_request(&wrapped_announce_request, &remote_client_ip); - let response = tracker.announce(&info_hash, &mut peer, &remote_client_ip).await; + let response = tracker.announce(&info_hash, &mut peer, &remote_client_ip); match remote_client_ip { IpAddr::V4(_) => { @@ -152,18 +178,20 @@ pub async fn handle_announce( #[allow(clippy::cast_possible_truncation)] if remote_addr.is_ipv4() { let announce_response = AnnounceResponse { - transaction_id: wrapped_announce_request.announce_request.transaction_id, - announce_interval: AnnounceInterval(i64::from(tracker.config.announce_interval) as i32), - leechers: NumberOfPeers(i64::from(response.swarm_stats.leechers) as i32), - seeders: NumberOfPeers(i64::from(response.swarm_stats.seeders) as i32), + fixed: AnnounceResponseFixedData { + transaction_id: wrapped_announce_request.announce_request.transaction_id, + announce_interval: AnnounceInterval(I32::new(i64::from(tracker.get_announce_policy().interval) as i32)), + leechers: NumberOfPeers(I32::new(i64::from(response.stats.incomplete) as i32)), + seeders: NumberOfPeers(I32::new(i64::from(response.stats.complete) as i32)), + }, peers: response .peers .iter() .filter_map(|peer| { if let IpAddr::V4(ip) = 
peer.peer_addr.ip() { - Some(ResponsePeer:: { - ip_address: ip, - port: Port(peer.peer_addr.port()), + Some(ResponsePeer:: { + ip_address: ip.into(), + port: Port(peer.peer_addr.port().into()), }) } else { None @@ -177,18 +205,20 @@ pub async fn handle_announce( Ok(Response::from(announce_response)) } else { let announce_response = AnnounceResponse { - transaction_id: wrapped_announce_request.announce_request.transaction_id, - announce_interval: AnnounceInterval(i64::from(tracker.config.announce_interval) as i32), - leechers: NumberOfPeers(i64::from(response.swarm_stats.leechers) as i32), - seeders: NumberOfPeers(i64::from(response.swarm_stats.seeders) as i32), + fixed: AnnounceResponseFixedData { + transaction_id: wrapped_announce_request.announce_request.transaction_id, + announce_interval: AnnounceInterval(I32::new(i64::from(tracker.get_announce_policy().interval) as i32)), + leechers: NumberOfPeers(I32::new(i64::from(response.stats.incomplete) as i32)), + seeders: NumberOfPeers(I32::new(i64::from(response.stats.complete) as i32)), + }, peers: response .peers .iter() .filter_map(|peer| { if let IpAddr::V6(ip) = peer.peer_addr.ip() { - Some(ResponsePeer:: { - ip_address: ip, - port: Port(peer.peer_addr.port()), + Some(ResponsePeer:: { + ip_address: ip.into(), + port: Port(peer.peer_addr.port().into()), }) } else { None @@ -218,27 +248,23 @@ pub async fn handle_scrape(remote_addr: SocketAddr, request: &ScrapeRequest, tra info_hashes.push(InfoHash(info_hash.0)); } - let scrape_data = tracker.scrape(&info_hashes).await; + let scrape_data = if tracker.requires_authentication() { + ScrapeData::zeroed(&info_hashes) + } else { + tracker.scrape(&info_hashes).await + }; let mut torrent_stats: Vec = Vec::new(); for file in &scrape_data.files { - let info_hash = file.0; let swarm_metadata = file.1; - #[allow(deprecated)] - let scrape_entry = if tracker.authenticate_request(info_hash, &None).await.is_ok() { - #[allow(clippy::cast_possible_truncation)] - 
TorrentScrapeStatistics { - seeders: NumberOfPeers(i64::from(swarm_metadata.complete) as i32), - completed: NumberOfDownloads(i64::from(swarm_metadata.downloaded) as i32), - leechers: NumberOfPeers(i64::from(swarm_metadata.incomplete) as i32), - } - } else { + #[allow(clippy::cast_possible_truncation)] + let scrape_entry = { TorrentScrapeStatistics { - seeders: NumberOfPeers(0), - completed: NumberOfDownloads(0), - leechers: NumberOfPeers(0), + seeders: NumberOfPeers(I32::new(i64::from(swarm_metadata.complete) as i32)), + completed: NumberOfDownloads(I32::new(i64::from(swarm_metadata.downloaded) as i32)), + leechers: NumberOfPeers(I32::new(i64::from(swarm_metadata.incomplete) as i32)), } }; @@ -273,22 +299,39 @@ fn handle_error(e: &Error, transaction_id: TransactionId) -> Response { }) } +/// An identifier for a request. +#[derive(Debug, Clone)] +pub struct RequestId(Uuid); + +impl RequestId { + fn make(_request: &RawRequest) -> RequestId { + RequestId(Uuid::new_v4()) + } +} + +impl fmt::Display for RequestId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) + } +} + #[cfg(test)] mod tests { use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use std::sync::Arc; - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; + use torrust_tracker_clock::clock::Time; use torrust_tracker_configuration::Configuration; + use torrust_tracker_primitives::{peer, NumberOfBytes}; use torrust_tracker_test_helpers::configuration; - use crate::shared::clock::{Current, Time}; - use crate::tracker::services::tracker_factory; - use crate::tracker::{peer, Tracker}; + use crate::core::services::tracker_factory; + use crate::core::Tracker; + use crate::CurrentClock; - fn tracker_configuration() -> Arc { - Arc::new(default_testing_tracker_configuration()) + fn tracker_configuration() -> Configuration { + default_testing_tracker_configuration() } fn default_testing_tracker_configuration() -> Configuration { @@ -296,18 +339,18 @@ mod tests { } fn 
public_tracker() -> Arc { - initialized_tracker(configuration::ephemeral_mode_public().into()) + initialized_tracker(&configuration::ephemeral_public()) } fn private_tracker() -> Arc { - initialized_tracker(configuration::ephemeral_mode_private().into()) + initialized_tracker(&configuration::ephemeral_private()) } fn whitelisted_tracker() -> Arc { - initialized_tracker(configuration::ephemeral_mode_whitelisted().into()) + initialized_tracker(&configuration::ephemeral_listed()) } - fn initialized_tracker(configuration: Arc) -> Arc { + fn initialized_tracker(configuration: &Configuration) -> Arc { tracker_factory(configuration).into() } @@ -327,39 +370,41 @@ mod tests { SocketAddr::new(IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)), 8080) } - struct TorrentPeerBuilder { + #[derive(Debug, Default)] + pub struct TorrentPeerBuilder { peer: peer::Peer, } impl TorrentPeerBuilder { - pub fn default() -> TorrentPeerBuilder { - let default_peer = peer::Peer { - peer_id: peer::Id([255u8; 20]), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: Current::now(), - uploaded: NumberOfBytes(0), - downloaded: NumberOfBytes(0), - left: NumberOfBytes(0), - event: AnnounceEvent::Started, - }; - TorrentPeerBuilder { peer: default_peer } + #[must_use] + pub fn new() -> Self { + Self { + peer: peer::Peer { + updated: CurrentClock::now(), + ..Default::default() + }, + } } - pub fn with_peer_id(mut self, peer_id: peer::Id) -> Self { - self.peer.peer_id = peer_id; + #[must_use] + pub fn with_peer_address(mut self, peer_addr: SocketAddr) -> Self { + self.peer.peer_addr = peer_addr; self } - pub fn with_peer_addr(mut self, peer_addr: SocketAddr) -> Self { - self.peer.peer_addr = peer_addr; + #[must_use] + pub fn with_peer_id(mut self, peer_id: peer::Id) -> Self { + self.peer.peer_id = peer_id; self } - pub fn with_bytes_left(mut self, left: i64) -> Self { + #[must_use] + pub fn with_number_of_bytes_left(mut self, left: i64) -> Self { self.peer.left = 
NumberOfBytes(left); self } + #[must_use] pub fn into(self) -> peer::Peer { self.peer } @@ -378,7 +423,7 @@ mod tests { } pub fn with_external_ip(mut self, external_ip: &str) -> Self { - self.configuration.external_ip = Some(external_ip.to_owned()); + self.configuration.core.net.external_ip = Some(external_ip.to_owned().parse().expect("valid IP address")); self } @@ -396,21 +441,21 @@ mod tests { use mockall::predicate::eq; use super::{sample_ipv4_socket_address, sample_ipv6_remote_addr, tracker_configuration}; + use crate::core::{self, statistics}; use crate::servers::udp::connection_cookie::{into_connection_id, make}; use crate::servers::udp::handlers::handle_connect; use crate::servers::udp::handlers::tests::{public_tracker, sample_ipv4_remote_addr}; - use crate::tracker::{self, statistics}; fn sample_connect_request() -> ConnectRequest { ConnectRequest { - transaction_id: TransactionId(0i32), + transaction_id: TransactionId(0i32.into()), } } #[tokio::test] async fn a_connect_response_should_contain_the_same_transaction_id_as_the_connect_request() { let request = ConnectRequest { - transaction_id: TransactionId(0i32), + transaction_id: TransactionId(0i32.into()), }; let response = handle_connect(sample_ipv4_remote_addr(), &request, &public_tracker()) @@ -429,7 +474,7 @@ mod tests { #[tokio::test] async fn a_connect_response_should_contain_a_new_connection_id() { let request = ConnectRequest { - transaction_id: TransactionId(0i32), + transaction_id: TransactionId(0i32.into()), }; let response = handle_connect(sample_ipv4_remote_addr(), &request, &public_tracker()) @@ -458,7 +503,12 @@ mod tests { let client_socket_address = sample_ipv4_socket_address(); let torrent_tracker = Arc::new( - tracker::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + core::Tracker::new( + &tracker_configuration().core, + Some(stats_event_sender), + statistics::Repo::new(), + ) + .unwrap(), ); handle_connect(client_socket_address, 
&sample_connect_request(), &torrent_tracker) .await @@ -476,7 +526,12 @@ mod tests { let stats_event_sender = Box::new(stats_event_sender_mock); let torrent_tracker = Arc::new( - tracker::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + core::Tracker::new( + &tracker_configuration().core, + Some(stats_event_sender), + statistics::Repo::new(), + ) + .unwrap(), ); handle_connect(sample_ipv6_remote_addr(), &sample_connect_request(), &torrent_tracker) .await @@ -487,10 +542,11 @@ mod tests { mod announce_request { use std::net::Ipv4Addr; + use std::num::NonZeroU16; use aquatic_udp_protocol::{ - AnnounceEvent, AnnounceRequest, ConnectionId, NumberOfBytes, NumberOfPeers, PeerId as AquaticPeerId, PeerKey, Port, - TransactionId, + AnnounceActionPlaceholder, AnnounceEvent, AnnounceRequest, ConnectionId, NumberOfBytes, NumberOfPeers, + PeerId as AquaticPeerId, PeerKey, Port, TransactionId, }; use crate::servers::udp::connection_cookie::{into_connection_id, make}; @@ -508,17 +564,18 @@ mod tests { let default_request = AnnounceRequest { connection_id: into_connection_id(&make(&sample_ipv4_remote_addr())), - transaction_id: TransactionId(0i32), + action_placeholder: AnnounceActionPlaceholder::default(), + transaction_id: TransactionId(0i32.into()), info_hash: info_hash_aquatic, peer_id: AquaticPeerId([255u8; 20]), - bytes_downloaded: NumberOfBytes(0i64), - bytes_uploaded: NumberOfBytes(0i64), - bytes_left: NumberOfBytes(0i64), - event: AnnounceEvent::Started, - ip_address: Some(client_ip), - key: PeerKey(0u32), - peers_wanted: NumberOfPeers(1i32), - port: Port(client_port), + bytes_downloaded: NumberOfBytes(0i64.into()), + bytes_uploaded: NumberOfBytes(0i64.into()), + bytes_left: NumberOfBytes(0i64.into()), + event: AnnounceEvent::Started.into(), + ip_address: client_ip.into(), + key: PeerKey::new(0i32), + peers_wanted: NumberOfPeers::new(1i32), + port: Port::new(NonZeroU16::new(client_port).expect("a non-zero client port")), 
}; AnnounceRequestBuilder { request: default_request, @@ -541,12 +598,12 @@ mod tests { } pub fn with_ip_address(mut self, ip_address: Ipv4Addr) -> Self { - self.request.ip_address = Some(ip_address); + self.request.ip_address = ip_address.into(); self } pub fn with_port(mut self, port: u16) -> Self { - self.request.port = Port(port); + self.request.port = Port(port.into()); self } @@ -558,22 +615,23 @@ mod tests { mod using_ipv4 { use std::future; - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::sync::Arc; use aquatic_udp_protocol::{ - AnnounceInterval, AnnounceResponse, InfoHash as AquaticInfoHash, NumberOfPeers, PeerId as AquaticPeerId, - Response, ResponsePeer, + AnnounceInterval, AnnounceResponse, InfoHash as AquaticInfoHash, Ipv4AddrBytes, Ipv6AddrBytes, NumberOfPeers, + PeerId as AquaticPeerId, Response, ResponsePeer, }; use mockall::predicate::eq; + use torrust_tracker_primitives::peer; + use crate::core::{self, statistics}; use crate::servers::udp::connection_cookie::{into_connection_id, make}; - use crate::servers::udp::handlers::handle_announce; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::servers::udp::handlers::tests::{ public_tracker, sample_ipv4_socket_address, tracker_configuration, TorrentPeerBuilder, }; - use crate::tracker::{self, peer, statistics}; + use crate::servers::udp::handlers::{handle_announce, AnnounceResponseFixedData}; #[tokio::test] async fn an_announced_peer_should_be_added_to_the_tracker() { @@ -596,14 +654,14 @@ mod tests { handle_announce(remote_addr, &request, &tracker).await.unwrap(); - let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; + let peers = tracker.get_torrent_peers(&info_hash.0.into()); - let expected_peer = TorrentPeerBuilder::default() + let expected_peer = TorrentPeerBuilder::new() .with_peer_id(peer::Id(peer_id.0)) - .with_peer_addr(SocketAddr::new(IpAddr::V4(client_ip), 
client_port)) + .with_peer_address(SocketAddr::new(IpAddr::V4(client_ip), client_port)) .into(); - assert_eq!(peers[0], expected_peer); + assert_eq!(peers[0], Arc::new(expected_peer)); } #[tokio::test] @@ -616,14 +674,16 @@ mod tests { let response = handle_announce(remote_addr, &request, &public_tracker()).await.unwrap(); - let empty_peer_vector: Vec> = vec![]; + let empty_peer_vector: Vec> = vec![]; assert_eq!( response, Response::from(AnnounceResponse { - transaction_id: request.transaction_id, - announce_interval: AnnounceInterval(120i32), - leechers: NumberOfPeers(0i32), - seeders: NumberOfPeers(1i32), + fixed: AnnounceResponseFixedData { + transaction_id: request.transaction_id, + announce_interval: AnnounceInterval(120i32.into()), + leechers: NumberOfPeers(0i32.into()), + seeders: NumberOfPeers(1i32.into()), + }, peers: empty_peer_vector }) ); @@ -657,12 +717,12 @@ mod tests { handle_announce(remote_addr, &request, &tracker).await.unwrap(); - let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; + let peers = tracker.get_torrent_peers(&info_hash.0.into()); assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V4(remote_client_ip), client_port)); } - async fn add_a_torrent_peer_using_ipv6(tracker: Arc) { + fn add_a_torrent_peer_using_ipv6(tracker: &Arc) { let info_hash = AquaticInfoHash([0u8; 20]); let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); @@ -670,17 +730,15 @@ mod tests { let client_port = 8080; let peer_id = AquaticPeerId([255u8; 20]); - let peer_using_ipv6 = TorrentPeerBuilder::default() + let peer_using_ipv6 = TorrentPeerBuilder::new() .with_peer_id(peer::Id(peer_id.0)) - .with_peer_addr(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) + .with_peer_address(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) .into(); - tracker - .update_torrent_with_peer_and_get_stats(&info_hash.0.into(), &peer_using_ipv6) - .await; + tracker.upsert_peer_and_get_stats(&info_hash.0.into(), &peer_using_ipv6); } - async fn 
announce_a_new_peer_using_ipv4(tracker: Arc) -> Response { + async fn announce_a_new_peer_using_ipv4(tracker: Arc) -> Response { let remote_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080); let request = AnnounceRequestBuilder::default() .with_connection_id(into_connection_id(&make(&remote_addr))) @@ -693,12 +751,12 @@ mod tests { async fn when_the_announce_request_comes_from_a_client_using_ipv4_the_response_should_not_include_peers_using_ipv6() { let tracker = public_tracker(); - add_a_torrent_peer_using_ipv6(tracker.clone()).await; + add_a_torrent_peer_using_ipv6(&tracker); let response = announce_a_new_peer_using_ipv4(tracker.clone()).await; // The response should not contain the peer using IPV6 - let peers: Option>> = match response { + let peers: Option>> = match response { Response::AnnounceIpv6(announce_response) => Some(announce_response.peers), _ => None, }; @@ -717,7 +775,12 @@ mod tests { let stats_event_sender = Box::new(stats_event_sender_mock); let tracker = Arc::new( - tracker::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + core::Tracker::new( + &tracker_configuration().core, + Some(stats_event_sender), + statistics::Repo::new(), + ) + .unwrap(), ); handle_announce( @@ -731,14 +794,15 @@ mod tests { mod from_a_loopback_ip { use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + use std::sync::Arc; use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; + use torrust_tracker_primitives::peer; use crate::servers::udp::connection_cookie::{into_connection_id, make}; use crate::servers::udp::handlers::handle_announce; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::servers::udp::handlers::tests::{public_tracker, TorrentPeerBuilder}; - use crate::tracker::peer; #[tokio::test] async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration_if_defined() { @@ -761,17 +825,16 @@ mod tests { 
handle_announce(remote_addr, &request, &tracker).await.unwrap(); - let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; + let peers = tracker.get_torrent_peers(&info_hash.0.into()); - let external_ip_in_tracker_configuration = - tracker.config.external_ip.clone().unwrap().parse::().unwrap(); + let external_ip_in_tracker_configuration = tracker.get_maybe_external_ip().unwrap(); - let expected_peer = TorrentPeerBuilder::default() + let expected_peer = TorrentPeerBuilder::new() .with_peer_id(peer::Id(peer_id.0)) - .with_peer_addr(SocketAddr::new(IpAddr::V4(external_ip_in_tracker_configuration), client_port)) + .with_peer_address(SocketAddr::new(external_ip_in_tracker_configuration, client_port)) .into(); - assert_eq!(peers[0], expected_peer); + assert_eq!(peers[0], Arc::new(expected_peer)); } } } @@ -779,22 +842,23 @@ mod tests { mod using_ipv6 { use std::future; - use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; + use std::net::{IpAddr, Ipv4Addr, SocketAddr}; use std::sync::Arc; use aquatic_udp_protocol::{ - AnnounceInterval, AnnounceResponse, InfoHash as AquaticInfoHash, NumberOfPeers, PeerId as AquaticPeerId, - Response, ResponsePeer, + AnnounceInterval, AnnounceResponse, InfoHash as AquaticInfoHash, Ipv4AddrBytes, Ipv6AddrBytes, NumberOfPeers, + PeerId as AquaticPeerId, Response, ResponsePeer, }; use mockall::predicate::eq; + use torrust_tracker_primitives::peer; + use crate::core::{self, statistics}; use crate::servers::udp::connection_cookie::{into_connection_id, make}; - use crate::servers::udp::handlers::handle_announce; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::servers::udp::handlers::tests::{ public_tracker, sample_ipv6_remote_addr, tracker_configuration, TorrentPeerBuilder, }; - use crate::tracker::{self, peer, statistics}; + use crate::servers::udp::handlers::{handle_announce, AnnounceResponseFixedData}; #[tokio::test] async fn an_announced_peer_should_be_added_to_the_tracker() { 
@@ -818,14 +882,14 @@ mod tests { handle_announce(remote_addr, &request, &tracker).await.unwrap(); - let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; + let peers = tracker.get_torrent_peers(&info_hash.0.into()); - let expected_peer = TorrentPeerBuilder::default() + let expected_peer = TorrentPeerBuilder::new() .with_peer_id(peer::Id(peer_id.0)) - .with_peer_addr(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) + .with_peer_address(SocketAddr::new(IpAddr::V6(client_ip_v6), client_port)) .into(); - assert_eq!(peers[0], expected_peer); + assert_eq!(peers[0], Arc::new(expected_peer)); } #[tokio::test] @@ -841,14 +905,16 @@ mod tests { let response = handle_announce(remote_addr, &request, &public_tracker()).await.unwrap(); - let empty_peer_vector: Vec> = vec![]; + let empty_peer_vector: Vec> = vec![]; assert_eq!( response, Response::from(AnnounceResponse { - transaction_id: request.transaction_id, - announce_interval: AnnounceInterval(120i32), - leechers: NumberOfPeers(0i32), - seeders: NumberOfPeers(1i32), + fixed: AnnounceResponseFixedData { + transaction_id: request.transaction_id, + announce_interval: AnnounceInterval(120i32.into()), + leechers: NumberOfPeers(0i32.into()), + seeders: NumberOfPeers(1i32.into()), + }, peers: empty_peer_vector }) ); @@ -882,30 +948,28 @@ mod tests { handle_announce(remote_addr, &request, &tracker).await.unwrap(); - let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; + let peers = tracker.get_torrent_peers(&info_hash.0.into()); // When using IPv6 the tracker converts the remote client ip into a IPv4 address assert_eq!(peers[0].peer_addr, SocketAddr::new(IpAddr::V6(remote_client_ip), client_port)); } - async fn add_a_torrent_peer_using_ipv4(tracker: Arc) { + fn add_a_torrent_peer_using_ipv4(tracker: &Arc) { let info_hash = AquaticInfoHash([0u8; 20]); let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); let client_port = 8080; let peer_id = AquaticPeerId([255u8; 20]); - let peer_using_ipv4 = 
TorrentPeerBuilder::default() + let peer_using_ipv4 = TorrentPeerBuilder::new() .with_peer_id(peer::Id(peer_id.0)) - .with_peer_addr(SocketAddr::new(IpAddr::V4(client_ip_v4), client_port)) + .with_peer_address(SocketAddr::new(IpAddr::V4(client_ip_v4), client_port)) .into(); - tracker - .update_torrent_with_peer_and_get_stats(&info_hash.0.into(), &peer_using_ipv4) - .await; + tracker.upsert_peer_and_get_stats(&info_hash.0.into(), &peer_using_ipv4); } - async fn announce_a_new_peer_using_ipv6(tracker: Arc) -> Response { + async fn announce_a_new_peer_using_ipv6(tracker: Arc) -> Response { let client_ip_v4 = Ipv4Addr::new(126, 0, 0, 1); let client_ip_v6 = client_ip_v4.to_ipv6_compatible(); let client_port = 8080; @@ -921,12 +985,12 @@ mod tests { async fn when_the_announce_request_comes_from_a_client_using_ipv6_the_response_should_not_include_peers_using_ipv4() { let tracker = public_tracker(); - add_a_torrent_peer_using_ipv4(tracker.clone()).await; + add_a_torrent_peer_using_ipv4(&tracker); let response = announce_a_new_peer_using_ipv6(tracker.clone()).await; // The response should not contain the peer using IPV4 - let peers: Option>> = match response { + let peers: Option>> = match response { Response::AnnounceIpv4(announce_response) => Some(announce_response.peers), _ => None, }; @@ -945,7 +1009,12 @@ mod tests { let stats_event_sender = Box::new(stats_event_sender_mock); let tracker = Arc::new( - tracker::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + core::Tracker::new( + &tracker_configuration().core, + Some(stats_event_sender), + statistics::Repo::new(), + ) + .unwrap(), ); let remote_addr = sample_ipv6_remote_addr(); @@ -963,19 +1032,19 @@ mod tests { use aquatic_udp_protocol::{InfoHash as AquaticInfoHash, PeerId as AquaticPeerId}; + use crate::core; + use crate::core::statistics::Keeper; use crate::servers::udp::connection_cookie::{into_connection_id, make}; use 
crate::servers::udp::handlers::handle_announce; use crate::servers::udp::handlers::tests::announce_request::AnnounceRequestBuilder; use crate::servers::udp::handlers::tests::TrackerConfigurationBuilder; - use crate::tracker; - use crate::tracker::statistics::Keeper; #[tokio::test] async fn the_peer_ip_should_be_changed_to_the_external_ip_in_the_tracker_configuration() { let configuration = Arc::new(TrackerConfigurationBuilder::default().with_external_ip("::126.0.0.1").into()); let (stats_event_sender, stats_repository) = Keeper::new_active_instance(); let tracker = - Arc::new(tracker::Tracker::new(configuration, Some(stats_event_sender), stats_repository).unwrap()); + Arc::new(core::Tracker::new(&configuration.core, Some(stats_event_sender), stats_repository).unwrap()); let loopback_ipv4 = Ipv4Addr::new(127, 0, 0, 1); let loopback_ipv6 = Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1); @@ -999,10 +1068,11 @@ mod tests { handle_announce(remote_addr, &request, &tracker).await.unwrap(); - let peers = tracker.get_all_torrent_peers(&info_hash.0.into()).await; + let peers = tracker.get_torrent_peers(&info_hash.0.into()); + + let external_ip_in_tracker_configuration = tracker.get_maybe_external_ip().unwrap(); - let _external_ip_in_tracker_configuration = - tracker.config.external_ip.clone().unwrap().parse::().unwrap(); + assert!(external_ip_in_tracker_configuration.is_ipv6()); // There's a special type of IPv6 addresses that provide compatibility with IPv4. 
// The last 32 bits of these addresses represent an IPv4, and are represented like this: @@ -1023,18 +1093,19 @@ mod tests { InfoHash, NumberOfDownloads, NumberOfPeers, Response, ScrapeRequest, ScrapeResponse, TorrentScrapeStatistics, TransactionId, }; + use torrust_tracker_primitives::peer; use super::TorrentPeerBuilder; + use crate::core::{self}; use crate::servers::udp::connection_cookie::{into_connection_id, make}; use crate::servers::udp::handlers::handle_scrape; use crate::servers::udp::handlers::tests::{public_tracker, sample_ipv4_remote_addr}; - use crate::tracker::{self, peer}; fn zeroed_torrent_statistics() -> TorrentScrapeStatistics { TorrentScrapeStatistics { - seeders: NumberOfPeers(0), - completed: NumberOfDownloads(0), - leechers: NumberOfPeers(0), + seeders: NumberOfPeers(0.into()), + completed: NumberOfDownloads(0.into()), + leechers: NumberOfPeers(0.into()), } } @@ -1047,7 +1118,7 @@ mod tests { let request = ScrapeRequest { connection_id: into_connection_id(&make(&remote_addr)), - transaction_id: TransactionId(0i32), + transaction_id: TransactionId(0i32.into()), info_hashes, }; @@ -1064,18 +1135,16 @@ mod tests { ); } - async fn add_a_seeder(tracker: Arc, remote_addr: &SocketAddr, info_hash: &InfoHash) { + async fn add_a_seeder(tracker: Arc, remote_addr: &SocketAddr, info_hash: &InfoHash) { let peer_id = peer::Id([255u8; 20]); - let peer = TorrentPeerBuilder::default() + let peer = TorrentPeerBuilder::new() .with_peer_id(peer::Id(peer_id.0)) - .with_peer_addr(*remote_addr) - .with_bytes_left(0) + .with_peer_address(*remote_addr) + .with_number_of_bytes_left(0) .into(); - tracker - .update_torrent_with_peer_and_get_stats(&info_hash.0.into(), &peer) - .await; + tracker.upsert_peer_and_get_stats(&info_hash.0.into(), &peer); } fn build_scrape_request(remote_addr: &SocketAddr, info_hash: &InfoHash) -> ScrapeRequest { @@ -1083,12 +1152,12 @@ mod tests { ScrapeRequest { connection_id: into_connection_id(&make(remote_addr)), - transaction_id: 
TransactionId(0i32), + transaction_id: TransactionId::new(0i32), info_hashes, } } - async fn add_a_sample_seeder_and_scrape(tracker: Arc) -> Response { + async fn add_a_sample_seeder_and_scrape(tracker: Arc) -> Response { let remote_addr = sample_ipv4_remote_addr(); let info_hash = InfoHash([0u8; 20]); @@ -1119,9 +1188,9 @@ mod tests { let torrent_stats = match_scrape_response(add_a_sample_seeder_and_scrape(tracker.clone()).await); let expected_torrent_stats = vec![TorrentScrapeStatistics { - seeders: NumberOfPeers(1), - completed: NumberOfDownloads(0), - leechers: NumberOfPeers(0), + seeders: NumberOfPeers(1.into()), + completed: NumberOfDownloads(0.into()), + leechers: NumberOfPeers(0.into()), }]; assert_eq!(torrent_stats.unwrap().torrent_stats, expected_torrent_stats); @@ -1192,9 +1261,9 @@ mod tests { let torrent_stats = match_scrape_response(handle_scrape(remote_addr, &request, &tracker).await.unwrap()).unwrap(); let expected_torrent_stats = vec![TorrentScrapeStatistics { - seeders: NumberOfPeers(1), - completed: NumberOfDownloads(0), - leechers: NumberOfPeers(0), + seeders: NumberOfPeers(1.into()), + completed: NumberOfDownloads(0.into()), + leechers: NumberOfPeers(0.into()), }]; assert_eq!(torrent_stats.torrent_stats, expected_torrent_stats); @@ -1225,7 +1294,7 @@ mod tests { ScrapeRequest { connection_id: into_connection_id(&make(remote_addr)), - transaction_id: TransactionId(0i32), + transaction_id: TransactionId(0i32.into()), info_hashes, } } @@ -1237,9 +1306,9 @@ mod tests { use mockall::predicate::eq; use super::sample_scrape_request; + use crate::core::{self, statistics}; use crate::servers::udp::handlers::handle_scrape; use crate::servers::udp::handlers::tests::{sample_ipv4_remote_addr, tracker_configuration}; - use crate::tracker::{self, statistics}; #[tokio::test] async fn should_send_the_upd4_scrape_event() { @@ -1253,7 +1322,12 @@ mod tests { let remote_addr = sample_ipv4_remote_addr(); let tracker = Arc::new( - 
tracker::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + core::Tracker::new( + &tracker_configuration().core, + Some(stats_event_sender), + statistics::Repo::new(), + ) + .unwrap(), ); handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), &tracker) @@ -1269,9 +1343,9 @@ mod tests { use mockall::predicate::eq; use super::sample_scrape_request; + use crate::core::{self, statistics}; use crate::servers::udp::handlers::handle_scrape; use crate::servers::udp::handlers::tests::{sample_ipv6_remote_addr, tracker_configuration}; - use crate::tracker::{self, statistics}; #[tokio::test] async fn should_send_the_upd6_scrape_event() { @@ -1285,7 +1359,12 @@ mod tests { let remote_addr = sample_ipv6_remote_addr(); let tracker = Arc::new( - tracker::Tracker::new(tracker_configuration(), Some(stats_event_sender), statistics::Repo::new()).unwrap(), + core::Tracker::new( + &tracker_configuration().core, + Some(stats_event_sender), + statistics::Repo::new(), + ) + .unwrap(), ); handle_scrape(remote_addr, &sample_scrape_request(&remote_addr), &tracker) diff --git a/src/servers/udp/logging.rs b/src/servers/udp/logging.rs new file mode 100644 index 000000000..3891278d7 --- /dev/null +++ b/src/servers/udp/logging.rs @@ -0,0 +1,87 @@ +//! Logging for UDP Tracker requests and responses. 
+ +use std::net::SocketAddr; +use std::time::Duration; + +use aquatic_udp_protocol::{Request, Response, TransactionId}; +use torrust_tracker_primitives::info_hash::InfoHash; + +use super::handlers::RequestId; +use crate::servers::udp::UDP_TRACKER_LOG_TARGET; + +pub fn log_request(request: &Request, request_id: &RequestId, server_socket_addr: &SocketAddr) { + let action = map_action_name(request); + + match &request { + Request::Connect(connect_request) => { + let transaction_id = connect_request.transaction_id; + let transaction_id_str = transaction_id.0.to_string(); + + tracing::span!( + target: UDP_TRACKER_LOG_TARGET, + tracing::Level::INFO, "request", server_socket_addr = %server_socket_addr, action = %action, transaction_id = %transaction_id_str, request_id = %request_id); + } + Request::Announce(announce_request) => { + let transaction_id = announce_request.transaction_id; + let transaction_id_str = transaction_id.0.to_string(); + let connection_id_str = announce_request.connection_id.0.to_string(); + let info_hash_str = InfoHash::from_bytes(&announce_request.info_hash.0).to_hex_string(); + + tracing::span!( + target: UDP_TRACKER_LOG_TARGET, + tracing::Level::INFO, "request", server_socket_addr = %server_socket_addr, action = %action, transaction_id = %transaction_id_str, request_id = %request_id, connection_id = %connection_id_str, info_hash = %info_hash_str); + } + Request::Scrape(scrape_request) => { + let transaction_id = scrape_request.transaction_id; + let transaction_id_str = transaction_id.0.to_string(); + let connection_id_str = scrape_request.connection_id.0.to_string(); + + tracing::span!( + target: UDP_TRACKER_LOG_TARGET, + tracing::Level::INFO, + "request", + server_socket_addr = %server_socket_addr, + action = %action, + transaction_id = %transaction_id_str, + request_id = %request_id, + connection_id = %connection_id_str); + } + }; +} + +fn map_action_name(udp_request: &Request) -> String { + match udp_request { + 
Request::Connect(_connect_request) => "CONNECT".to_owned(), + Request::Announce(_announce_request) => "ANNOUNCE".to_owned(), + Request::Scrape(_scrape_request) => "SCRAPE".to_owned(), + } +} + +pub fn log_response( + _response: &Response, + transaction_id: &TransactionId, + request_id: &RequestId, + server_socket_addr: &SocketAddr, + latency: Duration, +) { + tracing::span!( + target: UDP_TRACKER_LOG_TARGET, + tracing::Level::INFO, + "response", + server_socket_addr = %server_socket_addr, + transaction_id = %transaction_id.0.to_string(), + request_id = %request_id, + latency_ms = %latency.as_millis()); +} + +pub fn log_bad_request(request_id: &RequestId) { + tracing::span!( + target: UDP_TRACKER_LOG_TARGET, + tracing::Level::INFO, "bad request", request_id = %request_id); +} + +pub fn log_error_response(request_id: &RequestId) { + tracing::span!( + target: UDP_TRACKER_LOG_TARGET, + tracing::Level::INFO, "response", request_id = %request_id); +} diff --git a/src/servers/udp/mod.rs b/src/servers/udp/mod.rs index edbfd77d2..8ea05d5b1 100644 --- a/src/servers/udp/mod.rs +++ b/src/servers/udp/mod.rs @@ -5,7 +5,7 @@ //! The UDP tracker is a simple UDP server that responds to these requests: //! //! - `Connect`: used to get a connection ID which must be provided on each -//! request in order to avoid spoofing the source address of the UDP packets. +//! request in order to avoid spoofing the source address of the UDP packets. //! - `Announce`: used to announce the presence of a peer to the tracker. //! - `Scrape`: used to get information about a torrent. //! @@ -22,10 +22,10 @@ //! for more information about the UDP tracker protocol. //! //! > **NOTICE**: [BEP-41](https://www.bittorrent.org/beps/bep_0041.html) is not -//! implemented yet. +//! > implemented yet. //! //! > **NOTICE**: we are using the [`aquatic_udp_protocol`](https://crates.io/crates/aquatic_udp_protocol) -//! crate so requests and responses are handled by it. +//! 
> crate so requests and responses are handled by it. //! //! > **NOTICE**: all values are send in network byte order ([big endian](https://en.wikipedia.org/wiki/Endianness)). //! @@ -53,7 +53,7 @@ //! supports only three types of requests: `Connect`, `Announce` and `Scrape`. //! //! Request are parsed from UDP packets using the [`aquatic_udp_protocol`](https://crates.io/crates/aquatic_udp_protocol) -//! crate and then handled by the [`Tracker`](crate::tracker::Tracker) struct. +//! crate and then handled by the [`Tracker`](crate::core::Tracker) struct. //! And then the response is also build using the [`aquatic_udp_protocol`](https://crates.io/crates/aquatic_udp_protocol) //! and converted to a UDP packet. //! @@ -62,7 +62,7 @@ //! ``` //! //! For the `Announce` request there is a wrapper struct [`AnnounceWrapper`](crate::servers::udp::request::AnnounceWrapper). -//! It was added to add an extra field with the internal [`InfoHash`](crate::shared::bit_torrent::info_hash::InfoHash) struct. +//! It was added to add an extra field with the internal [`InfoHash`](torrust_tracker_primitives::info_hash::InfoHash) struct. //! //! ### Connect //! @@ -83,23 +83,23 @@ //! spoofing can be explained as follows: //! //! 1. No connection state: Unlike TCP, UDP is a connectionless protocol, -//! meaning that it does not establish a connection between two endpoints before -//! exchanging data. As a result, it is more susceptible to IP spoofing, where -//! an attacker sends packets with a forged source IP address, tricking the -//! receiver into believing that they are coming from a legitimate source. +//! meaning that it does not establish a connection between two endpoints before +//! exchanging data. As a result, it is more susceptible to IP spoofing, where +//! an attacker sends packets with a forged source IP address, tricking the +//! receiver into believing that they are coming from a legitimate source. //! //! 2. 
Mitigating IP spoofing: To mitigate IP spoofing in the UDP tracker -//! protocol, a connection ID is used. When a client wants to interact with a -//! tracker, it sends a "connect" request to the tracker, which, in turn, -//! responds with a unique connection ID. This connection ID must be included in -//! all subsequent requests from the client to the tracker. +//! protocol, a connection ID is used. When a client wants to interact with a +//! tracker, it sends a "connect" request to the tracker, which, in turn, +//! responds with a unique connection ID. This connection ID must be included in +//! all subsequent requests from the client to the tracker. //! //! 3. Validating requests: By requiring the connection ID, the tracker can -//! verify that the requests are coming from the same client that initially sent -//! the "connect" request. If an attacker attempts to spoof the client's IP -//! address, they would also need to know the valid connection ID to be accepted -//! by the tracker. This makes it significantly more challenging for an attacker -//! to spoof IP addresses and disrupt the P2P network. +//! verify that the requests are coming from the same client that initially sent +//! the "connect" request. If an attacker attempts to spoof the client's IP +//! address, they would also need to know the valid connection ID to be accepted +//! by the tracker. This makes it significantly more challenging for an attacker +//! to spoof IP addresses and disrupt the P2P network. //! //! There are different ways to generate a connection ID. The most common way is //! to generate a time bound secret. The secret is generated using a time based @@ -109,7 +109,7 @@ //! connection ID = hash(client IP + current time slot + secret seed) //! ``` //! -//! The BEP-15 recommends a two-minute time slot. Refer to [`connection_cookie`](crate::servers::udp::connection_cookie) +//! The BEP-15 recommends a two-minute time slot. Refer to [`connection_cookie`] //! 
for more information about the connection ID generation with this method. //! //! #### Connect Request @@ -161,9 +161,9 @@ //! 8 | [`i32`](std::i64) | `connection_id` | Generated by the tracker to authenticate the client. | `0xC5_58_7C_09_08_48_D8_37` | `-4226491872051668937` //! //! > **NOTICE**: the `connection_id` is used when further information is -//! exchanged with the tracker, to identify the client. This `connection_id` can -//! be reused for multiple requests, but if it's cached for too long, it will -//! not be valid anymore. +//! > exchanged with the tracker, to identify the client. This `connection_id` can +//! > be reused for multiple requests, but if it's cached for too long, it will +//! > not be valid anymore. //! //! > **NOTICE**: `Hex` column is a signed 2's complement. //! @@ -243,41 +243,41 @@ //! circumstances might include: //! //! 1. Network Address Translation (NAT): In cases where a peer is behind a NAT, -//! the private IP address of the peer is not directly routable over the -//! internet. The NAT device translates the private IP address to a public one -//! when sending packets to the tracker. The public IP address is what the -//! tracker sees as the source IP of the incoming request. However, if the peer -//! provides its private IP address in the announce request, the tracker can use -//! this information to facilitate communication between peers in the same -//! private network. +//! the private IP address of the peer is not directly routable over the +//! internet. The NAT device translates the private IP address to a public one +//! when sending packets to the tracker. The public IP address is what the +//! tracker sees as the source IP of the incoming request. However, if the peer +//! provides its private IP address in the announce request, the tracker can use +//! this information to facilitate communication between peers in the same +//! private network. //! //! 2. 
Proxy or VPN usage: If a peer uses a proxy or VPN service to connect to -//! the tracker, the source IP address seen by the tracker will be the one -//! assigned by the proxy or VPN server. In this case, if the peer provides its -//! actual IP address in the announce request, the tracker can use it to -//! establish a direct connection with other peers, bypassing the proxy or VPN -//! server. This might improve performance or help in cases where some peers -//! cannot connect to the proxy or VPN server. +//! the tracker, the source IP address seen by the tracker will be the one +//! assigned by the proxy or VPN server. In this case, if the peer provides its +//! actual IP address in the announce request, the tracker can use it to +//! establish a direct connection with other peers, bypassing the proxy or VPN +//! server. This might improve performance or help in cases where some peers +//! cannot connect to the proxy or VPN server. //! //! 3. Tracker is behind a NAT, firewall, proxy, VPN, or load balancer: In cases -//! where the tracker is behind a NAT, firewall, proxy, VPN, or load balancer, -//! the source IP address of the incoming request will be the public IP address -//! of the NAT, firewall, proxy, VPN, or load balancer. If the peer provides its -//! private IP address in the announce request, the tracker can use this -//! information to establish a direct connection with the peer. +//! where the tracker is behind a NAT, firewall, proxy, VPN, or load balancer, +//! the source IP address of the incoming request will be the public IP address +//! of the NAT, firewall, proxy, VPN, or load balancer. If the peer provides its +//! private IP address in the announce request, the tracker can use this +//! information to establish a direct connection with the peer. //! //! It's important to note that using the provided IP address can pose security //! risks, as malicious peers might spoof their IP addresses in the announce //! 
request to perform various types of attacks. //! //! > **NOTICE**: The current tracker behavior is to ignore the IP address -//! provided by the peer, and use the source IP address of the incoming request, -//! when the tracker is not running behind a proxy, and to use the right-most IP -//! address in the `X-Forwarded-For` header when the tracker is running behind a -//! proxy. +//! > provided by the peer, and use the source IP address of the incoming request, +//! > when the tracker is not running behind a proxy, and to use the right-most IP +//! > address in the `X-Forwarded-For` header when the tracker is running behind a +//! > proxy. //! //! > **NOTICE**: The tracker also changes the peer IP address to the tracker -//! external IP when the peer is using a loopback IP address. +//! > external IP when the peer is using a loopback IP address. //! //! **Sample announce request (UDP packet)** //! @@ -317,11 +317,11 @@ //! 101 | N bytes | | | | //! //! > **NOTICE**: bytes after offset 98 are part of the [BEP-41. UDP Tracker Protocol Extensions](https://www.bittorrent.org/beps/bep_0041.html). -//! There are three options defined for byte 98: `0x0` (`EndOfOptions`), `0x1` (`NOP`) and `0x2` (`URLData`). +//! > There are three options defined for byte 98: `0x0` (`EndOfOptions`), `0x1` (`NOP`) and `0x2` (`URLData`). //! //! > **NOTICE**: `num_want` is being ignored by the tracker. Refer to -//! [issue 262](https://github.com/torrust/torrust-tracker/issues/262) for more -//! information. +//! > [issue 262](https://github.com/torrust/torrust-tracker/issues/262) for more +//! > information. //! //! **Announce request (parsed struct)** //! @@ -342,10 +342,10 @@ //! `port` | [`Port`](aquatic_udp_protocol::common::Port) | `17548` //! //! > **NOTICE**: the `peers_wanted` field is the `num_want` field in the UDP -//! packet. +//! > packet. //! //! We are using a wrapper struct for the aquatic [`AnnounceRequest`](aquatic_udp_protocol::request::AnnounceRequest) -//! 
struct, because we have our internal [`InfoHash`](crate::shared::bit_torrent::info_hash::InfoHash) +//! struct, because we have our internal [`InfoHash`](torrust_tracker_primitives::info_hash::InfoHash) //! struct. //! //! ```text @@ -374,7 +374,7 @@ //! > **NOTICE**: `Hex` column is a signed 2's complement. //! //! > **NOTICE**: `IP address` should always be set to 0 when the peer is using -//! `IPv6`. +//! > `IPv6`. //! //! **Sample announce response (UDP packet)** //! @@ -413,7 +413,7 @@ //! ``` //! //! > **NOTICE**: there are 6 bytes per peer (4 bytes for the `IPv4` address and -//! 2 bytes for the TCP port). +//! > 2 bytes for the TCP port). //! //! UDP packet fields (`IPv4` peer list): //! @@ -433,7 +433,7 @@ //! ``` //! //! > **NOTICE**: there are 18 bytes per peer (16 bytes for the `IPv6` address and -//! 2 bytes for the TCP port). +//! > 2 bytes for the TCP port). //! //! UDP packet fields (`IPv6` peer list): //! @@ -446,7 +446,7 @@ //! > **NOTICE**: `Hex` column is a signed 2's complement. //! //! > **NOTICE**: the peer list does not include the peer that sent the announce -//! request. +//! > request. //! //! **Announce response (struct)** //! @@ -467,21 +467,21 @@ //! //! ### Scrape //! -//! The `scrape` request allows a peer to get [swarm metadata](crate::tracker::torrent::SwarmMetadata) +//! The `scrape` request allows a peer to get [swarm metadata](torrust_tracker_primitives::swarm_metadata::SwarmMetadata) //! for multiple torrents at the same time. //! -//! The response contains the [swarm metadata](crate::tracker::torrent::SwarmMetadata) +//! The response contains the [swarm metadata](torrust_tracker_primitives::swarm_metadata::SwarmMetadata) //! for that torrent: //! -//! - [complete](crate::tracker::torrent::SwarmMetadata::complete) -//! - [downloaded](crate::tracker::torrent::SwarmMetadata::downloaded) -//! - [incomplete](crate::tracker::torrent::SwarmMetadata::incomplete) +//! 
- [complete](torrust_tracker_primitives::swarm_metadata::SwarmMetadata::complete) +//! - [downloaded](torrust_tracker_primitives::swarm_metadata::SwarmMetadata::downloaded) +//! - [incomplete](torrust_tracker_primitives::swarm_metadata::SwarmMetadata::incomplete) //! //! > **NOTICE**: up to about 74 torrents can be scraped at once. A full scrape -//! can't be done with this protocol. This is a limitation of the UDP protocol. -//! Defined with a hardcoded const [`MAX_SCRAPE_TORRENTS`](crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS). -//! Refer to [issue 262](https://github.com/torrust/torrust-tracker/issues/262) -//! for more information about this limitation. +//! > can't be done with this protocol. This is a limitation of the UDP protocol. +//! > Defined with a hardcoded const [`MAX_SCRAPE_TORRENTS`](crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS). +//! > Refer to [issue 262](https://github.com/torrust/torrust-tracker/issues/262) +//! > for more information about this limitation. //! //! #### Scrape Request //! @@ -638,13 +638,19 @@ //! documentation by [Arvid Norberg](https://github.com/arvidn) was very //! supportive in the development of this documentation. Some descriptions were //! taken from the [libtorrent](https://www.rasterbar.com/products/libtorrent/udp_tracker_protocol.html). + +use std::net::SocketAddr; + pub mod connection_cookie; pub mod error; pub mod handlers; +pub mod logging; pub mod peer_builder; pub mod request; pub mod server; +pub const UDP_TRACKER_LOG_TARGET: &str = "UDP TRACKER"; + /// Number of bytes. pub type Bytes = u64; /// The port the peer is listening on. @@ -653,8 +659,8 @@ pub type Port = u16; /// match requests and responses. pub type TransactionId = i64; -/// The maximum number of bytes in a UDP packet. -pub const MAX_PACKET_SIZE: usize = 1496; -/// A magic 64-bit integer constant defined in the protocol that is used to -/// identify the protocol. 
-pub const PROTOCOL_ID: i64 = 0x0417_2710_1980; +#[derive(Clone, Debug)] +pub struct RawRequest { + payload: Vec, + from: SocketAddr, +} diff --git a/src/servers/udp/peer_builder.rs b/src/servers/udp/peer_builder.rs index ac62a7ecd..e54a23443 100644 --- a/src/servers/udp/peer_builder.rs +++ b/src/servers/udp/peer_builder.rs @@ -1,27 +1,36 @@ //! Logic to extract the peer info from the announce request. use std::net::{IpAddr, SocketAddr}; +use torrust_tracker_clock::clock::Time; +use torrust_tracker_primitives::announce_event::AnnounceEvent; +use torrust_tracker_primitives::{peer, NumberOfBytes}; + use super::request::AnnounceWrapper; -use crate::shared::clock::{Current, Time}; -use crate::tracker::peer::{Id, Peer}; +use crate::CurrentClock; -/// Extracts the [`Peer`](crate::tracker::peer::Peer) info from the +/// Extracts the [`peer::Peer`] info from the /// announce request. /// /// # Arguments /// /// * `announce_wrapper` - The announce request to extract the peer info from. -/// * `peer_ip` - The real IP address of the peer, not the one in the announce -/// request. +/// * `peer_ip` - The real IP address of the peer, not the one in the announce request. 
#[must_use] -pub fn from_request(announce_wrapper: &AnnounceWrapper, peer_ip: &IpAddr) -> Peer { - Peer { - peer_id: Id(announce_wrapper.announce_request.peer_id.0), - peer_addr: SocketAddr::new(*peer_ip, announce_wrapper.announce_request.port.0), - updated: Current::now(), - uploaded: announce_wrapper.announce_request.bytes_uploaded, - downloaded: announce_wrapper.announce_request.bytes_downloaded, - left: announce_wrapper.announce_request.bytes_left, - event: announce_wrapper.announce_request.event, +pub fn from_request(announce_wrapper: &AnnounceWrapper, peer_ip: &IpAddr) -> peer::Peer { + let announce_event = match aquatic_udp_protocol::AnnounceEvent::from(announce_wrapper.announce_request.event) { + aquatic_udp_protocol::AnnounceEvent::Started => AnnounceEvent::Started, + aquatic_udp_protocol::AnnounceEvent::Stopped => AnnounceEvent::Stopped, + aquatic_udp_protocol::AnnounceEvent::Completed => AnnounceEvent::Completed, + aquatic_udp_protocol::AnnounceEvent::None => AnnounceEvent::None, + }; + + peer::Peer { + peer_id: peer::Id(announce_wrapper.announce_request.peer_id.0), + peer_addr: SocketAddr::new(*peer_ip, announce_wrapper.announce_request.port.0.into()), + updated: CurrentClock::now(), + uploaded: NumberOfBytes(announce_wrapper.announce_request.bytes_uploaded.0.into()), + downloaded: NumberOfBytes(announce_wrapper.announce_request.bytes_downloaded.0.into()), + left: NumberOfBytes(announce_wrapper.announce_request.bytes_left.0.into()), + event: announce_event, } } diff --git a/src/servers/udp/request.rs b/src/servers/udp/request.rs index 0afa02806..f95fec07a 100644 --- a/src/servers/udp/request.rs +++ b/src/servers/udp/request.rs @@ -6,12 +6,11 @@ //! Some of the type in this module are wrappers around the types in the //! `aquatic_udp_protocol` crate. 
use aquatic_udp_protocol::AnnounceRequest; +use torrust_tracker_primitives::info_hash::InfoHash; -use crate::shared::bit_torrent::info_hash::InfoHash; - -/// Wrapper around [`AnnounceRequest`](aquatic_udp_protocol::request::AnnounceRequest). +/// Wrapper around [`AnnounceRequest`]. pub struct AnnounceWrapper { - /// [`AnnounceRequest`](aquatic_udp_protocol::request::AnnounceRequest) to wrap. + /// [`AnnounceRequest`] to wrap. pub announce_request: AnnounceRequest, /// Info hash of the torrent. pub info_hash: InfoHash, @@ -22,7 +21,7 @@ impl AnnounceWrapper { #[must_use] pub fn new(announce_request: &AnnounceRequest) -> Self { AnnounceWrapper { - announce_request: announce_request.clone(), + announce_request: *announce_request, info_hash: InfoHash(announce_request.info_hash.0), } } diff --git a/src/servers/udp/server.rs b/src/servers/udp/server.rs deleted file mode 100644 index 5e5c98704..000000000 --- a/src/servers/udp/server.rs +++ /dev/null @@ -1,270 +0,0 @@ -//! Module to handle the UDP server instances. -//! -//! There are two main types in this module: -//! -//! - [`UdpServer`](crate::servers::udp::server::UdpServer): a controller to -//! start and stop the server. -//! - [`Udp`](crate::servers::udp::server::Udp): the server launcher. -//! -//! The `UdpServer` is an state machine for a given configuration. This struct -//! represents concrete configuration and state. It allows to start and -//! stop the server but always keeping the same configuration. -//! -//! The `Udp` is the server launcher. It's responsible for launching the UDP -//! but without keeping any state. -//! -//! For the time being, the `UdpServer` is only used for testing purposes, -//! because we want to be able to start and stop the server multiple times, and -//! we want to know the bound address and the current state of the server. -//! In production, the `Udp` launcher is used directly. 
-use std::future::Future; -use std::io::Cursor; -use std::net::SocketAddr; -use std::sync::Arc; - -use aquatic_udp_protocol::Response; -use futures::pin_mut; -use log::{debug, error, info}; -use tokio::net::UdpSocket; -use tokio::task::JoinHandle; - -use crate::servers::signals::shutdown_signal; -use crate::servers::udp::handlers::handle_packet; -use crate::servers::udp::MAX_PACKET_SIZE; -use crate::tracker::Tracker; - -/// Error that can occur when starting or stopping the UDP server. -/// -/// Some errors triggered while starting the server are: -/// -/// - The server cannot bind to the given address. -/// - It cannot get the bound address. -/// -/// Some errors triggered while stopping the server are: -/// -/// - The [`UdpServer`](crate::servers::udp::server::UdpServer) cannot send the -/// shutdown signal to the spawned UDP service thread. -#[derive(Debug)] -pub enum Error { - /// Any kind of error starting or stopping the server. - Error(String), // todo: refactor to use thiserror and add more variants for specific errors. -} - -/// A UDP server instance controller with no UDP instance running. -#[allow(clippy::module_name_repetitions)] -pub type StoppedUdpServer = UdpServer; - -/// A UDP server instance controller with a running UDP instance. -#[allow(clippy::module_name_repetitions)] -pub type RunningUdpServer = UdpServer; - -/// A UDP server instance controller. -/// -/// It's responsible for: -/// -/// - Keeping the initial configuration of the server. -/// - Starting and stopping the server. -/// - Keeping the state of the server: `running` or `stopped`. -/// -/// It's an state machine. Configurations cannot be changed. This struct -/// represents concrete configuration and state. It allows to start and stop the -/// server but always keeping the same configuration. -/// -/// > **NOTICE**: if the configurations changes after running the server it will -/// reset to the initial value after stopping the server. 
This struct is not -/// intended to persist configurations between runs. -#[allow(clippy::module_name_repetitions)] -pub struct UdpServer { - /// The configuration of the server that will be used every time the server - /// is started. - pub cfg: torrust_tracker_configuration::UdpTracker, - /// The state of the server: `running` or `stopped`. - pub state: S, -} - -/// A stopped UDP server state. -pub struct Stopped; - -/// A running UDP server state. -pub struct Running { - /// The address where the server is bound. - pub bind_address: SocketAddr, - stop_job_sender: tokio::sync::oneshot::Sender, - job: JoinHandle<()>, -} - -impl UdpServer { - /// Creates a new `UdpServer` instance in `stopped`state. - #[must_use] - pub fn new(cfg: torrust_tracker_configuration::UdpTracker) -> Self { - Self { cfg, state: Stopped {} } - } - - /// It starts the server and returns a `UdpServer` controller in `running` - /// state. - /// - /// # Errors - /// - /// Will return `Err` if UDP can't bind to given bind address. - pub async fn start(self, tracker: Arc) -> Result, Error> { - let udp = Udp::new(&self.cfg.bind_address) - .await - .map_err(|e| Error::Error(e.to_string()))?; - - let bind_address = udp.socket.local_addr().map_err(|e| Error::Error(e.to_string()))?; - - let (sender, receiver) = tokio::sync::oneshot::channel::(); - - let job = tokio::spawn(async move { - udp.start_with_graceful_shutdown(tracker, shutdown_signal(receiver)).await; - }); - - let running_udp_server: UdpServer = UdpServer { - cfg: self.cfg, - state: Running { - bind_address, - stop_job_sender: sender, - job, - }, - }; - - Ok(running_udp_server) - } -} - -impl UdpServer { - /// It stops the server and returns a `UdpServer` controller in `stopped` - /// state. - /// - /// # Errors - /// - /// Will return `Err` if the oneshot channel to send the stop signal - /// has already been called once. 
- pub async fn stop(self) -> Result, Error> { - self.state.stop_job_sender.send(1).map_err(|e| Error::Error(e.to_string()))?; - - drop(self.state.job.await); - - let stopped_api_server: UdpServer = UdpServer { - cfg: self.cfg, - state: Stopped {}, - }; - - Ok(stopped_api_server) - } -} - -/// A UDP server instance launcher. -pub struct Udp { - socket: Arc, -} - -impl Udp { - /// Creates a new `Udp` instance. - /// - /// # Errors - /// - /// Will return `Err` unable to bind to the supplied `bind_address`. - pub async fn new(bind_address: &str) -> tokio::io::Result { - let socket = UdpSocket::bind(bind_address).await?; - - Ok(Udp { - socket: Arc::new(socket), - }) - } - - /// It starts the UDP server instance. - /// - /// # Panics - /// - /// It would panic if unable to resolve the `local_addr` from the supplied ´socket´. - pub async fn start(&self, tracker: Arc) { - loop { - let mut data = [0; MAX_PACKET_SIZE]; - let socket = self.socket.clone(); - - tokio::select! { - _ = tokio::signal::ctrl_c() => { - info!("Stopping UDP server: {}..", socket.local_addr().unwrap()); - break; - } - Ok((valid_bytes, remote_addr)) = socket.recv_from(&mut data) => { - let payload = data[..valid_bytes].to_vec(); - - info!("Received {} bytes", payload.len()); - debug!("From: {}", &remote_addr); - debug!("Payload: {:?}", payload); - - let response = handle_packet(remote_addr, payload, &tracker).await; - - Udp::send_response(socket, remote_addr, response).await; - } - } - } - } - - /// It starts the UDP server instance with graceful shutdown. - /// - /// # Panics - /// - /// It would panic if unable to resolve the `local_addr` from the supplied ´socket´. - async fn start_with_graceful_shutdown(&self, tracker: Arc, shutdown_signal: F) - where - F: Future, - { - // Pin the future so that it doesn't move to the first loop iteration. - pin_mut!(shutdown_signal); - - loop { - let mut data = [0; MAX_PACKET_SIZE]; - let socket = self.socket.clone(); - - tokio::select! 
{ - () = &mut shutdown_signal => { - info!("Stopping UDP server: {}..", self.socket.local_addr().unwrap()); - break; - } - Ok((valid_bytes, remote_addr)) = socket.recv_from(&mut data) => { - let payload = data[..valid_bytes].to_vec(); - - info!("Received {} bytes", payload.len()); - debug!("From: {}", &remote_addr); - debug!("Payload: {:?}", payload); - - let response = handle_packet(remote_addr, payload, &tracker).await; - - Udp::send_response(socket, remote_addr, response).await; - } - } - } - } - - async fn send_response(socket: Arc, remote_addr: SocketAddr, response: Response) { - let buffer = vec![0u8; MAX_PACKET_SIZE]; - let mut cursor = Cursor::new(buffer); - - match response.write(&mut cursor) { - Ok(()) => { - #[allow(clippy::cast_possible_truncation)] - let position = cursor.position() as usize; - let inner = cursor.get_ref(); - - info!("Sending {} bytes ...", &inner[..position].len()); - debug!("To: {:?}", &remote_addr); - debug!("Payload: {:?}", &inner[..position]); - - Udp::send_packet(socket, &remote_addr, &inner[..position]).await; - - info!("{} bytes sent", &inner[..position].len()); - } - Err(_) => { - error!("could not write response to bytes."); - } - } - } - - async fn send_packet(socket: Arc, remote_addr: &SocketAddr, payload: &[u8]) { - // doesn't matter if it reaches or not - drop(socket.send_to(payload, remote_addr).await); - } -} diff --git a/src/servers/udp/server/bound_socket.rs b/src/servers/udp/server/bound_socket.rs new file mode 100644 index 000000000..42242e44a --- /dev/null +++ b/src/servers/udp/server/bound_socket.rs @@ -0,0 +1,70 @@ +use std::fmt::Debug; +use std::net::SocketAddr; +use std::ops::Deref; + +use url::Url; + +use crate::servers::udp::UDP_TRACKER_LOG_TARGET; + +/// Wrapper for Tokio [`UdpSocket`][`tokio::net::UdpSocket`] that is bound to a particular socket. 
+pub struct BoundSocket { + socket: tokio::net::UdpSocket, +} + +impl BoundSocket { + /// # Errors + /// + /// Will return an error if the socket can't be bound the the provided address. + pub async fn new(addr: SocketAddr) -> Result> { + let bind_addr = format!("udp://{addr}"); + tracing::debug!(target: UDP_TRACKER_LOG_TARGET, bind_addr, "UdpSocket::new (binding)"); + + let socket = tokio::net::UdpSocket::bind(addr).await; + + let socket = match socket { + Ok(socket) => socket, + Err(e) => Err(e)?, + }; + + let local_addr = format!("udp://{addr}"); + tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_addr, "UdpSocket::new (bound)"); + + Ok(Self { socket }) + } + + /// # Panics + /// + /// Will panic if the socket can't get the address it was bound to. + #[must_use] + pub fn address(&self) -> SocketAddr { + self.socket.local_addr().expect("it should get local address") + } + + /// # Panics + /// + /// Will panic if the address the socket was bound to is not a valid address + /// to be used in a URL. 
+ #[must_use] + pub fn url(&self) -> Url { + Url::parse(&format!("udp://{}", self.address())).expect("UDP socket address should be valid") + } +} + +impl Deref for BoundSocket { + type Target = tokio::net::UdpSocket; + + fn deref(&self) -> &Self::Target { + &self.socket + } +} + +impl Debug for BoundSocket { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let local_addr = match self.socket.local_addr() { + Ok(socket) => format!("Receiving From: {socket}"), + Err(err) => format!("Socket Broken: {err}"), + }; + + f.debug_struct("UdpSocket").field("addr", &local_addr).finish_non_exhaustive() + } +} diff --git a/src/servers/udp/server/launcher.rs b/src/servers/udp/server/launcher.rs new file mode 100644 index 000000000..7b40f6604 --- /dev/null +++ b/src/servers/udp/server/launcher.rs @@ -0,0 +1,156 @@ +use std::net::SocketAddr; +use std::sync::Arc; +use std::time::Duration; + +use derive_more::Constructor; +use futures_util::StreamExt; +use tokio::select; +use tokio::sync::oneshot; + +use super::request_buffer::ActiveRequests; +use crate::bootstrap::jobs::Started; +use crate::core::Tracker; +use crate::servers::logging::STARTED_ON; +use crate::servers::registar::ServiceHealthCheckJob; +use crate::servers::signals::{shutdown_signal_with_message, Halted}; +use crate::servers::udp::server::bound_socket::BoundSocket; +use crate::servers::udp::server::processor::Processor; +use crate::servers::udp::server::receiver::Receiver; +use crate::servers::udp::UDP_TRACKER_LOG_TARGET; +use crate::shared::bit_torrent::tracker::udp::client::check; + +/// A UDP server instance launcher. +#[derive(Constructor)] +pub struct Launcher; + +impl Launcher { + /// It starts the UDP server instance with graceful shutdown. + /// + /// # Panics + /// + /// It panics if unable to bind to udp socket, and get the address from the udp socket. + /// It also panics if unable to send address of socket. 
+ pub async fn run_with_graceful_shutdown( + tracker: Arc, + bind_to: SocketAddr, + tx_start: oneshot::Sender, + rx_halt: oneshot::Receiver, + ) { + let halt_task = tokio::task::spawn(shutdown_signal_with_message( + rx_halt, + format!("Halting UDP Service Bound to Socket: {bind_to}"), + )); + + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "Starting on: {bind_to}"); + + let socket = tokio::time::timeout(Duration::from_millis(5000), BoundSocket::new(bind_to)) + .await + .expect("it should bind to the socket within five seconds"); + + let bound_socket = match socket { + Ok(socket) => socket, + Err(e) => { + tracing::error!(target: UDP_TRACKER_LOG_TARGET, addr = %bind_to, err = %e, "Udp::run_with_graceful_shutdown panic! (error when building socket)" ); + panic!("could not bind to socket!"); + } + }; + + let address = bound_socket.address(); + let local_udp_url = bound_socket.url().to_string(); + + tracing::info!(target: UDP_TRACKER_LOG_TARGET, "{STARTED_ON}: {local_udp_url}"); + + let receiver = Receiver::new(bound_socket.into()); + + tracing::trace!(target: UDP_TRACKER_LOG_TARGET, local_udp_url, "Udp::run_with_graceful_shutdown (spawning main loop)"); + + let running = { + let local_addr = local_udp_url.clone(); + tokio::task::spawn(async move { + tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_with_graceful_shutdown::task (listening...)"); + let () = Self::run_udp_server_main(receiver, tracker.clone()).await; + }) + }; + + tx_start + .send(Started { address }) + .expect("the UDP Tracker service should not be dropped"); + + tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_udp_url, "Udp::run_with_graceful_shutdown (started)"); + + let stop = running.abort_handle(); + + select! 
{ + _ = running => { tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_udp_url, "Udp::run_with_graceful_shutdown (stopped)"); }, + _ = halt_task => { tracing::debug!(target: UDP_TRACKER_LOG_TARGET, local_udp_url, "Udp::run_with_graceful_shutdown (halting)"); } + } + stop.abort(); + + tokio::task::yield_now().await; // lets allow the other threads to complete. + } + + #[must_use] + pub fn check(binding: &SocketAddr) -> ServiceHealthCheckJob { + let binding = *binding; + let info = format!("checking the udp tracker health check at: {binding}"); + + let job = tokio::spawn(async move { check(&binding).await }); + + ServiceHealthCheckJob::new(binding, info, job) + } + + async fn run_udp_server_main(mut receiver: Receiver, tracker: Arc) { + let active_requests = &mut ActiveRequests::default(); + + let addr = receiver.bound_socket_address(); + let local_addr = format!("udp://{addr}"); + + loop { + let processor = Processor::new(receiver.socket.clone(), tracker.clone()); + + if let Some(req) = { + tracing::trace!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_udp_server (wait for request)"); + receiver.next().await + } { + tracing::trace!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_udp_server::loop (in)"); + + let req = match req { + Ok(req) => req, + Err(e) => { + if e.kind() == std::io::ErrorKind::Interrupted { + tracing::warn!(target: UDP_TRACKER_LOG_TARGET, local_addr, err = %e, "Udp::run_udp_server::loop (interrupted)"); + return; + } + tracing::error!(target: UDP_TRACKER_LOG_TARGET, local_addr, err = %e, "Udp::run_udp_server::loop break: (got error)"); + break; + } + }; + + // We spawn the new task even if there active requests buffer is + // full. This could seem counterintuitive because we are accepting + // more request and consuming more memory even if the server is + // already busy. However, we "force_push" the new tasks in the + // buffer. That means, in the worst scenario we will abort a + // running task to make place for the new task. 
+ // + // Once concern could be to reach an starvation point were we + // are only adding and removing tasks without given them the + // chance to finish. However, the buffer is yielding before + // aborting one tasks, giving it the chance to finish. + let abort_handle: tokio::task::AbortHandle = tokio::task::spawn(processor.process_request(req)).abort_handle(); + + if abort_handle.is_finished() { + continue; + } + + active_requests.force_push(abort_handle, &local_addr).await; + } else { + tokio::task::yield_now().await; + + // the request iterator returned `None`. + tracing::error!(target: UDP_TRACKER_LOG_TARGET, local_addr, "Udp::run_udp_server breaking: (ran dry, should not happen in production!)"); + break; + } + } + } +} diff --git a/src/servers/udp/server/mod.rs b/src/servers/udp/server/mod.rs new file mode 100644 index 000000000..16133e21b --- /dev/null +++ b/src/servers/udp/server/mod.rs @@ -0,0 +1,169 @@ +//! Module to handle the UDP server instances. +use std::fmt::Debug; + +use super::RawRequest; + +pub mod bound_socket; +pub mod launcher; +pub mod processor; +pub mod receiver; +pub mod request_buffer; +pub mod spawner; +pub mod states; + +/// Error that can occur when starting or stopping the UDP server. +/// +/// Some errors triggered while starting the server are: +/// +/// - The server cannot bind to the given address. +/// - It cannot get the bound address. +/// +/// Some errors triggered while stopping the server are: +/// +/// - The [`Server`] cannot send the shutdown signal to the spawned UDP service thread. +#[derive(Debug)] +pub enum UdpError { + /// Any kind of error starting or stopping the server. + Socket(std::io::Error), + Error(String), +} + +/// A UDP server. +/// +/// It's an state machine. Configurations cannot be changed. This struct +/// represents concrete configuration and state. It allows to start and stop the +/// server but always keeping the same configuration. 
+/// +/// > **NOTICE**: if the configurations changes after running the server it will +/// > reset to the initial value after stopping the server. This struct is not +/// > intended to persist configurations between runs. +#[allow(clippy::module_name_repetitions)] +pub struct Server { + /// The state of the server: `running` or `stopped`. + pub state: S, +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + use std::time::Duration; + + use torrust_tracker_test_helpers::configuration::ephemeral_public; + + use super::spawner::Spawner; + use super::Server; + use crate::bootstrap::app::initialize_with_configuration; + use crate::servers::registar::Registar; + + #[tokio::test] + async fn it_should_be_able_to_start_and_stop() { + let cfg = Arc::new(ephemeral_public()); + let tracker = initialize_with_configuration(&cfg); + let udp_trackers = cfg.udp_trackers.clone().expect("missing UDP trackers configuration"); + let config = &udp_trackers[0]; + let bind_to = config.bind_address; + let register = &Registar::default(); + + let stopped = Server::new(Spawner::new(bind_to)); + + let started = stopped + .start(tracker, register.give_form()) + .await + .expect("it should start the server"); + + let stopped = started.stop().await.expect("it should stop the server"); + + tokio::time::sleep(Duration::from_secs(1)).await; + + assert_eq!(stopped.state.spawner.bind_to, bind_to); + } + + #[tokio::test] + async fn it_should_be_able_to_start_and_stop_with_wait() { + let cfg = Arc::new(ephemeral_public()); + let tracker = initialize_with_configuration(&cfg); + let config = &cfg.udp_trackers.as_ref().unwrap().first().unwrap(); + let bind_to = config.bind_address; + let register = &Registar::default(); + + let stopped = Server::new(Spawner::new(bind_to)); + + let started = stopped + .start(tracker, register.give_form()) + .await + .expect("it should start the server"); + + tokio::time::sleep(Duration::from_secs(1)).await; + + let stopped = started.stop().await.expect("it should stop 
the server"); + + tokio::time::sleep(Duration::from_secs(1)).await; + + assert_eq!(stopped.state.spawner.bind_to, bind_to); + } +} + +/// Todo: submit test to tokio documentation. +#[cfg(test)] +mod test_tokio { + use std::sync::Arc; + use std::time::Duration; + + use tokio::sync::Barrier; + use tokio::task::JoinSet; + + #[tokio::test] + async fn test_barrier_with_aborted_tasks() { + // Create a barrier that requires 10 tasks to proceed. + let barrier = Arc::new(Barrier::new(10)); + let mut tasks = JoinSet::default(); + let mut handles = Vec::default(); + + // Set Barrier to 9/10. + for _ in 0..9 { + let c = barrier.clone(); + handles.push(tasks.spawn(async move { + c.wait().await; + })); + } + + // Abort two tasks: Barrier: 7/10. + for _ in 0..2 { + if let Some(handle) = handles.pop() { + handle.abort(); + } + } + + // Spawn a single task: Barrier 8/10. + let c = barrier.clone(); + handles.push(tasks.spawn(async move { + c.wait().await; + })); + + // give a chance fro the barrier to release. + tokio::time::sleep(Duration::from_millis(50)).await; + + // assert that the barrier isn't removed, i.e. 8, not 10. + for h in &handles { + assert!(!h.is_finished()); + } + + // Spawn two more tasks to trigger the barrier release: Barrier 10/10. + for _ in 0..2 { + let c = barrier.clone(); + handles.push(tasks.spawn(async move { + c.wait().await; + })); + } + + // give a chance fro the barrier to release. 
+ tokio::time::sleep(Duration::from_millis(50)).await; + + // assert that the barrier has been triggered + for h in &handles { + assert!(h.is_finished()); + } + + tasks.shutdown().await; + } +} diff --git a/src/servers/udp/server/processor.rs b/src/servers/udp/server/processor.rs new file mode 100644 index 000000000..e633a2358 --- /dev/null +++ b/src/servers/udp/server/processor.rs @@ -0,0 +1,66 @@ +use std::io::Cursor; +use std::net::SocketAddr; +use std::sync::Arc; + +use aquatic_udp_protocol::Response; + +use super::bound_socket::BoundSocket; +use crate::core::Tracker; +use crate::servers::udp::{handlers, RawRequest, UDP_TRACKER_LOG_TARGET}; + +pub struct Processor { + socket: Arc, + tracker: Arc, +} + +impl Processor { + pub fn new(socket: Arc, tracker: Arc) -> Self { + Self { socket, tracker } + } + + pub async fn process_request(self, request: RawRequest) { + tracing::trace!(target: UDP_TRACKER_LOG_TARGET, request = %request.from, "Udp::process_request (receiving)"); + + let from = request.from; + let response = handlers::handle_packet(request, &self.tracker, self.socket.address()).await; + self.send_response(from, response).await; + } + + async fn send_response(self, to: SocketAddr, response: Response) { + let response_type = match &response { + Response::Connect(_) => "Connect".to_string(), + Response::AnnounceIpv4(_) => "AnnounceIpv4".to_string(), + Response::AnnounceIpv6(_) => "AnnounceIpv6".to_string(), + Response::Scrape(_) => "Scrape".to_string(), + Response::Error(e) => format!("Error: {e:?}"), + }; + + tracing::debug!(target: UDP_TRACKER_LOG_TARGET, target = ?to, response_type, "Udp::send_response (sending)"); + + let mut writer = Cursor::new(Vec::with_capacity(200)); + + match response.write_bytes(&mut writer) { + Ok(()) => { + let bytes_count = writer.get_ref().len(); + let payload = writer.get_ref(); + + tracing::debug!(target: UDP_TRACKER_LOG_TARGET, ?to, bytes_count, "Udp::send_response (sending...)" ); + tracing::trace!(target: 
UDP_TRACKER_LOG_TARGET, ?to, bytes_count, ?payload, "Udp::send_response (sending...)"); + + self.send_packet(&to, payload).await; + + tracing::trace!(target:UDP_TRACKER_LOG_TARGET, ?to, bytes_count, "Udp::send_response (sent)"); + } + Err(e) => { + tracing::error!(target: UDP_TRACKER_LOG_TARGET, ?to, response_type, err = %e, "Udp::send_response (error)"); + } + } + } + + async fn send_packet(&self, remote_addr: &SocketAddr, payload: &[u8]) { + tracing::trace!(target: UDP_TRACKER_LOG_TARGET, to = %remote_addr, ?payload, "Udp::send_response (sending)"); + + // doesn't matter if it reaches or not + drop(self.socket.send_to(payload, remote_addr).await); + } +} diff --git a/src/servers/udp/server/receiver.rs b/src/servers/udp/server/receiver.rs new file mode 100644 index 000000000..0176930a4 --- /dev/null +++ b/src/servers/udp/server/receiver.rs @@ -0,0 +1,54 @@ +use std::cell::RefCell; +use std::net::SocketAddr; +use std::pin::Pin; +use std::sync::Arc; +use std::task::{Context, Poll}; + +use futures::Stream; + +use super::bound_socket::BoundSocket; +use super::RawRequest; +use crate::shared::bit_torrent::tracker::udp::MAX_PACKET_SIZE; + +pub struct Receiver { + pub socket: Arc, + data: RefCell<[u8; MAX_PACKET_SIZE]>, +} + +impl Receiver { + #[must_use] + pub fn new(bound_socket: Arc) -> Self { + Receiver { + socket: bound_socket, + data: RefCell::new([0; MAX_PACKET_SIZE]), + } + } + + pub fn bound_socket_address(&self) -> SocketAddr { + self.socket.address() + } +} + +impl Stream for Receiver { + type Item = std::io::Result; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let mut buf = *self.data.borrow_mut(); + let mut buf = tokio::io::ReadBuf::new(&mut buf); + + let Poll::Ready(ready) = self.socket.poll_recv_from(cx, &mut buf) else { + return Poll::Pending; + }; + + let res = match ready { + Ok(from) => { + let payload = buf.filled().to_vec(); + let request = RawRequest { payload, from }; + Some(Ok(request)) + } + Err(err) => Some(Err(err)), 
+ }; + + Poll::Ready(res) + } +} diff --git a/src/servers/udp/server/request_buffer.rs b/src/servers/udp/server/request_buffer.rs new file mode 100644 index 000000000..ffbd9565d --- /dev/null +++ b/src/servers/udp/server/request_buffer.rs @@ -0,0 +1,140 @@ +use ringbuf::traits::{Consumer, Observer, Producer}; +use ringbuf::StaticRb; +use tokio::task::AbortHandle; + +use crate::servers::udp::UDP_TRACKER_LOG_TARGET; + +/// A ring buffer for managing active UDP request abort handles. +/// +/// The `ActiveRequests` struct maintains a fixed-size ring buffer of abort +/// handles for UDP request processor tasks. It ensures that at most 50 requests +/// are handled concurrently, and provides mechanisms to handle buffer overflow +/// by removing finished or oldest unfinished tasks. +#[derive(Default)] +pub struct ActiveRequests { + rb: StaticRb, // The number of requests handled simultaneously. +} + +impl std::fmt::Debug for ActiveRequests { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let (left, right) = &self.rb.as_slices(); + let dbg = format!("capacity: {}, left: {left:?}, right: {right:?}", &self.rb.capacity()); + f.debug_struct("ActiveRequests").field("rb", &dbg).finish() + } +} + +impl Drop for ActiveRequests { + fn drop(&mut self) { + for h in self.rb.pop_iter() { + if !h.is_finished() { + h.abort(); + } + } + } +} + +impl ActiveRequests { + /// Inserts an abort handle for a UDP request processor task. + /// + /// If the buffer is full, this method attempts to make space by: + /// + /// 1. Removing finished tasks. + /// 2. Removing the oldest unfinished task if no finished tasks are found. + /// + /// # Panics + /// + /// This method will panic if it cannot make space for adding a new handle. + /// + /// # Arguments + /// + /// * `abort_handle` - The `AbortHandle` for the UDP request processor task. + /// * `local_addr` - A string slice representing the local address for logging. 
+ pub async fn force_push(&mut self, new_task: AbortHandle, local_addr: &str) { + // Attempt to add the new handle to the buffer. + match self.rb.try_push(new_task) { + Ok(()) => { + // Successfully added the task, no further action needed. + } + Err(new_task) => { + // Buffer is full, attempt to make space. + + let mut finished: u64 = 0; + let mut unfinished_task = None; + + for old_task in self.rb.pop_iter() { + // We found a finished tasks ... increase the counter and + // continue searching for more and ... + if old_task.is_finished() { + finished += 1; + continue; + } + + // The current removed tasks is not finished. + + // Give it a second chance to finish. + tokio::task::yield_now().await; + + // Recheck if it finished ... increase the counter and + // continue searching for more and ... + if old_task.is_finished() { + finished += 1; + continue; + } + + // At this point we found a "definitive" unfinished task. + + // Log unfinished task. + tracing::debug!( + target: UDP_TRACKER_LOG_TARGET, + local_addr, + removed_count = finished, + "Udp::run_udp_server::loop (got unfinished task)" + ); + + // If no finished tasks were found, abort the current + // unfinished task. + if finished == 0 { + // We make place aborting this task. + old_task.abort(); + + tracing::warn!( + target: UDP_TRACKER_LOG_TARGET, + local_addr, + "Udp::run_udp_server::loop aborting request: (no finished tasks)" + ); + + break; + } + + // At this point we found at least one finished task, but the + // current one is not finished and it was removed from the + // buffer, so we need to re-insert in in the buffer. + + // Save the unfinished task for re-entry. + unfinished_task = Some(old_task); + } + + // After this point there can't be a race condition because only + // one thread owns the active buffer. There is no way for the + // buffer to be full again. That means the "expects" should + // never happen. + + // Reinsert the unfinished task if any. 
+ if let Some(h) = unfinished_task { + self.rb.try_push(h).expect("it was previously inserted"); + } + + // Insert the new task. + // + // Notice that space has already been made for this new task in + // the buffer. One or many old task have already been finished + // or yielded, freeing space in the buffer. Or a single + // unfinished task has been aborted to make space for this new + // task. + if !new_task.is_finished() { + self.rb.try_push(new_task).expect("it should have space for this new task."); + } + } + }; + } +} diff --git a/src/servers/udp/server/spawner.rs b/src/servers/udp/server/spawner.rs new file mode 100644 index 000000000..e4612fbe0 --- /dev/null +++ b/src/servers/udp/server/spawner.rs @@ -0,0 +1,38 @@ +//! A thin wrapper for tokio spawn to launch the UDP server launcher as a new task. +use std::net::SocketAddr; +use std::sync::Arc; + +use derive_more::Constructor; +use tokio::sync::oneshot; +use tokio::task::JoinHandle; + +use super::launcher::Launcher; +use crate::bootstrap::jobs::Started; +use crate::core::Tracker; +use crate::servers::signals::Halted; + +#[derive(Constructor, Copy, Clone, Debug)] +pub struct Spawner { + pub bind_to: SocketAddr, +} + +impl Spawner { + /// It spawns a new task to run the UDP server instance. + /// + /// # Panics + /// + /// It would panic if unable to resolve the `local_addr` from the supplied ´socket´. 
+ pub fn spawn_launcher( + &self, + tracker: Arc, + tx_start: oneshot::Sender, + rx_halt: oneshot::Receiver, + ) -> JoinHandle { + let spawner = Self::new(self.bind_to); + + tokio::spawn(async move { + Launcher::run_with_graceful_shutdown(tracker, spawner.bind_to, tx_start, rx_halt).await; + spawner + }) + } +} diff --git a/src/servers/udp/server/states.rs b/src/servers/udp/server/states.rs new file mode 100644 index 000000000..d0a2e4e8a --- /dev/null +++ b/src/servers/udp/server/states.rs @@ -0,0 +1,115 @@ +use std::fmt::Debug; +use std::net::SocketAddr; +use std::sync::Arc; + +use derive_more::Constructor; +use tokio::task::JoinHandle; + +use super::spawner::Spawner; +use super::{Server, UdpError}; +use crate::bootstrap::jobs::Started; +use crate::core::Tracker; +use crate::servers::registar::{ServiceRegistration, ServiceRegistrationForm}; +use crate::servers::signals::Halted; +use crate::servers::udp::server::launcher::Launcher; +use crate::servers::udp::UDP_TRACKER_LOG_TARGET; + +/// A UDP server instance controller with no UDP instance running. +#[allow(clippy::module_name_repetitions)] +pub type StoppedUdpServer = Server; + +/// A UDP server instance controller with a running UDP instance. +#[allow(clippy::module_name_repetitions)] +pub type RunningUdpServer = Server; + +/// A stopped UDP server state. + +pub struct Stopped { + pub spawner: Spawner, +} + +/// A running UDP server state. +#[derive(Debug, Constructor)] +pub struct Running { + /// The address where the server is bound. + pub binding: SocketAddr, + pub halt_task: tokio::sync::oneshot::Sender, + pub task: JoinHandle, +} + +impl Server { + /// Creates a new `UdpServer` instance in `stopped`state. + #[must_use] + pub fn new(spawner: Spawner) -> Self { + Self { + state: Stopped { spawner }, + } + } + + /// It starts the server and returns a `UdpServer` controller in `running` + /// state. + /// + /// # Errors + /// + /// Will return `Err` if UDP can't bind to given bind address. 
+ /// + /// # Panics + /// + /// It panics if unable to receive the bound socket address from service. + /// + pub async fn start(self, tracker: Arc, form: ServiceRegistrationForm) -> Result, std::io::Error> { + let (tx_start, rx_start) = tokio::sync::oneshot::channel::(); + let (tx_halt, rx_halt) = tokio::sync::oneshot::channel::(); + + assert!(!tx_halt.is_closed(), "Halt channel for UDP tracker should be open"); + + // May need to wrap in a task to about a tokio bug. + let task = self.state.spawner.spawn_launcher(tracker, tx_start, rx_halt); + + let binding = rx_start.await.expect("it should be able to start the service").address; + let local_addr = format!("udp://{binding}"); + + form.send(ServiceRegistration::new(binding, Launcher::check)) + .expect("it should be able to send service registration"); + + let running_udp_server: Server = Server { + state: Running { + binding, + halt_task: tx_halt, + task, + }, + }; + + tracing::trace!(target: UDP_TRACKER_LOG_TARGET, local_addr, "UdpServer::start (running)"); + + Ok(running_udp_server) + } +} + +impl Server { + /// It stops the server and returns a `UdpServer` controller in `stopped` + /// state. + /// + /// # Errors + /// + /// Will return `Err` if the oneshot channel to send the stop signal + /// has already been called once. + /// + /// # Panics + /// + /// It panics if unable to shutdown service. + pub async fn stop(self) -> Result, UdpError> { + self.state + .halt_task + .send(Halted::Normal) + .map_err(|e| UdpError::Error(e.to_string()))?; + + let launcher = self.state.task.await.expect("it should shutdown service"); + + let stopped_api_server: Server = Server { + state: Stopped { spawner: launcher }, + }; + + Ok(stopped_api_server) + } +} diff --git a/src/shared/bit_torrent/common.rs b/src/shared/bit_torrent/common.rs index fd52e098c..46026ac47 100644 --- a/src/shared/bit_torrent/common.rs +++ b/src/shared/bit_torrent/common.rs @@ -1,11 +1,8 @@ //! `BitTorrent` protocol primitive types //! //! [BEP 3. 
The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) -use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; -use serde::{Deserialize, Serialize}; /// The maximum number of torrents that can be returned in an `scrape` response. -/// It's also the maximum number of peers returned in an `announce` response. /// /// The [BEP 15. UDP Tracker Protocol for `BitTorrent`](https://www.bittorrent.org/beps/bep_0015.html) /// defines this limit: @@ -20,37 +17,6 @@ pub const MAX_SCRAPE_TORRENTS: u8 = 74; /// HTTP tracker authentication key length. /// -/// See function to [`generate`](crate::tracker::auth::generate) the -/// [`ExpiringKeys`](crate::tracker::auth::ExpiringKey) for more information. +/// For more information see function [`generate_key`](crate::core::auth::generate_key) to generate the +/// [`PeerKey`](crate::core::auth::PeerKey). pub const AUTH_KEY_LENGTH: usize = 32; - -#[repr(u32)] -#[derive(Serialize, Deserialize, Debug, PartialEq, Eq, Clone)] -enum Actions { - // todo: it seems this enum is not used anywhere. Values match the ones in - // aquatic_udp_protocol::request::Request::from_bytes. - Connect = 0, - Announce = 1, - Scrape = 2, - Error = 3, -} - -/// Announce events. Described on the -/// [BEP 3. The `BitTorrent` Protocol Specification](https://www.bittorrent.org/beps/bep_0003.html) -#[derive(Serialize, Deserialize)] -#[serde(remote = "AnnounceEvent")] -pub enum AnnounceEventDef { - /// The peer has started downloading the torrent. - Started, - /// The peer has ceased downloading the torrent. - Stopped, - /// The peer has completed downloading the torrent. - Completed, - /// This is one of the announcements done at regular intervals. - None, -} - -/// Number of bytes downloaded, uploaded or pending to download (left) by the peer. 
-#[derive(Serialize, Deserialize)] -#[serde(remote = "NumberOfBytes")] -pub struct NumberOfBytesDef(pub i64); diff --git a/src/shared/bit_torrent/info_hash.rs b/src/shared/bit_torrent/info_hash.rs index 20c3cb38b..506c37758 100644 --- a/src/shared/bit_torrent/info_hash.rs +++ b/src/shared/bit_torrent/info_hash.rs @@ -129,169 +129,38 @@ //! You can hash that byte string with //! //! The result is a 20-char string: `5452869BE36F9F3350CCEE6B4544E7E76CAAADAB` -use std::panic::Location; -use thiserror::Error; +use torrust_tracker_primitives::info_hash::InfoHash; -/// `BitTorrent` Info Hash v1 -#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)] -pub struct InfoHash(pub [u8; 20]); +pub mod fixture { + use std::hash::{DefaultHasher, Hash, Hasher}; -const INFO_HASH_BYTES_LEN: usize = 20; + use super::InfoHash; -impl InfoHash { - /// Create a new `InfoHash` from a byte slice. + /// Generate as semi-stable pseudo-random infohash /// - /// # Panics + /// Note: If the [`DefaultHasher`] implementation changes + /// so will the resulting info-hashes. /// - /// Will panic if byte slice does not contains the exact amount of bytes need for the `InfoHash`. - #[must_use] - pub fn from_bytes(bytes: &[u8]) -> Self { - assert_eq!(bytes.len(), INFO_HASH_BYTES_LEN); - let mut ret = Self([0u8; INFO_HASH_BYTES_LEN]); - ret.0.clone_from_slice(bytes); - ret - } - - /// Returns the `InfoHash` internal byte array. - #[must_use] - pub fn bytes(&self) -> [u8; 20] { - self.0 - } - - /// Returns the `InfoHash` as a hex string. + /// The results should not be relied upon between versions. 
#[must_use] - pub fn to_hex_string(&self) -> String { - self.to_string() - } -} - -impl std::fmt::Display for InfoHash { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let mut chars = [0u8; 40]; - binascii::bin2hex(&self.0, &mut chars).expect("failed to hexlify"); - write!(f, "{}", std::str::from_utf8(&chars).unwrap()) - } -} - -impl std::str::FromStr for InfoHash { - type Err = binascii::ConvertError; - - fn from_str(s: &str) -> Result { - let mut i = Self([0u8; 20]); - if s.len() != 40 { - return Err(binascii::ConvertError::InvalidInputLength); - } - binascii::hex2bin(s.as_bytes(), &mut i.0)?; - Ok(i) - } -} - -impl Ord for InfoHash { - fn cmp(&self, other: &Self) -> std::cmp::Ordering { - self.0.cmp(&other.0) - } -} - -impl std::cmp::PartialOrd for InfoHash { - fn partial_cmp(&self, other: &InfoHash) -> Option { - Some(self.cmp(other)) - } -} + pub fn gen_seeded_infohash(seed: &u64) -> InfoHash { + let mut buf_a: [[u8; 8]; 4] = Default::default(); + let mut buf_b = InfoHash::default(); -impl std::convert::From<&[u8]> for InfoHash { - fn from(data: &[u8]) -> InfoHash { - assert_eq!(data.len(), 20); - let mut ret = InfoHash([0u8; 20]); - ret.0.clone_from_slice(data); - ret - } -} - -impl std::convert::From<[u8; 20]> for InfoHash { - fn from(val: [u8; 20]) -> Self { - InfoHash(val) - } -} - -/// Errors that can occur when converting from a `Vec` to an `InfoHash`. -#[derive(Error, Debug)] -pub enum ConversionError { - /// Not enough bytes for infohash. An infohash is 20 bytes. - #[error("not enough bytes for infohash: {message} {location}")] - NotEnoughBytes { - location: &'static Location<'static>, - message: String, - }, - /// Too many bytes for infohash. An infohash is 20 bytes. 
- #[error("too many bytes for infohash: {message} {location}")] - TooManyBytes { - location: &'static Location<'static>, - message: String, - }, -} - -impl TryFrom> for InfoHash { - type Error = ConversionError; + let mut hasher = DefaultHasher::new(); + seed.hash(&mut hasher); - fn try_from(bytes: Vec) -> Result { - if bytes.len() < INFO_HASH_BYTES_LEN { - return Err(ConversionError::NotEnoughBytes { - location: Location::caller(), - message: format! {"got {} bytes, expected {}", bytes.len(), INFO_HASH_BYTES_LEN}, - }); - } - if bytes.len() > INFO_HASH_BYTES_LEN { - return Err(ConversionError::TooManyBytes { - location: Location::caller(), - message: format! {"got {} bytes, expected {}", bytes.len(), INFO_HASH_BYTES_LEN}, - }); + for u in &mut buf_a { + seed.hash(&mut hasher); + *u = hasher.finish().to_le_bytes(); } - Ok(Self::from_bytes(&bytes)) - } -} -impl serde::ser::Serialize for InfoHash { - fn serialize(&self, serializer: S) -> Result { - let mut buffer = [0u8; 40]; - let bytes_out = binascii::bin2hex(&self.0, &mut buffer).ok().unwrap(); - let str_out = std::str::from_utf8(bytes_out).unwrap(); - serializer.serialize_str(str_out) - } -} - -impl<'de> serde::de::Deserialize<'de> for InfoHash { - fn deserialize>(des: D) -> Result { - des.deserialize_str(InfoHashVisitor) - } -} - -struct InfoHashVisitor; - -impl<'v> serde::de::Visitor<'v> for InfoHashVisitor { - type Value = InfoHash; - - fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "a 40 character long hash") - } - - fn visit_str(self, v: &str) -> Result { - if v.len() != 40 { - return Err(serde::de::Error::invalid_value( - serde::de::Unexpected::Str(v), - &"a 40 character long string", - )); + for (a, b) in buf_a.iter().flat_map(|a| a.iter()).zip(buf_b.0.iter_mut()) { + *b = *a; } - let mut res = InfoHash([0u8; 20]); - - if binascii::hex2bin(v.as_bytes(), &mut res.0).is_err() { - return Err(serde::de::Error::invalid_value( - 
serde::de::Unexpected::Str(v), - &"a hexadecimal string", - )); - }; - Ok(res) + buf_b } } diff --git a/src/shared/bit_torrent/mod.rs b/src/shared/bit_torrent/mod.rs index eba90b4ab..8074661be 100644 --- a/src/shared/bit_torrent/mod.rs +++ b/src/shared/bit_torrent/mod.rs @@ -69,3 +69,4 @@ //!Bencode & bdecode in your browser | pub mod common; pub mod info_hash; +pub mod tracker; diff --git a/src/shared/bit_torrent/tracker/http/client/mod.rs b/src/shared/bit_torrent/tracker/http/client/mod.rs new file mode 100644 index 000000000..4c70cd68b --- /dev/null +++ b/src/shared/bit_torrent/tracker/http/client/mod.rs @@ -0,0 +1,204 @@ +pub mod requests; +pub mod responses; + +use std::net::IpAddr; +use std::sync::Arc; +use std::time::Duration; + +use hyper::StatusCode; +use requests::{announce, scrape}; +use reqwest::{Response, Url}; +use thiserror::Error; + +use crate::core::auth::Key; + +#[derive(Debug, Clone, Error)] +pub enum Error { + #[error("Failed to Build a Http Client: {err:?}")] + ClientBuildingError { err: Arc }, + #[error("Failed to get a response: {err:?}")] + ResponseError { err: Arc }, + #[error("Returned a non-success code: \"{code}\" with the response: \"{response:?}\"")] + UnsuccessfulResponse { code: StatusCode, response: Arc }, +} + +/// HTTP Tracker Client +pub struct Client { + client: reqwest::Client, + base_url: Url, + key: Option, +} + +/// URL components in this context: +/// +/// ```text +/// http://127.0.0.1:62304/announce/YZ....rJ?info_hash=%9C8B%22%13%E3%0B%FF%21%2B0%C3%60%D2o%9A%02%13d%22 +/// \_____________________/\_______________/ \__________________________________________________________/ +/// | | | +/// base url path query +/// ``` +impl Client { + /// # Errors + /// + /// This method fails if the client builder fails. 
+ pub fn new(base_url: Url, timeout: Duration) -> Result { + let client = reqwest::Client::builder() + .timeout(timeout) + .build() + .map_err(|e| Error::ClientBuildingError { err: e.into() })?; + + Ok(Self { + base_url, + client, + key: None, + }) + } + + /// Creates the new client binding it to an specific local address. + /// + /// # Errors + /// + /// This method fails if the client builder fails. + pub fn bind(base_url: Url, timeout: Duration, local_address: IpAddr) -> Result { + let client = reqwest::Client::builder() + .timeout(timeout) + .local_address(local_address) + .build() + .map_err(|e| Error::ClientBuildingError { err: e.into() })?; + + Ok(Self { + base_url, + client, + key: None, + }) + } + + /// # Errors + /// + /// This method fails if the client builder fails. + pub fn authenticated(base_url: Url, timeout: Duration, key: Key) -> Result { + let client = reqwest::Client::builder() + .timeout(timeout) + .build() + .map_err(|e| Error::ClientBuildingError { err: e.into() })?; + + Ok(Self { + base_url, + client, + key: Some(key), + }) + } + + /// # Errors + /// + /// This method fails if the returned response was not successful + pub async fn announce(&self, query: &announce::Query) -> Result { + let response = self.get(&self.build_announce_path_and_query(query)).await?; + + if response.status().is_success() { + Ok(response) + } else { + Err(Error::UnsuccessfulResponse { + code: response.status(), + response: response.into(), + }) + } + } + + /// # Errors + /// + /// This method fails if the returned response was not successful + pub async fn scrape(&self, query: &scrape::Query) -> Result { + let response = self.get(&self.build_scrape_path_and_query(query)).await?; + + if response.status().is_success() { + Ok(response) + } else { + Err(Error::UnsuccessfulResponse { + code: response.status(), + response: response.into(), + }) + } + } + + /// # Errors + /// + /// This method fails if the returned response was not successful + pub async fn 
announce_with_header(&self, query: &announce::Query, key: &str, value: &str) -> Result { + let response = self + .get_with_header(&self.build_announce_path_and_query(query), key, value) + .await?; + + if response.status().is_success() { + Ok(response) + } else { + Err(Error::UnsuccessfulResponse { + code: response.status(), + response: response.into(), + }) + } + } + + /// # Errors + /// + /// This method fails if the returned response was not successful + pub async fn health_check(&self) -> Result { + let response = self.get(&self.build_path("health_check")).await?; + + if response.status().is_success() { + Ok(response) + } else { + Err(Error::UnsuccessfulResponse { + code: response.status(), + response: response.into(), + }) + } + } + + /// # Errors + /// + /// This method fails if there was an error while sending request. + pub async fn get(&self, path: &str) -> Result { + self.client + .get(self.build_url(path)) + .send() + .await + .map_err(|e| Error::ResponseError { err: e.into() }) + } + + /// # Errors + /// + /// This method fails if there was an error while sending request. 
+ pub async fn get_with_header(&self, path: &str, key: &str, value: &str) -> Result { + self.client + .get(self.build_url(path)) + .header(key, value) + .send() + .await + .map_err(|e| Error::ResponseError { err: e.into() }) + } + + fn build_announce_path_and_query(&self, query: &announce::Query) -> String { + format!("{}?{query}", self.build_path("announce")) + } + + fn build_scrape_path_and_query(&self, query: &scrape::Query) -> String { + format!("{}?{query}", self.build_path("scrape")) + } + + fn build_path(&self, path: &str) -> String { + match &self.key { + Some(key) => format!("{path}/{key}"), + None => path.to_string(), + } + } + + fn build_url(&self, path: &str) -> String { + let base_url = self.base_url(); + format!("{base_url}{path}") + } + + fn base_url(&self) -> String { + self.base_url.to_string() + } +} diff --git a/src/shared/bit_torrent/tracker/http/client/requests/announce.rs b/src/shared/bit_torrent/tracker/http/client/requests/announce.rs new file mode 100644 index 000000000..b872e76e9 --- /dev/null +++ b/src/shared/bit_torrent/tracker/http/client/requests/announce.rs @@ -0,0 +1,275 @@ +use std::fmt; +use std::net::{IpAddr, Ipv4Addr}; +use std::str::FromStr; + +use serde_repr::Serialize_repr; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::peer; + +use crate::shared::bit_torrent::tracker::http::{percent_encode_byte_array, ByteArray20}; + +pub struct Query { + pub info_hash: ByteArray20, + pub peer_addr: IpAddr, + pub downloaded: BaseTenASCII, + pub uploaded: BaseTenASCII, + pub peer_id: ByteArray20, + pub port: PortNumber, + pub left: BaseTenASCII, + pub event: Option, + pub compact: Option, +} + +impl fmt::Display for Query { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.build()) + } +} + +/// HTTP Tracker Announce Request: +/// +/// +/// +/// Some parameters in the specification are not implemented in this tracker yet. 
+impl Query { + /// It builds the URL query component for the announce request. + /// + /// This custom URL query params encoding is needed because `reqwest` does not allow + /// bytes arrays in query parameters. More info on this issue: + /// + /// + #[must_use] + pub fn build(&self) -> String { + self.params().to_string() + } + + #[must_use] + pub fn params(&self) -> QueryParams { + QueryParams::from(self) + } +} + +pub type BaseTenASCII = u64; +pub type PortNumber = u16; + +pub enum Event { + //Started, + //Stopped, + Completed, +} + +impl fmt::Display for Event { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + //Event::Started => write!(f, "started"), + //Event::Stopped => write!(f, "stopped"), + Event::Completed => write!(f, "completed"), + } + } +} + +#[derive(Serialize_repr, PartialEq, Debug)] +#[repr(u8)] +pub enum Compact { + Accepted = 1, + NotAccepted = 0, +} + +impl fmt::Display for Compact { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Compact::Accepted => write!(f, "1"), + Compact::NotAccepted => write!(f, "0"), + } + } +} + +pub struct QueryBuilder { + announce_query: Query, +} + +impl QueryBuilder { + /// # Panics + /// + /// Will panic if the default info-hash value is not a valid info-hash. 
+ #[must_use] + pub fn with_default_values() -> QueryBuilder { + let default_announce_query = Query { + info_hash: InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap().0, // # DevSkim: ignore DS173237 + peer_addr: IpAddr::V4(Ipv4Addr::new(192, 168, 1, 88)), + downloaded: 0, + uploaded: 0, + peer_id: peer::Id(*b"-qB00000000000000001").0, + port: 17548, + left: 0, + event: Some(Event::Completed), + compact: Some(Compact::NotAccepted), + }; + Self { + announce_query: default_announce_query, + } + } + + #[must_use] + pub fn with_info_hash(mut self, info_hash: &InfoHash) -> Self { + self.announce_query.info_hash = info_hash.0; + self + } + + #[must_use] + pub fn with_peer_id(mut self, peer_id: &peer::Id) -> Self { + self.announce_query.peer_id = peer_id.0; + self + } + + #[must_use] + pub fn with_compact(mut self, compact: Compact) -> Self { + self.announce_query.compact = Some(compact); + self + } + + #[must_use] + pub fn with_peer_addr(mut self, peer_addr: &IpAddr) -> Self { + self.announce_query.peer_addr = *peer_addr; + self + } + + #[must_use] + pub fn without_compact(mut self) -> Self { + self.announce_query.compact = None; + self + } + + #[must_use] + pub fn query(self) -> Query { + self.announce_query + } +} + +/// It contains all the GET parameters that can be used in a HTTP Announce request. +/// +/// Sample Announce URL with all the GET parameters (mandatory and optional): +/// +/// ```text +/// http://127.0.0.1:7070/announce? 
+/// info_hash=%9C8B%22%13%E3%0B%FF%21%2B0%C3%60%D2o%9A%02%13d%22 (mandatory) +/// peer_addr=192.168.1.88 +/// downloaded=0 +/// uploaded=0 +/// peer_id=%2DqB00000000000000000 (mandatory) +/// port=17548 (mandatory) +/// left=0 +/// event=completed +/// compact=0 +/// ``` +pub struct QueryParams { + pub info_hash: Option, + pub peer_addr: Option, + pub downloaded: Option, + pub uploaded: Option, + pub peer_id: Option, + pub port: Option, + pub left: Option, + pub event: Option, + pub compact: Option, +} + +impl std::fmt::Display for QueryParams { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let mut params = vec![]; + + if let Some(info_hash) = &self.info_hash { + params.push(("info_hash", info_hash)); + } + if let Some(peer_addr) = &self.peer_addr { + params.push(("peer_addr", peer_addr)); + } + if let Some(downloaded) = &self.downloaded { + params.push(("downloaded", downloaded)); + } + if let Some(uploaded) = &self.uploaded { + params.push(("uploaded", uploaded)); + } + if let Some(peer_id) = &self.peer_id { + params.push(("peer_id", peer_id)); + } + if let Some(port) = &self.port { + params.push(("port", port)); + } + if let Some(left) = &self.left { + params.push(("left", left)); + } + if let Some(event) = &self.event { + params.push(("event", event)); + } + if let Some(compact) = &self.compact { + params.push(("compact", compact)); + } + + let query = params + .iter() + .map(|param| format!("{}={}", param.0, param.1)) + .collect::>() + .join("&"); + + write!(f, "{query}") + } +} + +impl QueryParams { + pub fn from(announce_query: &Query) -> Self { + let event = announce_query.event.as_ref().map(std::string::ToString::to_string); + let compact = announce_query.compact.as_ref().map(std::string::ToString::to_string); + + Self { + info_hash: Some(percent_encode_byte_array(&announce_query.info_hash)), + peer_addr: Some(announce_query.peer_addr.to_string()), + downloaded: Some(announce_query.downloaded.to_string()), + uploaded: 
Some(announce_query.uploaded.to_string()), + peer_id: Some(percent_encode_byte_array(&announce_query.peer_id)), + port: Some(announce_query.port.to_string()), + left: Some(announce_query.left.to_string()), + event, + compact, + } + } + + pub fn remove_optional_params(&mut self) { + // todo: make them optional with the Option<...> in the AnnounceQuery struct + // if they are really optional. So that we can crete a minimal AnnounceQuery + // instead of removing the optional params afterwards. + // + // The original specification on: + // + // says only `ip` and `event` are optional. + // + // On + // says only `ip`, `numwant`, `key` and `trackerid` are optional. + // + // but the server is responding if all these params are not included. + self.peer_addr = None; + self.downloaded = None; + self.uploaded = None; + self.left = None; + self.event = None; + self.compact = None; + } + + /// # Panics + /// + /// Will panic if invalid param name is provided. + pub fn set(&mut self, param_name: &str, param_value: &str) { + match param_name { + "info_hash" => self.info_hash = Some(param_value.to_string()), + "peer_addr" => self.peer_addr = Some(param_value.to_string()), + "downloaded" => self.downloaded = Some(param_value.to_string()), + "uploaded" => self.uploaded = Some(param_value.to_string()), + "peer_id" => self.peer_id = Some(param_value.to_string()), + "port" => self.port = Some(param_value.to_string()), + "left" => self.left = Some(param_value.to_string()), + "event" => self.event = Some(param_value.to_string()), + "compact" => self.compact = Some(param_value.to_string()), + &_ => panic!("Invalid param name for announce query"), + } + } +} diff --git a/src/shared/bit_torrent/tracker/http/client/requests/mod.rs b/src/shared/bit_torrent/tracker/http/client/requests/mod.rs new file mode 100644 index 000000000..776d2dfbf --- /dev/null +++ b/src/shared/bit_torrent/tracker/http/client/requests/mod.rs @@ -0,0 +1,2 @@ +pub mod announce; +pub mod scrape; diff --git 
a/src/shared/bit_torrent/tracker/http/client/requests/scrape.rs b/src/shared/bit_torrent/tracker/http/client/requests/scrape.rs new file mode 100644 index 000000000..4d12fc2d2 --- /dev/null +++ b/src/shared/bit_torrent/tracker/http/client/requests/scrape.rs @@ -0,0 +1,172 @@ +use std::error::Error; +use std::fmt::{self}; +use std::str::FromStr; + +use torrust_tracker_primitives::info_hash::InfoHash; + +use crate::shared::bit_torrent::tracker::http::{percent_encode_byte_array, ByteArray20}; + +pub struct Query { + pub info_hash: Vec, +} + +impl fmt::Display for Query { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.build()) + } +} + +#[derive(Debug)] +#[allow(dead_code)] +pub struct ConversionError(String); + +impl fmt::Display for ConversionError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "Invalid infohash: {}", self.0) + } +} + +impl Error for ConversionError {} + +impl TryFrom<&[String]> for Query { + type Error = ConversionError; + + fn try_from(info_hashes: &[String]) -> Result { + let mut validated_info_hashes: Vec = Vec::new(); + + for info_hash in info_hashes { + let validated_info_hash = InfoHash::from_str(info_hash).map_err(|_| ConversionError(info_hash.clone()))?; + validated_info_hashes.push(validated_info_hash.0); + } + + Ok(Self { + info_hash: validated_info_hashes, + }) + } +} + +impl TryFrom> for Query { + type Error = ConversionError; + + fn try_from(info_hashes: Vec) -> Result { + let mut validated_info_hashes: Vec = Vec::new(); + + for info_hash in info_hashes { + let validated_info_hash = InfoHash::from_str(&info_hash).map_err(|_| ConversionError(info_hash.clone()))?; + validated_info_hashes.push(validated_info_hash.0); + } + + Ok(Self { + info_hash: validated_info_hashes, + }) + } +} + +/// HTTP Tracker Scrape Request: +/// +/// +impl Query { + /// It builds the URL query component for the scrape request. 
+ /// + /// This custom URL query params encoding is needed because `reqwest` does not allow + /// bytes arrays in query parameters. More info on this issue: + /// + /// + #[must_use] + pub fn build(&self) -> String { + self.params().to_string() + } + + #[must_use] + pub fn params(&self) -> QueryParams { + QueryParams::from(self) + } +} + +pub struct QueryBuilder { + scrape_query: Query, +} + +impl Default for QueryBuilder { + fn default() -> Self { + let default_scrape_query = Query { + info_hash: [InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap().0].to_vec(), // # DevSkim: ignore DS173237 + }; + Self { + scrape_query: default_scrape_query, + } + } +} + +impl QueryBuilder { + #[must_use] + pub fn with_one_info_hash(mut self, info_hash: &InfoHash) -> Self { + self.scrape_query.info_hash = [info_hash.0].to_vec(); + self + } + + #[must_use] + pub fn add_info_hash(mut self, info_hash: &InfoHash) -> Self { + self.scrape_query.info_hash.push(info_hash.0); + self + } + + #[must_use] + pub fn query(self) -> Query { + self.scrape_query + } +} + +/// It contains all the GET parameters that can be used in a HTTP Scrape request. +/// +/// The `info_hash` param is the percent encoded of the the 20-byte array info hash. +/// +/// Sample Scrape URL with all the GET parameters: +/// +/// For `IpV4`: +/// +/// ```text +/// http://127.0.0.1:7070/scrape?info_hash=%9C8B%22%13%E3%0B%FF%21%2B0%C3%60%D2o%9A%02%13d%22 +/// ``` +/// +/// For `IpV6`: +/// +/// ```text +/// http://[::1]:7070/scrape?info_hash=%9C8B%22%13%E3%0B%FF%21%2B0%C3%60%D2o%9A%02%13d%22 +/// ``` +/// +/// You can add as many info hashes as you want, just adding the same param again. 
+pub struct QueryParams { + pub info_hash: Vec, +} + +impl QueryParams { + pub fn set_one_info_hash_param(&mut self, info_hash: &str) { + self.info_hash = vec![info_hash.to_string()]; + } +} + +impl std::fmt::Display for QueryParams { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let query = self + .info_hash + .iter() + .map(|info_hash| format!("info_hash={}", &info_hash)) + .collect::>() + .join("&"); + + write!(f, "{query}") + } +} + +impl QueryParams { + pub fn from(scrape_query: &Query) -> Self { + let info_hashes = scrape_query + .info_hash + .iter() + .map(percent_encode_byte_array) + .collect::>(); + + Self { info_hash: info_hashes } + } +} diff --git a/src/shared/bit_torrent/tracker/http/client/responses/announce.rs b/src/shared/bit_torrent/tracker/http/client/responses/announce.rs new file mode 100644 index 000000000..15ec446cb --- /dev/null +++ b/src/shared/bit_torrent/tracker/http/client/responses/announce.rs @@ -0,0 +1,125 @@ +use std::net::{IpAddr, Ipv4Addr, SocketAddr}; + +use serde::{Deserialize, Serialize}; +use torrust_tracker_primitives::peer; + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct Announce { + pub complete: u32, + pub incomplete: u32, + pub interval: u32, + #[serde(rename = "min interval")] + pub min_interval: u32, + pub peers: Vec, // Peers using IPV4 and IPV6 +} + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct DictionaryPeer { + pub ip: String, + #[serde(rename = "peer id")] + #[serde(with = "serde_bytes")] + pub peer_id: Vec, + pub port: u16, +} + +impl From for DictionaryPeer { + fn from(peer: peer::Peer) -> Self { + DictionaryPeer { + peer_id: peer.peer_id.to_bytes().to_vec(), + ip: peer.peer_addr.ip().to_string(), + port: peer.peer_addr.port(), + } + } +} + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct DeserializedCompact { + pub complete: u32, + pub incomplete: u32, + pub interval: u32, + #[serde(rename = "min interval")] + pub min_interval: u32, + 
#[serde(with = "serde_bytes")] + pub peers: Vec, +} + +impl DeserializedCompact { + /// # Errors + /// + /// Will return an error if bytes can't be deserialized. + pub fn from_bytes(bytes: &[u8]) -> Result { + serde_bencode::from_bytes::(bytes) + } +} + +#[derive(Debug, PartialEq)] +pub struct Compact { + // code-review: there could be a way to deserialize this struct directly + // by using serde instead of doing it manually. Or at least using a custom deserializer. + pub complete: u32, + pub incomplete: u32, + pub interval: u32, + pub min_interval: u32, + pub peers: CompactPeerList, +} + +#[derive(Debug, PartialEq)] +pub struct CompactPeerList { + peers: Vec, +} + +impl CompactPeerList { + #[must_use] + pub fn new(peers: Vec) -> Self { + Self { peers } + } +} + +#[derive(Clone, Debug, PartialEq)] +pub struct CompactPeer { + ip: Ipv4Addr, + port: u16, +} + +impl CompactPeer { + /// # Panics + /// + /// Will panic if the provided socket address is a IPv6 IP address. + /// It's not supported for compact peers. 
+ #[must_use] + pub fn new(socket_addr: &SocketAddr) -> Self { + match socket_addr.ip() { + IpAddr::V4(ip) => Self { + ip, + port: socket_addr.port(), + }, + IpAddr::V6(_ip) => panic!("IPV6 is not supported for compact peer"), + } + } + + #[must_use] + pub fn new_from_bytes(bytes: &[u8]) -> Self { + Self { + ip: Ipv4Addr::new(bytes[0], bytes[1], bytes[2], bytes[3]), + port: u16::from_be_bytes([bytes[4], bytes[5]]), + } + } +} + +impl From for Compact { + fn from(compact_announce: DeserializedCompact) -> Self { + let mut peers = vec![]; + + for peer_bytes in compact_announce.peers.chunks_exact(6) { + peers.push(CompactPeer::new_from_bytes(peer_bytes)); + } + + Self { + complete: compact_announce.complete, + incomplete: compact_announce.incomplete, + interval: compact_announce.interval, + min_interval: compact_announce.min_interval, + peers: CompactPeerList::new(peers), + } + } +} diff --git a/src/shared/bit_torrent/tracker/http/client/responses/error.rs b/src/shared/bit_torrent/tracker/http/client/responses/error.rs new file mode 100644 index 000000000..00befdb54 --- /dev/null +++ b/src/shared/bit_torrent/tracker/http/client/responses/error.rs @@ -0,0 +1,7 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +pub struct Error { + #[serde(rename = "failure reason")] + pub failure_reason: String, +} diff --git a/src/shared/bit_torrent/tracker/http/client/responses/mod.rs b/src/shared/bit_torrent/tracker/http/client/responses/mod.rs new file mode 100644 index 000000000..bdc689056 --- /dev/null +++ b/src/shared/bit_torrent/tracker/http/client/responses/mod.rs @@ -0,0 +1,3 @@ +pub mod announce; +pub mod error; +pub mod scrape; diff --git a/src/shared/bit_torrent/tracker/http/client/responses/scrape.rs b/src/shared/bit_torrent/tracker/http/client/responses/scrape.rs new file mode 100644 index 000000000..25a2f0a81 --- /dev/null +++ b/src/shared/bit_torrent/tracker/http/client/responses/scrape.rs @@ -0,0 +1,230 @@ +use 
std::collections::HashMap; +use std::fmt::Write; +use std::str; + +use serde::ser::SerializeMap; +use serde::{Deserialize, Serialize, Serializer}; +use serde_bencode::value::Value; + +use crate::shared::bit_torrent::tracker::http::{ByteArray20, InfoHash}; + +#[derive(Debug, PartialEq, Default, Deserialize)] +pub struct Response { + pub files: HashMap, +} + +impl Response { + #[must_use] + pub fn with_one_file(info_hash_bytes: ByteArray20, file: File) -> Self { + let mut files: HashMap = HashMap::new(); + files.insert(info_hash_bytes, file); + Self { files } + } + + /// # Errors + /// + /// Will return an error if the deserialized bencoded response can't not be converted into a valid response. + /// + /// # Panics + /// + /// Will panic if it can't deserialize the bencoded response. + pub fn try_from_bencoded(bytes: &[u8]) -> Result { + let scrape_response: DeserializedResponse = + serde_bencode::from_bytes(bytes).expect("provided bytes should be a valid bencoded response"); + Self::try_from(scrape_response) + } +} + +#[derive(Serialize, Deserialize, Debug, PartialEq, Default)] +pub struct File { + pub complete: i64, // The number of active peers that have completed downloading + pub downloaded: i64, // The number of peers that have ever completed downloading + pub incomplete: i64, // The number of active peers that have not completed downloading +} + +impl File { + #[must_use] + pub fn zeroed() -> Self { + Self::default() + } +} + +impl TryFrom for Response { + type Error = BencodeParseError; + + fn try_from(scrape_response: DeserializedResponse) -> Result { + parse_bencoded_response(&scrape_response.files) + } +} + +#[derive(Serialize, Deserialize, Debug, PartialEq)] +struct DeserializedResponse { + pub files: Value, +} + +// Custom serialization for Response +impl Serialize for Response { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + let mut map = serializer.serialize_map(Some(self.files.len()))?; + for (key, value) in &self.files 
{ + // Convert ByteArray20 key to hex string + let hex_key = byte_array_to_hex_string(key); + map.serialize_entry(&hex_key, value)?; + } + map.end() + } +} + +// Helper function to convert ByteArray20 to hex string +fn byte_array_to_hex_string(byte_array: &ByteArray20) -> String { + let mut hex_string = String::with_capacity(byte_array.len() * 2); + for byte in byte_array { + write!(hex_string, "{byte:02x}").expect("Writing to string should never fail"); + } + hex_string +} + +#[derive(Default)] +pub struct ResponseBuilder { + response: Response, +} + +impl ResponseBuilder { + #[must_use] + pub fn add_file(mut self, info_hash_bytes: ByteArray20, file: File) -> Self { + self.response.files.insert(info_hash_bytes, file); + self + } + + #[must_use] + pub fn build(self) -> Response { + self.response + } +} + +#[derive(Debug)] +pub enum BencodeParseError { + InvalidValueExpectedDict { value: Value }, + InvalidValueExpectedInt { value: Value }, + InvalidFileField { value: Value }, + MissingFileField { field_name: String }, +} + +/// It parses a bencoded scrape response into a `Response` struct. 
+/// +/// For example: +/// +/// ```text +/// d5:filesd20:xxxxxxxxxxxxxxxxxxxxd8:completei11e10:downloadedi13772e10:incompletei19e +/// 20:yyyyyyyyyyyyyyyyyyyyd8:completei21e10:downloadedi206e10:incompletei20eee +/// ``` +/// +/// Response (JSON encoded for readability): +/// +/// ```text +/// { +/// 'files': { +/// 'xxxxxxxxxxxxxxxxxxxx': {'complete': 11, 'downloaded': 13772, 'incomplete': 19}, +/// 'yyyyyyyyyyyyyyyyyyyy': {'complete': 21, 'downloaded': 206, 'incomplete': 20} +/// } +/// } +fn parse_bencoded_response(value: &Value) -> Result { + let mut files: HashMap = HashMap::new(); + + match value { + Value::Dict(dict) => { + for file_element in dict { + let info_hash_byte_vec = file_element.0; + let file_value = file_element.1; + + let file = parse_bencoded_file(file_value).unwrap(); + + files.insert(InfoHash::new(info_hash_byte_vec).bytes(), file); + } + } + _ => return Err(BencodeParseError::InvalidValueExpectedDict { value: value.clone() }), + } + + Ok(Response { files }) +} + +/// It parses a bencoded dictionary into a `File` struct. 
+/// +/// For example: +/// +/// +/// ```text +/// d8:completei11e10:downloadedi13772e10:incompletei19ee +/// ``` +/// +/// into: +/// +/// ```text +/// File { +/// complete: 11, +/// downloaded: 13772, +/// incomplete: 19, +/// } +/// ``` +fn parse_bencoded_file(value: &Value) -> Result { + let file = match &value { + Value::Dict(dict) => { + let mut complete = None; + let mut downloaded = None; + let mut incomplete = None; + + for file_field in dict { + let field_name = file_field.0; + + let field_value = match file_field.1 { + Value::Int(number) => Ok(*number), + _ => Err(BencodeParseError::InvalidValueExpectedInt { + value: file_field.1.clone(), + }), + }?; + + if field_name == b"complete" { + complete = Some(field_value); + } else if field_name == b"downloaded" { + downloaded = Some(field_value); + } else if field_name == b"incomplete" { + incomplete = Some(field_value); + } else { + return Err(BencodeParseError::InvalidFileField { + value: file_field.1.clone(), + }); + } + } + + if complete.is_none() { + return Err(BencodeParseError::MissingFileField { + field_name: "complete".to_string(), + }); + } + + if downloaded.is_none() { + return Err(BencodeParseError::MissingFileField { + field_name: "downloaded".to_string(), + }); + } + + if incomplete.is_none() { + return Err(BencodeParseError::MissingFileField { + field_name: "incomplete".to_string(), + }); + } + + File { + complete: complete.unwrap(), + downloaded: downloaded.unwrap(), + incomplete: incomplete.unwrap(), + } + } + _ => return Err(BencodeParseError::InvalidValueExpectedDict { value: value.clone() }), + }; + + Ok(file) +} diff --git a/src/shared/bit_torrent/tracker/http/mod.rs b/src/shared/bit_torrent/tracker/http/mod.rs new file mode 100644 index 000000000..15723c1b7 --- /dev/null +++ b/src/shared/bit_torrent/tracker/http/mod.rs @@ -0,0 +1,26 @@ +pub mod client; + +use percent_encoding::NON_ALPHANUMERIC; + +pub type ByteArray20 = [u8; 20]; + +#[must_use] +pub fn percent_encode_byte_array(bytes: 
&ByteArray20) -> String { + percent_encoding::percent_encode(bytes, NON_ALPHANUMERIC).to_string() +} + +pub struct InfoHash(ByteArray20); + +impl InfoHash { + #[must_use] + pub fn new(vec: &[u8]) -> Self { + let mut byte_array_20: ByteArray20 = Default::default(); + byte_array_20.clone_from_slice(vec); + Self(byte_array_20) + } + + #[must_use] + pub fn bytes(&self) -> ByteArray20 { + self.0 + } +} diff --git a/src/shared/bit_torrent/tracker/mod.rs b/src/shared/bit_torrent/tracker/mod.rs new file mode 100644 index 000000000..b08eaa622 --- /dev/null +++ b/src/shared/bit_torrent/tracker/mod.rs @@ -0,0 +1,2 @@ +pub mod http; +pub mod udp; diff --git a/src/shared/bit_torrent/tracker/udp/client.rs b/src/shared/bit_torrent/tracker/udp/client.rs new file mode 100644 index 000000000..edb8adc85 --- /dev/null +++ b/src/shared/bit_torrent/tracker/udp/client.rs @@ -0,0 +1,270 @@ +use core::result::Result::{Err, Ok}; +use std::io::Cursor; +use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr}; +use std::sync::Arc; +use std::time::Duration; + +use aquatic_udp_protocol::{ConnectRequest, Request, Response, TransactionId}; +use tokio::net::UdpSocket; +use tokio::time; +use torrust_tracker_configuration::DEFAULT_TIMEOUT; +use zerocopy::network_endian::I32; + +use super::Error; +use crate::shared::bit_torrent::tracker::udp::MAX_PACKET_SIZE; + +pub const UDP_CLIENT_LOG_TARGET: &str = "UDP CLIENT"; + +#[allow(clippy::module_name_repetitions)] +#[derive(Debug)] +pub struct UdpClient { + /// The socket to connect to + pub socket: Arc, + + /// Timeout for sending and receiving packets + pub timeout: Duration, +} + +impl UdpClient { + /// Creates a new `UdpClient` bound to the default port and ipv6 address + /// + /// # Errors + /// + /// Will return error if unable to bind to any port or ip address. 
+ /// + async fn bound_to_default_ipv4(timeout: Duration) -> Result { + let addr = SocketAddr::new(Ipv4Addr::UNSPECIFIED.into(), 0); + + Self::bound(addr, timeout).await + } + + /// Creates a new `UdpClient` bound to the default port and ipv6 address + /// + /// # Errors + /// + /// Will return error if unable to bind to any port or ip address. + /// + async fn bound_to_default_ipv6(timeout: Duration) -> Result { + let addr = SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 0); + + Self::bound(addr, timeout).await + } + + /// Creates a new `UdpClient` connected to a Udp server + /// + /// # Errors + /// + /// Will return any errors present in the call stack + /// + pub async fn connected(remote_addr: SocketAddr, timeout: Duration) -> Result { + let client = if remote_addr.is_ipv4() { + Self::bound_to_default_ipv4(timeout).await? + } else { + Self::bound_to_default_ipv6(timeout).await? + }; + + client.connect(remote_addr).await?; + Ok(client) + } + + /// Creates a `[UdpClient]` bound to a Socket. + /// + /// # Panics + /// + /// Panics if unable to get the `local_addr` of the bound socket. + /// + /// # Errors + /// + /// This function will return an error if the binding takes to long + /// or if there is an underlying OS error. + pub async fn bound(addr: SocketAddr, timeout: Duration) -> Result { + tracing::trace!(target: UDP_CLIENT_LOG_TARGET, "binding to socket: {addr:?} ..."); + + let socket = time::timeout(timeout, UdpSocket::bind(addr)) + .await + .map_err(|_| Error::TimeoutWhileBindingToSocket { addr })? + .map_err(|e| Error::UnableToBindToSocket { err: e.into(), addr })?; + + let addr = socket.local_addr().expect("it should get the local address"); + + tracing::debug!(target: UDP_CLIENT_LOG_TARGET, "bound to socket: {addr:?}."); + + let udp_client = Self { + socket: Arc::new(socket), + timeout, + }; + + Ok(udp_client) + } + + /// # Errors + /// + /// Will return error if can't connect to the socket. 
+ pub async fn connect(&self, remote_addr: SocketAddr) -> Result<(), Error> { + tracing::trace!(target: UDP_CLIENT_LOG_TARGET, "connecting to remote: {remote_addr:?} ..."); + + let () = time::timeout(self.timeout, self.socket.connect(remote_addr)) + .await + .map_err(|_| Error::TimeoutWhileConnectingToRemote { remote_addr })? + .map_err(|e| Error::UnableToConnectToRemote { + err: e.into(), + remote_addr, + })?; + + tracing::debug!(target: UDP_CLIENT_LOG_TARGET, "connected to remote: {remote_addr:?}."); + + Ok(()) + } + + /// # Errors + /// + /// Will return error if: + /// + /// - Can't write to the socket. + /// - Can't send data. + pub async fn send(&self, bytes: &[u8]) -> Result { + tracing::trace!(target: UDP_CLIENT_LOG_TARGET, "sending {bytes:?} ..."); + + let () = time::timeout(self.timeout, self.socket.writable()) + .await + .map_err(|_| Error::TimeoutWaitForWriteableSocket)? + .map_err(|e| Error::UnableToGetWritableSocket { err: e.into() })?; + + let sent_bytes = time::timeout(self.timeout, self.socket.send(bytes)) + .await + .map_err(|_| Error::TimeoutWhileSendingData { data: bytes.to_vec() })? + .map_err(|e| Error::UnableToSendData { + err: e.into(), + data: bytes.to_vec(), + })?; + + tracing::debug!(target: UDP_CLIENT_LOG_TARGET, "sent {sent_bytes} bytes to remote."); + + Ok(sent_bytes) + } + + /// # Errors + /// + /// Will return error if: + /// + /// - Can't read from the socket. + /// - Can't receive data. + /// + /// # Panics + /// + pub async fn receive(&self) -> Result, Error> { + tracing::trace!(target: UDP_CLIENT_LOG_TARGET, "receiving ..."); + + let mut buffer = [0u8; MAX_PACKET_SIZE]; + + let () = time::timeout(self.timeout, self.socket.readable()) + .await + .map_err(|_| Error::TimeoutWaitForReadableSocket)? + .map_err(|e| Error::UnableToGetReadableSocket { err: e.into() })?; + + let received_bytes = time::timeout(self.timeout, self.socket.recv(&mut buffer)) + .await + .map_err(|_| Error::TimeoutWhileReceivingData)? 
+ .map_err(|e| Error::UnableToReceivingData { err: e.into() })?; + + let mut received: Vec = buffer.to_vec(); + Vec::truncate(&mut received, received_bytes); + + tracing::debug!(target: UDP_CLIENT_LOG_TARGET, "received {received_bytes} bytes: {received:?}"); + + Ok(received) + } +} + +#[allow(clippy::module_name_repetitions)] +#[derive(Debug)] +pub struct UdpTrackerClient { + pub client: UdpClient, +} + +impl UdpTrackerClient { + /// Creates a new `UdpTrackerClient` connected to a Udp Tracker server + /// + /// # Errors + /// + /// If unable to connect to the remote address. + /// + pub async fn new(remote_addr: SocketAddr, timeout: Duration) -> Result { + let client = UdpClient::connected(remote_addr, timeout).await?; + Ok(UdpTrackerClient { client }) + } + + /// # Errors + /// + /// Will return error if can't write request to bytes. + pub async fn send(&self, request: Request) -> Result { + tracing::trace!(target: UDP_CLIENT_LOG_TARGET, "sending request {request:?} ..."); + + // Write request into a buffer + // todo: optimize the pre-allocated amount based upon request type. + let mut writer = Cursor::new(Vec::with_capacity(200)); + let () = request + .write_bytes(&mut writer) + .map_err(|e| Error::UnableToWriteDataFromRequest { err: e.into(), request })?; + + self.client.send(writer.get_ref()).await + } + + /// # Errors + /// + /// Will return error if can't create response from the received payload (bytes buffer). + pub async fn receive(&self) -> Result { + let response = self.client.receive().await?; + + tracing::debug!(target: UDP_CLIENT_LOG_TARGET, "received {} bytes: {response:?}", response.len()); + + Response::parse_bytes(&response, true).map_err(|e| Error::UnableToParseResponse { err: e.into(), response }) + } +} + +/// Helper Function to Check if a UDP Service is Connectable +/// +/// # Panics +/// +/// It will return an error if unable to connect to the UDP service. 
+/// +/// # Errors +/// +pub async fn check(remote_addr: &SocketAddr) -> Result { + tracing::debug!("Checking Service (detail): {remote_addr:?}."); + + match UdpTrackerClient::new(*remote_addr, DEFAULT_TIMEOUT).await { + Ok(client) => { + let connect_request = ConnectRequest { + transaction_id: TransactionId(I32::new(123)), + }; + + // client.send() return usize, but doesn't use here + match client.send(connect_request.into()).await { + Ok(_) => (), + Err(e) => tracing::debug!("Error: {e:?}."), + }; + + let process = move |response| { + if matches!(response, Response::Connect(_connect_response)) { + Ok("Connected".to_string()) + } else { + Err("Did not Connect".to_string()) + } + }; + + let sleep = time::sleep(Duration::from_millis(2000)); + tokio::pin!(sleep); + + tokio::select! { + () = &mut sleep => { + Err("Timed Out".to_string()) + } + response = client.receive() => { + process(response.unwrap()) + } + } + } + Err(e) => Err(format!("{e:?}")), + } +} diff --git a/src/shared/bit_torrent/tracker/udp/mod.rs b/src/shared/bit_torrent/tracker/udp/mod.rs new file mode 100644 index 000000000..b9d5f34f6 --- /dev/null +++ b/src/shared/bit_torrent/tracker/udp/mod.rs @@ -0,0 +1,68 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use aquatic_udp_protocol::Request; +use thiserror::Error; +use torrust_tracker_located_error::DynError; + +pub mod client; + +/// The maximum number of bytes in a UDP packet. +pub const MAX_PACKET_SIZE: usize = 1496; +/// A magic 64-bit integer constant defined in the protocol that is used to +/// identify the protocol. 
+pub const PROTOCOL_ID: i64 = 0x0417_2710_1980; + +#[derive(Debug, Clone, Error)] +pub enum Error { + #[error("Timeout while waiting for socket to bind: {addr:?}")] + TimeoutWhileBindingToSocket { addr: SocketAddr }, + + #[error("Failed to bind to socket: {addr:?}, with error: {err:?}")] + UnableToBindToSocket { err: Arc, addr: SocketAddr }, + + #[error("Timeout while waiting for connection to remote: {remote_addr:?}")] + TimeoutWhileConnectingToRemote { remote_addr: SocketAddr }, + + #[error("Failed to connect to remote: {remote_addr:?}, with error: {err:?}")] + UnableToConnectToRemote { + err: Arc, + remote_addr: SocketAddr, + }, + + #[error("Timeout while waiting for the socket to become writable.")] + TimeoutWaitForWriteableSocket, + + #[error("Failed to get writable socket: {err:?}")] + UnableToGetWritableSocket { err: Arc }, + + #[error("Timeout while trying to send data: {data:?}")] + TimeoutWhileSendingData { data: Vec }, + + #[error("Failed to send data: {data:?}, with error: {err:?}")] + UnableToSendData { err: Arc, data: Vec }, + + #[error("Timeout while waiting for the socket to become readable.")] + TimeoutWaitForReadableSocket, + + #[error("Failed to get readable socket: {err:?}")] + UnableToGetReadableSocket { err: Arc }, + + #[error("Timeout while trying to receive data.")] + TimeoutWhileReceivingData, + + #[error("Failed to receive data: {err:?}")] + UnableToReceivingData { err: Arc }, + + #[error("Failed to get data from request: {request:?}, with error: {err:?}")] + UnableToWriteDataFromRequest { err: Arc, request: Request }, + + #[error("Failed to parse response: {response:?}, with error: {err:?}")] + UnableToParseResponse { err: Arc, response: Vec }, +} + +impl From for DynError { + fn from(e: Error) -> Self { + Arc::new(Box::new(e)) + } +} diff --git a/src/shared/clock/mod.rs b/src/shared/clock/mod.rs deleted file mode 100644 index 922ca3200..000000000 --- a/src/shared/clock/mod.rs +++ /dev/null @@ -1,398 +0,0 @@ -//! 
Time related functions and types. -//! -//! It's usually a good idea to control where the time comes from -//! in an application so that it can be mocked for testing and it can be -//! controlled in production so we get the intended behavior without -//! relying on the specific time zone for the underlying system. -//! -//! Clocks use the type `DurationSinceUnixEpoch` which is a -//! `std::time::Duration` since the Unix Epoch (timestamp). -//! -//! ```text -//! Local time: lun 2023-03-27 16:12:00 WEST -//! Universal time: lun 2023-03-27 15:12:00 UTC -//! Time zone: Atlantic/Canary (WEST, +0100) -//! Timestamp: 1679929914 -//! Duration: 1679929914.10167426 -//! ``` -//! -//! > **NOTICE**: internally the `Duration` is stores it's main unit as seconds in a `u64` and it will -//! overflow in 584.9 billion years. -//! -//! > **NOTICE**: the timestamp does not depend on the time zone. That gives you -//! the ability to use the clock regardless of the underlying system time zone -//! configuration. See [Unix time Wikipedia entry](https://en.wikipedia.org/wiki/Unix_time). -pub mod static_time; -pub mod time_extent; -pub mod utils; - -use std::num::IntErrorKind; -use std::str::FromStr; -use std::time::Duration; - -use chrono::{DateTime, NaiveDateTime, Utc}; - -/// Duration since the Unix Epoch. -pub type DurationSinceUnixEpoch = Duration; - -/// Clock types. -#[derive(Debug)] -pub enum Type { - /// Clock that returns the current time. - WorkingClock, - /// Clock that returns always the same fixed time. - StoppedClock, -} - -/// A generic structure that represents a clock. -/// -/// It can be either the working clock (production) or the stopped clock -/// (testing). It implements the `Time` trait, which gives you the current time. -#[derive(Debug)] -pub struct Clock; - -/// The working clock. It returns the current time. -pub type Working = Clock<{ Type::WorkingClock as usize }>; -/// The stopped clock. It returns always the same fixed time. 
-pub type Stopped = Clock<{ Type::StoppedClock as usize }>; - -/// The current clock. Defined at compilation time. -/// It can be either the working clock (production) or the stopped clock (testing). -#[cfg(not(test))] -pub type Current = Working; - -/// The current clock. Defined at compilation time. -/// It can be either the working clock (production) or the stopped clock (testing). -#[cfg(test)] -pub type Current = Stopped; - -/// Trait for types that can be used as a timestamp clock. -pub trait Time: Sized { - fn now() -> DurationSinceUnixEpoch; -} - -/// Trait for types that can be manipulate the current time in order to -/// get time in the future or in the past after or before a duration of time. -pub trait TimeNow: Time { - #[must_use] - fn add(add_time: &Duration) -> Option { - Self::now().checked_add(*add_time) - } - #[must_use] - fn sub(sub_time: &Duration) -> Option { - Self::now().checked_sub(*sub_time) - } -} - -/// It converts a string in ISO 8601 format to a timestamp. -/// For example, the string `1970-01-01T00:00:00.000Z` which is the Unix Epoch -/// will be converted to a timestamp of 0: `DurationSinceUnixEpoch::ZERO`. -/// -/// # Panics -/// -/// Will panic if the input time cannot be converted to `DateTime::`, internally using the `i64` type. -/// (this will naturally happen in 292.5 billion years) -#[must_use] -pub fn convert_from_iso_8601_to_timestamp(iso_8601: &str) -> DurationSinceUnixEpoch { - convert_from_datetime_utc_to_timestamp(&DateTime::::from_str(iso_8601).unwrap()) -} - -/// It converts a `DateTime::` to a timestamp. -/// For example, the `DateTime::` of the Unix Epoch will be converted to a -/// timestamp of 0: `DurationSinceUnixEpoch::ZERO`. -/// -/// # Panics -/// -/// Will panic if the input time overflows the `u64` type. 
-/// (this will naturally happen in 584.9 billion years) -#[must_use] -pub fn convert_from_datetime_utc_to_timestamp(datetime_utc: &DateTime) -> DurationSinceUnixEpoch { - DurationSinceUnixEpoch::from_secs(u64::try_from(datetime_utc.timestamp()).expect("Overflow of u64 seconds, very future!")) -} - -/// It converts a timestamp to a `DateTime::`. -/// For example, the timestamp of 0: `DurationSinceUnixEpoch::ZERO` will be -/// converted to the `DateTime::` of the Unix Epoch. -/// -/// # Panics -/// -/// Will panic if the input time overflows the `u64` seconds overflows the `i64` type. -/// (this will naturally happen in 292.5 billion years) -#[must_use] -pub fn convert_from_timestamp_to_datetime_utc(duration: DurationSinceUnixEpoch) -> DateTime { - DateTime::::from_naive_utc_and_offset( - NaiveDateTime::from_timestamp_opt( - i64::try_from(duration.as_secs()).expect("Overflow of i64 seconds, very future!"), - duration.subsec_nanos(), - ) - .unwrap(), - Utc, - ) -} - -#[cfg(test)] -mod tests { - use std::any::TypeId; - - use crate::shared::clock::{Current, Stopped, Time, Working}; - - #[test] - fn it_should_be_the_stopped_clock_as_default_when_testing() { - // We are testing, so we should default to the fixed time. 
- assert_eq!(TypeId::of::(), TypeId::of::()); - assert_eq!(Stopped::now(), Current::now()); - } - - #[test] - fn it_should_have_different_times() { - assert_ne!(TypeId::of::(), TypeId::of::()); - assert_ne!(Stopped::now(), Working::now()); - } - - mod timestamp { - use chrono::{DateTime, NaiveDateTime, Utc}; - - use crate::shared::clock::{ - convert_from_datetime_utc_to_timestamp, convert_from_iso_8601_to_timestamp, convert_from_timestamp_to_datetime_utc, - DurationSinceUnixEpoch, - }; - - #[test] - fn should_be_converted_to_datetime_utc() { - let timestamp = DurationSinceUnixEpoch::ZERO; - assert_eq!( - convert_from_timestamp_to_datetime_utc(timestamp), - DateTime::::from_naive_utc_and_offset(NaiveDateTime::from_timestamp_opt(0, 0).unwrap(), Utc) - ); - } - - #[test] - fn should_be_converted_from_datetime_utc() { - let datetime = DateTime::::from_naive_utc_and_offset(NaiveDateTime::from_timestamp_opt(0, 0).unwrap(), Utc); - assert_eq!( - convert_from_datetime_utc_to_timestamp(&datetime), - DurationSinceUnixEpoch::ZERO - ); - } - - #[test] - fn should_be_converted_from_datetime_utc_in_iso_8601() { - let iso_8601 = "1970-01-01T00:00:00.000Z".to_string(); - assert_eq!(convert_from_iso_8601_to_timestamp(&iso_8601), DurationSinceUnixEpoch::ZERO); - } - } -} - -mod working_clock { - use std::time::SystemTime; - - use super::{DurationSinceUnixEpoch, Time, TimeNow, Working}; - - impl Time for Working { - fn now() -> DurationSinceUnixEpoch { - SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap() - } - } - - impl TimeNow for Working {} -} - -/// Trait for types that can be used as a timestamp clock stopped -/// at a given time. -pub trait StoppedTime: TimeNow { - /// It sets the clock to a given time. - fn local_set(unix_time: &DurationSinceUnixEpoch); - - /// It sets the clock to the Unix Epoch. - fn local_set_to_unix_epoch() { - Self::local_set(&DurationSinceUnixEpoch::ZERO); - } - - /// It sets the clock to the time the application started. 
- fn local_set_to_app_start_time(); - - /// It sets the clock to the current system time. - fn local_set_to_system_time_now(); - - /// It adds a `Duration` to the clock. - /// - /// # Errors - /// - /// Will return `IntErrorKind` if `duration` would overflow the internal `Duration`. - fn local_add(duration: &Duration) -> Result<(), IntErrorKind>; - - /// It subtracts a `Duration` from the clock. - /// # Errors - /// - /// Will return `IntErrorKind` if `duration` would underflow the internal `Duration`. - fn local_sub(duration: &Duration) -> Result<(), IntErrorKind>; - - /// It resets the clock to default fixed time that is application start time (or the unix epoch when testing). - fn local_reset(); -} - -mod stopped_clock { - use std::num::IntErrorKind; - use std::time::Duration; - - use super::{DurationSinceUnixEpoch, Stopped, StoppedTime, Time, TimeNow}; - - impl Time for Stopped { - fn now() -> DurationSinceUnixEpoch { - detail::FIXED_TIME.with(|time| { - return *time.borrow(); - }) - } - } - - impl TimeNow for Stopped {} - - impl StoppedTime for Stopped { - fn local_set(unix_time: &DurationSinceUnixEpoch) { - detail::FIXED_TIME.with(|time| { - *time.borrow_mut() = *unix_time; - }); - } - - fn local_set_to_app_start_time() { - Self::local_set(&detail::get_app_start_time()); - } - - fn local_set_to_system_time_now() { - Self::local_set(&detail::get_app_start_time()); - } - - fn local_add(duration: &Duration) -> Result<(), IntErrorKind> { - detail::FIXED_TIME.with(|time| { - let time_borrowed = *time.borrow(); - *time.borrow_mut() = match time_borrowed.checked_add(*duration) { - Some(time) => time, - None => { - return Err(IntErrorKind::PosOverflow); - } - }; - Ok(()) - }) - } - - fn local_sub(duration: &Duration) -> Result<(), IntErrorKind> { - detail::FIXED_TIME.with(|time| { - let time_borrowed = *time.borrow(); - *time.borrow_mut() = match time_borrowed.checked_sub(*duration) { - Some(time) => time, - None => { - return Err(IntErrorKind::NegOverflow); - } - }; 
- Ok(()) - }) - } - - fn local_reset() { - Self::local_set(&detail::get_default_fixed_time()); - } - } - - #[cfg(test)] - mod tests { - use std::thread; - use std::time::Duration; - - use crate::shared::clock::{DurationSinceUnixEpoch, Stopped, StoppedTime, Time, TimeNow, Working}; - - #[test] - fn it_should_default_to_zero_when_testing() { - assert_eq!(Stopped::now(), DurationSinceUnixEpoch::ZERO); - } - - #[test] - fn it_should_possible_to_set_the_time() { - // Check we start with ZERO. - assert_eq!(Stopped::now(), Duration::ZERO); - - // Set to Current Time and Check - let timestamp = Working::now(); - Stopped::local_set(×tamp); - assert_eq!(Stopped::now(), timestamp); - - // Elapse the Current Time and Check - Stopped::local_add(×tamp).unwrap(); - assert_eq!(Stopped::now(), timestamp + timestamp); - - // Reset to ZERO and Check - Stopped::local_reset(); - assert_eq!(Stopped::now(), Duration::ZERO); - } - - #[test] - fn it_should_default_to_zero_on_thread_exit() { - assert_eq!(Stopped::now(), Duration::ZERO); - let after5 = Working::add(&Duration::from_secs(5)).unwrap(); - Stopped::local_set(&after5); - assert_eq!(Stopped::now(), after5); - - let t = thread::spawn(move || { - // each thread starts out with the initial value of ZERO - assert_eq!(Stopped::now(), Duration::ZERO); - - // and gets set to the current time. 
- let timestamp = Working::now(); - Stopped::local_set(×tamp); - assert_eq!(Stopped::now(), timestamp); - }); - - // wait for the thread to complete and bail out on panic - t.join().unwrap(); - - // we retain our original value of current time + 5sec despite the child thread - assert_eq!(Stopped::now(), after5); - - // Reset to ZERO and Check - Stopped::local_reset(); - assert_eq!(Stopped::now(), Duration::ZERO); - } - } - - mod detail { - use std::cell::RefCell; - use std::time::SystemTime; - - use crate::shared::clock::{static_time, DurationSinceUnixEpoch}; - - pub fn get_app_start_time() -> DurationSinceUnixEpoch { - (*static_time::TIME_AT_APP_START) - .duration_since(SystemTime::UNIX_EPOCH) - .unwrap() - } - - #[cfg(not(test))] - pub fn get_default_fixed_time() -> DurationSinceUnixEpoch { - get_app_start_time() - } - - #[cfg(test)] - pub fn get_default_fixed_time() -> DurationSinceUnixEpoch { - DurationSinceUnixEpoch::ZERO - } - - thread_local!(pub static FIXED_TIME: RefCell = RefCell::new(get_default_fixed_time())); - - #[cfg(test)] - mod tests { - use std::time::Duration; - - use crate::shared::clock::stopped_clock::detail::{get_app_start_time, get_default_fixed_time}; - - #[test] - fn it_should_get_the_zero_start_time_when_testing() { - assert_eq!(get_default_fixed_time(), Duration::ZERO); - } - - #[test] - fn it_should_get_app_start_time() { - const TIME_AT_WRITING_THIS_TEST: Duration = Duration::new(1_662_983_731, 22312); - assert!(get_app_start_time() > TIME_AT_WRITING_THIS_TEST); - } - } - } -} diff --git a/src/shared/clock/utils.rs b/src/shared/clock/utils.rs deleted file mode 100644 index 94d88d288..000000000 --- a/src/shared/clock/utils.rs +++ /dev/null @@ -1,11 +0,0 @@ -//! It contains helper functions related to time. -use super::DurationSinceUnixEpoch; - -/// Serializes a `DurationSinceUnixEpoch` as a Unix timestamp in milliseconds. -/// # Errors -/// -/// Will return `serde::Serializer::Error` if unable to serialize the `unix_time_value`. 
-pub fn ser_unix_time_value(unix_time_value: &DurationSinceUnixEpoch, ser: S) -> Result { - #[allow(clippy::cast_possible_truncation)] - ser.serialize_u64(unix_time_value.as_millis() as u64) -} diff --git a/src/shared/crypto/keys.rs b/src/shared/crypto/keys.rs index 92e180996..deb70574f 100644 --- a/src/shared/crypto/keys.rs +++ b/src/shared/crypto/keys.rs @@ -86,8 +86,6 @@ pub mod seeds { #[cfg(test)] mod tests { - use std::convert::TryInto; - use crate::shared::crypto::ephemeral_instance_keys::RANDOM_SEED; use crate::shared::crypto::keys::seeds::detail::ZEROED_TEST_SEED; use crate::shared::crypto::keys::seeds::CURRENT_SEED; diff --git a/src/shared/mod.rs b/src/shared/mod.rs index f016ba913..8c95effe1 100644 --- a/src/shared/mod.rs +++ b/src/shared/mod.rs @@ -1,8 +1,6 @@ //! Modules with generic logic used by several modules. //! //! - [`bit_torrent`]: `BitTorrent` protocol related logic. -//! - [`clock`]: Times services. //! - [`crypto`]: Encryption related logic. pub mod bit_torrent; -pub mod clock; pub mod crypto; diff --git a/src/tracker/auth.rs b/src/tracker/auth.rs deleted file mode 100644 index 466187af5..000000000 --- a/src/tracker/auth.rs +++ /dev/null @@ -1,266 +0,0 @@ -//! Tracker authentication services and structs. -//! -//! This module contains functions to handle tracker keys. -//! Tracker keys are tokens used to authenticate the tracker clients when the tracker runs -//! in `private` or `private_listed` modes. -//! -//! There are services to [`generate`](crate::tracker::auth::generate) and [`verify`](crate::tracker::auth::verify) authentication keys. -//! -//! Authentication keys are used only by [`HTTP`](crate::servers::http) trackers. All keys have an expiration time, that means -//! they are only valid during a period of time. After that time the expiring key will no longer be valid. -//! -//! Keys are stored in this struct: -//! -//! ```rust,no_run -//! use torrust_tracker::tracker::auth::Key; -//! 
use torrust_tracker::shared::clock::DurationSinceUnixEpoch; -//! -//! pub struct ExpiringKey { -//! /// Random 32-char string. For example: `YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ` -//! pub key: Key, -//! /// Timestamp, the key will be no longer valid after this timestamp -//! pub valid_until: DurationSinceUnixEpoch, -//! } -//! ``` -//! -//! You can generate a new key valid for `9999` seconds and `0` nanoseconds from the current time with the following: -//! -//! ```rust,no_run -//! use torrust_tracker::tracker::auth; -//! use std::time::Duration; -//! -//! let expiring_key = auth::generate(Duration::new(9999, 0)); -//! -//! // And you can later verify it with: -//! -//! assert!(auth::verify(&expiring_key).is_ok()); -//! ``` - -use std::panic::Location; -use std::str::FromStr; -use std::sync::Arc; -use std::time::Duration; - -use derive_more::Display; -use log::debug; -use rand::distributions::Alphanumeric; -use rand::{thread_rng, Rng}; -use serde::{Deserialize, Serialize}; -use thiserror::Error; -use torrust_tracker_located_error::LocatedError; - -use crate::shared::bit_torrent::common::AUTH_KEY_LENGTH; -use crate::shared::clock::{convert_from_timestamp_to_datetime_utc, Current, DurationSinceUnixEpoch, Time, TimeNow}; - -#[must_use] -/// It generates a new random 32-char authentication [`ExpiringKey`](crate::tracker::auth::ExpiringKey) -/// -/// # Panics -/// -/// It would panic if the `lifetime: Duration` + Duration is more than `Duration::MAX`. -pub fn generate(lifetime: Duration) -> ExpiringKey { - let random_id: String = thread_rng() - .sample_iter(&Alphanumeric) - .take(AUTH_KEY_LENGTH) - .map(char::from) - .collect(); - - debug!("Generated key: {}, valid for: {:?} seconds", random_id, lifetime); - - ExpiringKey { - key: random_id.parse::().unwrap(), - valid_until: Current::add(&lifetime).unwrap(), - } -} - -/// It verifies an [`ExpiringKey`](crate::tracker::auth::ExpiringKey). It checks if the expiration date has passed. 
-/// -/// # Errors -/// -/// Will return `Error::KeyExpired` if `auth_key.valid_until` is past the `current_time`. -/// -/// Will return `Error::KeyInvalid` if `auth_key.valid_until` is past the `None`. -pub fn verify(auth_key: &ExpiringKey) -> Result<(), Error> { - let current_time: DurationSinceUnixEpoch = Current::now(); - - if auth_key.valid_until < current_time { - Err(Error::KeyExpired { - location: Location::caller(), - }) - } else { - Ok(()) - } -} - -/// An authentication key which has an expiration time. -/// After that time is will automatically become invalid. -#[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone)] -pub struct ExpiringKey { - /// Random 32-char string. For example: `YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ` - pub key: Key, - /// Timestamp, the key will be no longer valid after this timestamp - pub valid_until: DurationSinceUnixEpoch, -} - -impl std::fmt::Display for ExpiringKey { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "key: `{}`, valid until `{}`", self.key, self.expiry_time()) - } -} - -impl ExpiringKey { - #[must_use] - pub fn key(&self) -> Key { - self.key.clone() - } - - /// It returns the expiry time. For example, for the starting time for Unix Epoch - /// (timestamp 0) it will return a `DateTime` whose string representation is - /// `1970-01-01 00:00:00 UTC`. - /// - /// # Panics - /// - /// Will panic when the key timestamp overflows the internal i64 type. - /// (this will naturally happen in 292.5 billion years) - #[must_use] - pub fn expiry_time(&self) -> chrono::DateTime { - convert_from_timestamp_to_datetime_utc(self.valid_until) - } -} - -/// A randomly generated token used for authentication. -/// -/// It contains lower and uppercase letters and numbers. -/// It's a 32-char string. -#[derive(Serialize, Deserialize, Debug, Eq, PartialEq, Clone, Display, Hash)] -pub struct Key(String); - -/// Error returned when a key cannot be parsed from a string. 
-/// -/// ```rust,no_run -/// use torrust_tracker::tracker::auth::Key; -/// use std::str::FromStr; -/// -/// let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; -/// let key = Key::from_str(key_string); -/// -/// assert!(key.is_ok()); -/// assert_eq!(key.unwrap().to_string(), key_string); -/// ``` -/// -/// If the string does not contains a valid key, the parser function will return this error. -#[derive(Debug, PartialEq, Eq)] -pub struct ParseKeyError; - -impl FromStr for Key { - type Err = ParseKeyError; - - fn from_str(s: &str) -> Result { - if s.len() != AUTH_KEY_LENGTH { - return Err(ParseKeyError); - } - - Ok(Self(s.to_string())) - } -} - -/// Verification error. Error returned when an [`ExpiringKey`](crate::tracker::auth::ExpiringKey) cannot be verified with the [`verify(...)`](crate::tracker::auth::verify) function. -/// -#[derive(Debug, Error)] -#[allow(dead_code)] -pub enum Error { - #[error("Key could not be verified: {source}")] - KeyVerificationError { - source: LocatedError<'static, dyn std::error::Error + Send + Sync>, - }, - #[error("Failed to read key: {key}, {location}")] - UnableToReadKey { - location: &'static Location<'static>, - key: Box, - }, - #[error("Key has expired, {location}")] - KeyExpired { location: &'static Location<'static> }, -} - -impl From for Error { - fn from(e: r2d2_sqlite::rusqlite::Error) -> Self { - Error::KeyVerificationError { - source: (Arc::new(e) as Arc).into(), - } - } -} - -#[cfg(test)] -mod tests { - - mod key { - use std::str::FromStr; - - use crate::tracker::auth::Key; - - #[test] - fn should_be_parsed_from_an_string() { - let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; - let key = Key::from_str(key_string); - - assert!(key.is_ok()); - assert_eq!(key.unwrap().to_string(), key_string); - } - } - - mod expiring_auth_key { - use std::str::FromStr; - use std::time::Duration; - - use crate::shared::clock::{Current, StoppedTime}; - use crate::tracker::auth; - - #[test] - fn should_be_parsed_from_an_string() { - 
let key_string = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ"; - let auth_key = auth::Key::from_str(key_string); - - assert!(auth_key.is_ok()); - assert_eq!(auth_key.unwrap().to_string(), key_string); - } - - #[test] - fn should_be_displayed() { - // Set the time to the current time. - Current::local_set_to_unix_epoch(); - - let expiring_key = auth::generate(Duration::from_secs(0)); - - assert_eq!( - expiring_key.to_string(), - format!("key: `{}`, valid until `1970-01-01 00:00:00 UTC`", expiring_key.key) // cspell:disable-line - ); - } - - #[test] - fn should_be_generated_with_a_expiration_time() { - let expiring_key = auth::generate(Duration::new(9999, 0)); - - assert!(auth::verify(&expiring_key).is_ok()); - } - - #[test] - fn should_be_generate_and_verified() { - // Set the time to the current time. - Current::local_set_to_system_time_now(); - - // Make key that is valid for 19 seconds. - let expiring_key = auth::generate(Duration::from_secs(19)); - - // Mock the time has passed 10 sec. - Current::local_add(&Duration::from_secs(10)).unwrap(); - - assert!(auth::verify(&expiring_key).is_ok()); - - // Mock the time has passed another 10 sec. - Current::local_add(&Duration::from_secs(10)).unwrap(); - - assert!(auth::verify(&expiring_key).is_err()); - } - } -} diff --git a/src/tracker/torrent.rs b/src/tracker/torrent.rs deleted file mode 100644 index 4f7e28b6b..000000000 --- a/src/tracker/torrent.rs +++ /dev/null @@ -1,476 +0,0 @@ -//! Structs to store the swarm data. -//! -//! There are to main data structures: -//! -//! - A torrent [`Entry`](crate::tracker::torrent::Entry): it contains all the information stored by the tracker for one torrent. -//! - The [`SwarmMetadata`](crate::tracker::torrent::SwarmMetadata): it contains aggregate information that can me derived from the torrent entries. -//! -//! A "swarm" is a network of peers that are trying to download the same torrent. -//! -//! 
The torrent entry contains the "swarm" data, which is basically the list of peers in the swarm. -//! That's the most valuable information the peer want to get from the tracker, because it allows them to -//! start downloading torrent from those peers. -//! -//! > **NOTICE**: that both swarm data (torrent entries) and swarm metadata (aggregate counters) are related to only one torrent. -//! -//! The "swarm metadata" contains aggregate data derived from the torrent entries. There two types of data: -//! -//! - For **active peers**: metrics related to the current active peers in the swarm. -//! - **Historical data**: since the tracker started running. -//! -//! The tracker collects metrics for: -//! -//! - The number of peers that have completed downloading the torrent since the tracker started collecting metrics. -//! - The number of peers that have completed downloading the torrent and are still active, that means they are actively participating in the network, -//! by announcing themselves periodically to the tracker. Since they have completed downloading they have a full copy of the torrent data. Peers with a -//! full copy of the data are called "seeders". -//! - The number of peers that have NOT completed downloading the torrent and are still active, that means they are actively participating in the network. -//! Peer that don not have a full copy of the torrent data are called "leechers". -//! -//! > **NOTICE**: that both [`SwarmMetadata`](crate::tracker::torrent::SwarmMetadata) and [`SwarmStats`](crate::tracker::torrent::SwarmStats) contain the same information. [`SwarmMetadata`](crate::tracker::torrent::SwarmMetadata) is using the names used on [BEP 48: Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html). 
-use std::time::Duration; - -use aquatic_udp_protocol::AnnounceEvent; -use serde::{Deserialize, Serialize}; - -use super::peer::{self, Peer}; -use crate::shared::bit_torrent::common::MAX_SCRAPE_TORRENTS; -use crate::shared::clock::{Current, TimeNow}; - -/// A data structure containing all the information about a torrent in the tracker. -/// -/// This is the tracker entry for a given torrent and contains the swarm data, -/// that's the list of all the peers trying to download the same torrent. -/// The tracker keeps one entry like this for every torrent. -#[derive(Serialize, Deserialize, Clone, Debug)] -pub struct Entry { - /// The swarm: a network of peers that are all trying to download the torrent associated to this entry - #[serde(skip)] - pub peers: std::collections::BTreeMap, - /// The number of peers that have ever completed downloading the torrent associated to this entry - pub completed: u32, -} - -/// Swarm statistics for one torrent. -/// Swarm metadata dictionary in the scrape response. -/// -/// See [BEP 48: Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html) -#[derive(Debug, PartialEq, Default)] -pub struct SwarmMetadata { - /// The number of peers that have ever completed downloading - pub downloaded: u32, - /// The number of active peers that have completed downloading (seeders) - pub complete: u32, - /// The number of active peers that have not completed downloading (leechers) - pub incomplete: u32, -} - -impl SwarmMetadata { - #[must_use] - pub fn zeroed() -> Self { - Self::default() - } -} - -/// Swarm statistics for one torrent. 
-/// -/// See [BEP 48: Tracker Protocol Extension: Scrape](https://www.bittorrent.org/beps/bep_0048.html) -#[derive(Debug, PartialEq, Default)] -pub struct SwarmStats { - /// The number of peers that have ever completed downloading - pub completed: u32, - /// The number of active peers that have completed downloading (seeders) - pub seeders: u32, - /// The number of active peers that have not completed downloading (leechers) - pub leechers: u32, -} - -impl Entry { - #[must_use] - pub fn new() -> Entry { - Entry { - peers: std::collections::BTreeMap::new(), - completed: 0, - } - } - - /// It updates a peer and returns true if the number of complete downloads have increased. - /// - /// The number of peers that have complete downloading is synchronously updated when peers are updated. - /// That's the total torrent downloads counter. - pub fn update_peer(&mut self, peer: &peer::Peer) -> bool { - let mut did_torrent_stats_change: bool = false; - - match peer.event { - AnnounceEvent::Stopped => { - let _: Option = self.peers.remove(&peer.peer_id); - } - AnnounceEvent::Completed => { - let peer_old = self.peers.insert(peer.peer_id, *peer); - // Don't count if peer was not previously known - if peer_old.is_some() { - self.completed += 1; - did_torrent_stats_change = true; - } - } - _ => { - let _: Option = self.peers.insert(peer.peer_id, *peer); - } - } - - did_torrent_stats_change - } - - /// Get all swarm peers, limiting the result to the maximum number of scrape - /// torrents. - #[must_use] - pub fn get_all_peers(&self) -> Vec<&peer::Peer> { - self.peers.values().take(MAX_SCRAPE_TORRENTS as usize).collect() - } - - /// It returns the list of peers for a given peer client, limiting the - /// result to the maximum number of scrape torrents. - /// - /// It filters out the input peer, typically because we want to return this - /// list of peers to that client peer. 
- #[must_use] - pub fn get_peers_for_peer(&self, client: &Peer) -> Vec<&peer::Peer> { - self.peers - .values() - // Take peers which are not the client peer - .filter(|peer| peer.peer_addr != client.peer_addr) - // Limit the number of peers on the result - .take(MAX_SCRAPE_TORRENTS as usize) - .collect() - } - - /// It returns the swarm metadata (statistics) as a tuple: - /// - /// `(seeders, completed, leechers)` - #[allow(clippy::cast_possible_truncation)] - #[must_use] - pub fn get_stats(&self) -> (u32, u32, u32) { - let seeders: u32 = self.peers.values().filter(|peer| peer.is_seeder()).count() as u32; - let leechers: u32 = self.peers.len() as u32 - seeders; - (seeders, self.completed, leechers) - } - - /// It returns the swarm metadata (statistics) as an struct - #[must_use] - pub fn get_swarm_metadata(&self) -> SwarmMetadata { - // code-review: consider using always this function instead of `get_stats`. - let (seeders, completed, leechers) = self.get_stats(); - SwarmMetadata { - complete: seeders, - downloaded: completed, - incomplete: leechers, - } - } - - /// It removes peer from the swarm that have not been updated for more than `max_peer_timeout` seconds - pub fn remove_inactive_peers(&mut self, max_peer_timeout: u32) { - let current_cutoff = Current::sub(&Duration::from_secs(u64::from(max_peer_timeout))).unwrap_or_default(); - self.peers.retain(|_, peer| peer.updated > current_cutoff); - } -} - -impl Default for Entry { - fn default() -> Self { - Self::new() - } -} - -#[cfg(test)] -mod tests { - - mod torrent_entry { - - use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - use std::ops::Sub; - use std::time::Duration; - - use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; - - use crate::shared::clock::{Current, DurationSinceUnixEpoch, Stopped, StoppedTime, Time, Working}; - use crate::tracker::peer; - use crate::tracker::torrent::Entry; - - struct TorrentPeerBuilder { - peer: peer::Peer, - } - - impl TorrentPeerBuilder { - pub fn default() -> 
TorrentPeerBuilder { - let default_peer = peer::Peer { - peer_id: peer::Id([0u8; 20]), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080), - updated: Current::now(), - uploaded: NumberOfBytes(0), - downloaded: NumberOfBytes(0), - left: NumberOfBytes(0), - event: AnnounceEvent::Started, - }; - TorrentPeerBuilder { peer: default_peer } - } - - pub fn with_event_completed(mut self) -> Self { - self.peer.event = AnnounceEvent::Completed; - self - } - - pub fn with_peer_address(mut self, peer_addr: SocketAddr) -> Self { - self.peer.peer_addr = peer_addr; - self - } - - pub fn with_peer_id(mut self, peer_id: peer::Id) -> Self { - self.peer.peer_id = peer_id; - self - } - - pub fn with_number_of_bytes_left(mut self, left: i64) -> Self { - self.peer.left = NumberOfBytes(left); - self - } - - pub fn updated_at(mut self, updated: DurationSinceUnixEpoch) -> Self { - self.peer.updated = updated; - self - } - - pub fn into(self) -> peer::Peer { - self.peer - } - } - - /// A torrent seeder is a peer with 0 bytes left to download which - /// has not announced it has stopped - fn a_torrent_seeder() -> peer::Peer { - TorrentPeerBuilder::default() - .with_number_of_bytes_left(0) - .with_event_completed() - .into() - } - - /// A torrent leecher is a peer that is not a seeder. 
- /// Leecher: left > 0 OR event = Stopped - fn a_torrent_leecher() -> peer::Peer { - TorrentPeerBuilder::default() - .with_number_of_bytes_left(1) - .with_event_completed() - .into() - } - - #[test] - fn the_default_torrent_entry_should_contain_an_empty_list_of_peers() { - let torrent_entry = Entry::new(); - - assert_eq!(torrent_entry.get_all_peers().len(), 0); - } - - #[test] - fn a_new_peer_can_be_added_to_a_torrent_entry() { - let mut torrent_entry = Entry::new(); - let torrent_peer = TorrentPeerBuilder::default().into(); - - torrent_entry.update_peer(&torrent_peer); // Add the peer - - assert_eq!(*torrent_entry.get_all_peers()[0], torrent_peer); - assert_eq!(torrent_entry.get_all_peers().len(), 1); - } - - #[test] - fn a_torrent_entry_should_contain_the_list_of_peers_that_were_added_to_the_torrent() { - let mut torrent_entry = Entry::new(); - let torrent_peer = TorrentPeerBuilder::default().into(); - - torrent_entry.update_peer(&torrent_peer); // Add the peer - - assert_eq!(torrent_entry.get_all_peers(), vec![&torrent_peer]); - } - - #[test] - fn a_peer_can_be_updated_in_a_torrent_entry() { - let mut torrent_entry = Entry::new(); - let mut torrent_peer = TorrentPeerBuilder::default().into(); - torrent_entry.update_peer(&torrent_peer); // Add the peer - - torrent_peer.event = AnnounceEvent::Completed; // Update the peer - torrent_entry.update_peer(&torrent_peer); // Update the peer in the torrent entry - - assert_eq!(torrent_entry.get_all_peers()[0].event, AnnounceEvent::Completed); - } - - #[test] - fn a_peer_should_be_removed_from_a_torrent_entry_when_the_peer_announces_it_has_stopped() { - let mut torrent_entry = Entry::new(); - let mut torrent_peer = TorrentPeerBuilder::default().into(); - torrent_entry.update_peer(&torrent_peer); // Add the peer - - torrent_peer.event = AnnounceEvent::Stopped; // Update the peer - torrent_entry.update_peer(&torrent_peer); // Update the peer in the torrent entry - - assert_eq!(torrent_entry.get_all_peers().len(), 0); - } - 
- #[test] - fn torrent_stats_change_when_a_previously_known_peer_announces_it_has_completed_the_torrent() { - let mut torrent_entry = Entry::new(); - let mut torrent_peer = TorrentPeerBuilder::default().into(); - - torrent_entry.update_peer(&torrent_peer); // Add the peer - - torrent_peer.event = AnnounceEvent::Completed; // Update the peer - let stats_have_changed = torrent_entry.update_peer(&torrent_peer); // Update the peer in the torrent entry - - assert!(stats_have_changed); - } - - #[test] - fn torrent_stats_should_not_change_when_a_peer_announces_it_has_completed_the_torrent_if_it_is_the_first_announce_from_the_peer( - ) { - let mut torrent_entry = Entry::new(); - let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); - - // Add a peer that did not exist before in the entry - let torrent_stats_have_not_changed = !torrent_entry.update_peer(&torrent_peer_announcing_complete_event); - - assert!(torrent_stats_have_not_changed); - } - - #[test] - fn a_torrent_entry_should_return_the_list_of_peers_for_a_given_peer_filtering_out_the_client_that_is_making_the_request() - { - let mut torrent_entry = Entry::new(); - let peer_socket_address = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080); - let torrent_peer = TorrentPeerBuilder::default().with_peer_address(peer_socket_address).into(); - torrent_entry.update_peer(&torrent_peer); // Add peer - - // Get peers excluding the one we have just added - let peers = torrent_entry.get_peers_for_peer(&torrent_peer); - - assert_eq!(peers.len(), 0); - } - - #[test] - fn two_peers_with_the_same_ip_but_different_port_should_be_considered_different_peers() { - let mut torrent_entry = Entry::new(); - - let peer_ip = IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)); - - // Add peer 1 - let torrent_peer_1 = TorrentPeerBuilder::default() - .with_peer_address(SocketAddr::new(peer_ip, 8080)) - .into(); - torrent_entry.update_peer(&torrent_peer_1); - - // Add peer 2 - let 
torrent_peer_2 = TorrentPeerBuilder::default() - .with_peer_address(SocketAddr::new(peer_ip, 8081)) - .into(); - torrent_entry.update_peer(&torrent_peer_2); - - // Get peers for peer 1 - let peers = torrent_entry.get_peers_for_peer(&torrent_peer_1); - - // The peer 2 using the same IP but different port should be included - assert_eq!(peers[0].peer_addr.ip(), Ipv4Addr::new(127, 0, 0, 1)); - assert_eq!(peers[0].peer_addr.port(), 8081); - } - - fn peer_id_from_i32(number: i32) -> peer::Id { - let peer_id = number.to_le_bytes(); - peer::Id([ - 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, 0u8, peer_id[0], peer_id[1], - peer_id[2], peer_id[3], - ]) - } - - #[test] - fn the_tracker_should_limit_the_list_of_peers_to_74_when_clients_scrape_torrents() { - let mut torrent_entry = Entry::new(); - - // We add one more peer than the scrape limit - for peer_number in 1..=74 + 1 { - let torrent_peer = TorrentPeerBuilder::default() - .with_peer_id(peer_id_from_i32(peer_number)) - .into(); - torrent_entry.update_peer(&torrent_peer); - } - - let peers = torrent_entry.get_all_peers(); - - assert_eq!(peers.len(), 74); - } - - #[test] - fn torrent_stats_should_have_the_number_of_seeders_for_a_torrent() { - let mut torrent_entry = Entry::new(); - let torrent_seeder = a_torrent_seeder(); - - torrent_entry.update_peer(&torrent_seeder); // Add seeder - - assert_eq!(torrent_entry.get_stats().0, 1); - } - - #[test] - fn torrent_stats_should_have_the_number_of_leechers_for_a_torrent() { - let mut torrent_entry = Entry::new(); - let torrent_leecher = a_torrent_leecher(); - - torrent_entry.update_peer(&torrent_leecher); // Add leecher - - assert_eq!(torrent_entry.get_stats().2, 1); - } - - #[test] - fn torrent_stats_should_have_the_number_of_peers_that_having_announced_at_least_two_events_the_latest_one_is_the_completed_event( - ) { - let mut torrent_entry = Entry::new(); - let mut torrent_peer = TorrentPeerBuilder::default().into(); - 
torrent_entry.update_peer(&torrent_peer); // Add the peer - - // Announce "Completed" torrent download event. - torrent_peer.event = AnnounceEvent::Completed; - torrent_entry.update_peer(&torrent_peer); // Update the peer - - let number_of_previously_known_peers_with_completed_torrent = torrent_entry.get_stats().1; - - assert_eq!(number_of_previously_known_peers_with_completed_torrent, 1); - } - - #[test] - fn torrent_stats_should_not_include_a_peer_in_the_completed_counter_if_the_peer_has_announced_only_one_event() { - let mut torrent_entry = Entry::new(); - let torrent_peer_announcing_complete_event = TorrentPeerBuilder::default().with_event_completed().into(); - - // Announce "Completed" torrent download event. - // It's the first event announced from this peer. - torrent_entry.update_peer(&torrent_peer_announcing_complete_event); // Add the peer - - let number_of_peers_with_completed_torrent = torrent_entry.get_stats().1; - - assert_eq!(number_of_peers_with_completed_torrent, 0); - } - - #[test] - fn a_torrent_entry_should_remove_a_peer_not_updated_after_a_timeout_in_seconds() { - let mut torrent_entry = Entry::new(); - - let timeout = 120u32; - - let now = Working::now(); - Stopped::local_set(&now); - - let timeout_seconds_before_now = now.sub(Duration::from_secs(u64::from(timeout))); - let inactive_peer = TorrentPeerBuilder::default() - .updated_at(timeout_seconds_before_now.sub(Duration::from_secs(1))) - .into(); - torrent_entry.update_peer(&inactive_peer); // Add the peer - - torrent_entry.remove_inactive_peers(timeout); - - assert_eq!(torrent_entry.peers.len(), 0); - } - } -} diff --git a/tests/README.md b/tests/README.md deleted file mode 100644 index 04860056c..000000000 --- a/tests/README.md +++ /dev/null @@ -1,9 +0,0 @@ -### Running Benchmarks - -#### HTTP(S) Announce Peer + Torrent -For this benchmark we use the tool [wrk](https://github.com/wg/wrk). 
- -To run the benchmark using wrk, execute the following example script (change the url to your own tracker url): - - wrk -c200 -t1 -d10s -s ./wrk_benchmark_announce.lua --latency http://tracker.dutchbits.nl - diff --git a/tests/common/app.rs b/tests/common/app.rs deleted file mode 100644 index ee3fba064..000000000 --- a/tests/common/app.rs +++ /dev/null @@ -1,8 +0,0 @@ -use std::sync::Arc; - -use torrust_tracker::bootstrap; -use torrust_tracker::tracker::Tracker; - -pub fn setup_with_configuration(configuration: &Arc) -> Arc { - bootstrap::app::initialize_with_configuration(configuration) -} diff --git a/tests/common/clock.rs b/tests/common/clock.rs new file mode 100644 index 000000000..5d94bb83d --- /dev/null +++ b/tests/common/clock.rs @@ -0,0 +1,16 @@ +use std::time::Duration; + +use torrust_tracker_clock::clock::Time; + +use crate::CurrentClock; + +#[test] +fn it_should_use_stopped_time_for_testing() { + assert_eq!(CurrentClock::dbg_clock_type(), "Stopped".to_owned()); + + let time = CurrentClock::now(); + std::thread::sleep(Duration::from_millis(50)); + let time_2 = CurrentClock::now(); + + assert_eq!(time, time_2); +} diff --git a/tests/common/fixtures.rs b/tests/common/fixtures.rs index 7062c8376..bbdebff76 100644 --- a/tests/common/fixtures.rs +++ b/tests/common/fixtures.rs @@ -1,69 +1,3 @@ -use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - -use aquatic_udp_protocol::{AnnounceEvent, NumberOfBytes}; -use torrust_tracker::shared::clock::DurationSinceUnixEpoch; -use torrust_tracker::tracker::peer::{self, Id, Peer}; - -pub struct PeerBuilder { - peer: Peer, -} - -impl PeerBuilder { - #[allow(dead_code)] - pub fn default() -> PeerBuilder { - Self { - peer: default_peer_for_testing(), - } - } - - #[allow(dead_code)] - pub fn with_peer_id(mut self, peer_id: &Id) -> Self { - self.peer.peer_id = *peer_id; - self - } - - #[allow(dead_code)] - pub fn with_peer_addr(mut self, peer_addr: &SocketAddr) -> Self { - self.peer.peer_addr = *peer_addr; - self - } - - 
#[allow(dead_code)] - pub fn with_bytes_pending_to_download(mut self, left: i64) -> Self { - self.peer.left = NumberOfBytes(left); - self - } - - #[allow(dead_code)] - pub fn with_no_bytes_pending_to_download(mut self) -> Self { - self.peer.left = NumberOfBytes(0); - self - } - - #[allow(dead_code)] - pub fn build(self) -> Peer { - self.into() - } - - #[allow(dead_code)] - pub fn into(self) -> Peer { - self.peer - } -} - -#[allow(dead_code)] -fn default_peer_for_testing() -> Peer { - Peer { - peer_id: peer::Id(*b"-qB00000000000000000"), - peer_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(126, 0, 0, 1)), 8080), - updated: DurationSinceUnixEpoch::new(1_669_397_478_934, 0), - uploaded: NumberOfBytes(0), - downloaded: NumberOfBytes(0), - left: NumberOfBytes(0), - event: AnnounceEvent::Started, - } -} - #[allow(dead_code)] pub fn invalid_info_hashes() -> Vec { [ diff --git a/tests/common/mod.rs b/tests/common/mod.rs index 51a8a5b03..281c1fb9c 100644 --- a/tests/common/mod.rs +++ b/tests/common/mod.rs @@ -1,4 +1,4 @@ -pub mod app; +pub mod clock; pub mod fixtures; pub mod http; pub mod udp; diff --git a/tests/integration.rs b/tests/integration.rs index 5d66d9074..8e3d46826 100644 --- a/tests/integration.rs +++ b/tests/integration.rs @@ -3,5 +3,18 @@ //! ```text //! cargo test --test integration //! ``` + +use torrust_tracker_clock::clock; mod common; mod servers; + +/// This code needs to be copied into each crate. +/// Working version, for production. +#[cfg(not(test))] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Working; + +/// Stopped version, for testing. 
+#[cfg(test)] +#[allow(dead_code)] +pub(crate) type CurrentClock = clock::Stopped; diff --git a/tests/servers/api/environment.rs b/tests/servers/api/environment.rs new file mode 100644 index 000000000..92ef7b70b --- /dev/null +++ b/tests/servers/api/environment.rs @@ -0,0 +1,90 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use futures::executor::block_on; +use torrust_tracker::bootstrap::app::initialize_with_configuration; +use torrust_tracker::bootstrap::jobs::make_rust_tls; +use torrust_tracker::core::Tracker; +use torrust_tracker::servers::apis::server::{ApiServer, Launcher, Running, Stopped}; +use torrust_tracker::servers::registar::Registar; +use torrust_tracker_configuration::{Configuration, HttpApi}; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::peer; + +use super::connection_info::ConnectionInfo; + +pub struct Environment { + pub config: Arc, + pub tracker: Arc, + pub registar: Registar, + pub server: ApiServer, +} + +impl Environment { + /// Add a torrent to the tracker + pub fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { + self.tracker.upsert_peer_and_get_stats(info_hash, peer); + } +} + +impl Environment { + pub fn new(configuration: &Arc) -> Self { + let tracker = initialize_with_configuration(configuration); + + let config = Arc::new(configuration.http_api.clone().expect("missing API configuration")); + + let bind_to = config.bind_address; + + let tls = block_on(make_rust_tls(&config.tsl_config)).map(|tls| tls.expect("tls config failed")); + + let server = ApiServer::new(Launcher::new(bind_to, tls)); + + Self { + config, + tracker, + registar: Registar::default(), + server, + } + } + + pub async fn start(self) -> Environment { + let access_tokens = Arc::new(self.config.access_tokens.clone()); + + Environment { + config: self.config, + tracker: self.tracker.clone(), + registar: self.registar.clone(), + server: self + .server + .start(self.tracker, self.registar.give_form(), 
access_tokens) + .await + .unwrap(), + } + } +} + +impl Environment { + pub async fn new(configuration: &Arc) -> Self { + Environment::::new(configuration).start().await + } + + pub async fn stop(self) -> Environment { + Environment { + config: self.config, + tracker: self.tracker, + registar: Registar::default(), + server: self.server.stop().await.unwrap(), + } + } + + pub fn get_connection_info(&self) -> ConnectionInfo { + ConnectionInfo { + bind_address: self.server.state.binding.to_string(), + api_token: self.config.access_tokens.get("admin").cloned(), + } + } + + pub fn bind_address(&self) -> SocketAddr { + self.server.state.binding + } +} diff --git a/tests/servers/api/mod.rs b/tests/servers/api/mod.rs index 7022da9b4..38df46e9b 100644 --- a/tests/servers/api/mod.rs +++ b/tests/servers/api/mod.rs @@ -1,14 +1,20 @@ use std::sync::Arc; -use torrust_tracker::tracker::Tracker; +use torrust_tracker::core::Tracker; +use torrust_tracker::servers::apis::server; pub mod connection_info; -pub mod test_environment; +pub mod environment; pub mod v1; +pub type Started = environment::Environment; + /// It forces a database error by dropping all tables. /// That makes any query fail. -/// code-review: alternatively we could inject a database mock in the future. +/// code-review: +/// Alternatively we could: +/// - Inject a database mock in the future. +/// - Inject directly the database reference passed to the Tracker type. 
pub fn force_database_error(tracker: &Arc) { - tracker.database.drop_database_tables().unwrap(); + tracker.drop_database_tables().unwrap(); } diff --git a/tests/servers/api/test_environment.rs b/tests/servers/api/test_environment.rs deleted file mode 100644 index dbb23dcfa..000000000 --- a/tests/servers/api/test_environment.rs +++ /dev/null @@ -1,105 +0,0 @@ -use std::sync::Arc; - -use torrust_tracker::servers::apis::server::{ApiServer, RunningApiServer, StoppedApiServer}; -use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; -use torrust_tracker::tracker::peer::Peer; -use torrust_tracker::tracker::Tracker; - -use super::connection_info::ConnectionInfo; -use crate::common::app::setup_with_configuration; - -#[allow(clippy::module_name_repetitions, dead_code)] -pub type StoppedTestEnvironment = TestEnvironment; -#[allow(clippy::module_name_repetitions)] -pub type RunningTestEnvironment = TestEnvironment; - -pub struct TestEnvironment { - pub cfg: Arc, - pub tracker: Arc, - pub state: S, -} - -#[allow(dead_code)] -pub struct Stopped { - api_server: StoppedApiServer, -} - -pub struct Running { - api_server: RunningApiServer, -} - -impl TestEnvironment { - /// Add a torrent to the tracker - pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &Peer) { - self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await; - } -} - -impl TestEnvironment { - pub fn new_stopped(cfg: torrust_tracker_configuration::Configuration) -> Self { - let cfg = Arc::new(cfg); - - let tracker = setup_with_configuration(&cfg); - - let api_server = api_server(cfg.http_api.clone()); - - Self { - cfg, - tracker, - state: Stopped { api_server }, - } - } - - pub async fn start(self) -> TestEnvironment { - TestEnvironment { - cfg: self.cfg, - tracker: self.tracker.clone(), - state: Running { - api_server: self.state.api_server.start(self.tracker).await.unwrap(), - }, - } - } - - pub fn config_mut(&mut self) -> &mut torrust_tracker_configuration::HttpApi { - &mut 
self.state.api_server.cfg - } -} - -impl TestEnvironment { - pub async fn new_running(cfg: torrust_tracker_configuration::Configuration) -> Self { - let test_env = StoppedTestEnvironment::new_stopped(cfg); - - test_env.start().await - } - - pub async fn stop(self) -> TestEnvironment { - TestEnvironment { - cfg: self.cfg, - tracker: self.tracker, - state: Stopped { - api_server: self.state.api_server.stop().await.unwrap(), - }, - } - } - - pub fn get_connection_info(&self) -> ConnectionInfo { - ConnectionInfo { - bind_address: self.state.api_server.state.bind_addr.to_string(), - api_token: self.state.api_server.cfg.access_tokens.get("admin").cloned(), - } - } -} - -#[allow(clippy::module_name_repetitions)] -pub fn stopped_test_environment(cfg: torrust_tracker_configuration::Configuration) -> StoppedTestEnvironment { - TestEnvironment::new_stopped(cfg) -} - -#[allow(clippy::module_name_repetitions)] -pub async fn running_test_environment(cfg: torrust_tracker_configuration::Configuration) -> RunningTestEnvironment { - TestEnvironment::new_running(cfg).await -} - -pub fn api_server(cfg: torrust_tracker_configuration::HttpApi) -> StoppedApiServer { - ApiServer::new(cfg) -} diff --git a/tests/servers/api/v1/asserts.rs b/tests/servers/api/v1/asserts.rs index 955293db1..aeecfa170 100644 --- a/tests/servers/api/v1/asserts.rs +++ b/tests/servers/api/v1/asserts.rs @@ -61,6 +61,18 @@ pub async fn assert_bad_request(response: Response, body: &str) { assert_eq!(response.text().await.unwrap(), body); } +pub async fn assert_bad_request_with_text(response: Response, text: &str) { + assert_eq!(response.status(), 400); + assert_eq!(response.headers().get("content-type").unwrap(), "text/plain; charset=utf-8"); + assert!(response.text().await.unwrap().contains(text)); +} + +pub async fn assert_unprocessable_content(response: Response, text: &str) { + assert_eq!(response.status(), 422); + assert_eq!(response.headers().get("content-type").unwrap(), "text/plain; charset=utf-8"); + 
assert!(response.text().await.unwrap().contains(text)); +} + pub async fn assert_not_found(response: Response) { assert_eq!(response.status(), 404); // todo: missing header in the response @@ -82,10 +94,26 @@ pub async fn assert_invalid_infohash_param(response: Response, invalid_infohash: .await; } -pub async fn assert_invalid_auth_key_param(response: Response, invalid_auth_key: &str) { +pub async fn assert_invalid_auth_key_get_param(response: Response, invalid_auth_key: &str) { assert_bad_request(response, &format!("Invalid auth key id param \"{}\"", &invalid_auth_key)).await; } +pub async fn assert_invalid_auth_key_post_param(response: Response, invalid_auth_key: &str) { + assert_bad_request_with_text( + response, + &format!("Invalid URL: invalid auth key: string \"{}\"", &invalid_auth_key), + ) + .await; +} + +pub async fn assert_unprocessable_auth_key_duration_param(response: Response, _invalid_value: &str) { + assert_unprocessable_content( + response, + "Failed to deserialize the JSON body into the target type: seconds_valid: invalid type", + ) + .await; +} + pub async fn assert_invalid_key_duration_param(response: Response, invalid_key_duration: &str) { assert_bad_request( response, diff --git a/tests/servers/api/v1/client.rs b/tests/servers/api/v1/client.rs index 2b6db2e77..3d95c10ca 100644 --- a/tests/servers/api/v1/client.rs +++ b/tests/servers/api/v1/client.rs @@ -1,4 +1,5 @@ use reqwest::Response; +use serde::Serialize; use crate::common::http::{Query, QueryParam, ReqwestQuery}; use crate::servers::api::connection_info::ConnectionInfo; @@ -18,7 +19,11 @@ impl Client { } pub async fn generate_auth_key(&self, seconds_valid: i32) -> Response { - self.post(&format!("key/{}", &seconds_valid)).await + self.post_empty(&format!("key/{}", &seconds_valid)).await + } + + pub async fn add_auth_key(&self, add_key_form: AddKeyForm) -> Response { + self.post_form("keys", &add_key_form).await } pub async fn delete_auth_key(&self, key: &str) -> Response { @@ -30,7 +35,7 
@@ impl Client { } pub async fn whitelist_a_torrent(&self, info_hash: &str) -> Response { - self.post(&format!("whitelist/{}", &info_hash)).await + self.post_empty(&format!("whitelist/{}", &info_hash)).await } pub async fn remove_torrent_from_whitelist(&self, info_hash: &str) -> Response { @@ -63,10 +68,20 @@ impl Client { self.get_request_with_query(path, query).await } - pub async fn post(&self, path: &str) -> Response { + pub async fn post_empty(&self, path: &str) -> Response { + reqwest::Client::new() + .post(self.base_url(path).clone()) + .query(&ReqwestQuery::from(self.query_with_token())) + .send() + .await + .unwrap() + } + + pub async fn post_form(&self, path: &str, form: &T) -> Response { reqwest::Client::new() .post(self.base_url(path).clone()) .query(&ReqwestQuery::from(self.query_with_token())) + .json(&form) .send() .await .unwrap() @@ -101,7 +116,7 @@ impl Client { } } -async fn get(path: &str, query: Option) -> Response { +pub async fn get(path: &str, query: Option) -> Response { match query { Some(params) => reqwest::Client::builder() .build() @@ -114,3 +129,10 @@ async fn get(path: &str, query: Option) -> Response { None => reqwest::Client::builder().build().unwrap().get(path).send().await.unwrap(), } } + +#[derive(Serialize, Debug)] +pub struct AddKeyForm { + #[serde(rename = "key")] + pub opt_key: Option, + pub seconds_valid: Option, +} diff --git a/tests/servers/api/v1/contract/authentication.rs b/tests/servers/api/v1/contract/authentication.rs index fb8de1810..49981dd02 100644 --- a/tests/servers/api/v1/contract/authentication.rs +++ b/tests/servers/api/v1/contract/authentication.rs @@ -1,83 +1,83 @@ use torrust_tracker_test_helpers::configuration; use crate::common::http::{Query, QueryParam}; -use crate::servers::api::test_environment::running_test_environment; use crate::servers::api::v1::asserts::{assert_token_not_valid, assert_unauthorized}; use crate::servers::api::v1::client::Client; +use crate::servers::api::Started; #[tokio::test] 
async fn should_authenticate_requests_by_using_a_token_query_param() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; - let token = test_env.get_connection_info().api_token.unwrap(); + let token = env.get_connection_info().api_token.unwrap(); - let response = Client::new(test_env.get_connection_info()) + let response = Client::new(env.get_connection_info()) .get_request_with_query("stats", Query::params([QueryParam::new("token", &token)].to_vec())) .await; assert_eq!(response.status(), 200); - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_not_authenticate_requests_when_the_token_is_missing() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; - let response = Client::new(test_env.get_connection_info()) + let response = Client::new(env.get_connection_info()) .get_request_with_query("stats", Query::default()) .await; assert_unauthorized(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_not_authenticate_requests_when_the_token_is_empty() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; - let response = Client::new(test_env.get_connection_info()) + let response = Client::new(env.get_connection_info()) .get_request_with_query("stats", Query::params([QueryParam::new("token", "")].to_vec())) .await; assert_token_not_valid(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_not_authenticate_requests_when_the_token_is_invalid() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; - let response = Client::new(test_env.get_connection_info()) + let response = 
Client::new(env.get_connection_info()) .get_request_with_query("stats", Query::params([QueryParam::new("token", "INVALID TOKEN")].to_vec())) .await; assert_token_not_valid(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_allow_the_token_query_param_to_be_at_any_position_in_the_url_query() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; - let token = test_env.get_connection_info().api_token.unwrap(); + let token = env.get_connection_info().api_token.unwrap(); // At the beginning of the query component - let response = Client::new(test_env.get_connection_info()) + let response = Client::new(env.get_connection_info()) .get_request(&format!("torrents?token={token}&limit=1")) .await; assert_eq!(response.status(), 200); // At the end of the query component - let response = Client::new(test_env.get_connection_info()) + let response = Client::new(env.get_connection_info()) .get_request(&format!("torrents?limit=1&token={token}")) .await; assert_eq!(response.status(), 200); - test_env.stop().await; + env.stop().await; } diff --git a/tests/servers/api/v1/contract/configuration.rs b/tests/servers/api/v1/contract/configuration.rs index cfdb59b0c..4220f62d2 100644 --- a/tests/servers/api/v1/contract/configuration.rs +++ b/tests/servers/api/v1/contract/configuration.rs @@ -1,18 +1,33 @@ -use torrust_tracker_test_helpers::configuration; +// use std::sync::Arc; -use crate::servers::api::test_environment::stopped_test_environment; +// use axum_server::tls_rustls::RustlsConfig; +// use futures::executor::block_on; +// use torrust_tracker_test_helpers::configuration; + +// use crate::common::app::setup_with_configuration; +// use crate::servers::api::environment::stopped_environment; #[tokio::test] #[ignore] #[should_panic = "Could not receive bind_address."] async fn should_fail_with_ssl_enabled_and_bad_ssl_config() { - let mut test_env = 
stopped_test_environment(configuration::ephemeral()); + // let tracker = setup_with_configuration(&Arc::new(configuration::ephemeral())); + + // let config = tracker.config.http_api.clone(); + + // let bind_to = config + // .bind_address + // .parse::() + // .expect("Tracker API bind_address invalid."); - let cfg = test_env.config_mut(); + // let tls = + // if let (true, Some(cert), Some(key)) = (&true, &Some("bad cert path".to_string()), &Some("bad cert path".to_string())) { + // Some(block_on(RustlsConfig::from_pem_file(cert, key)).expect("Could not read tls cert.")) + // } else { + // None + // }; - cfg.ssl_enabled = true; - cfg.ssl_key_path = Some("bad key path".to_string()); - cfg.ssl_cert_path = Some("bad cert path".to_string()); + // let env = new_stopped(tracker, bind_to, tls); - test_env.start().await; + // env.start().await; } diff --git a/tests/servers/api/v1/contract/context/auth_key.rs b/tests/servers/api/v1/contract/context/auth_key.rs index a99272e84..41f421ca6 100644 --- a/tests/servers/api/v1/contract/context/auth_key.rs +++ b/tests/servers/api/v1/contract/context/auth_key.rs @@ -1,122 +1,199 @@ use std::time::Duration; -use torrust_tracker::tracker::auth::Key; +use serde::Serialize; +use torrust_tracker::core::auth::Key; use torrust_tracker_test_helpers::configuration; use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; -use crate::servers::api::force_database_error; -use crate::servers::api::test_environment::running_test_environment; use crate::servers::api::v1::asserts::{ assert_auth_key_utf8, assert_failed_to_delete_key, assert_failed_to_generate_key, assert_failed_to_reload_keys, - assert_invalid_auth_key_param, assert_invalid_key_duration_param, assert_ok, assert_token_not_valid, assert_unauthorized, + assert_invalid_auth_key_get_param, assert_invalid_auth_key_post_param, assert_ok, assert_token_not_valid, + assert_unauthorized, assert_unprocessable_auth_key_duration_param, }; -use 
crate::servers::api::v1::client::Client; +use crate::servers::api::v1::client::{AddKeyForm, Client}; +use crate::servers::api::{force_database_error, Started}; #[tokio::test] -async fn should_allow_generating_a_new_auth_key() { - let test_env = running_test_environment(configuration::ephemeral()).await; +async fn should_allow_generating_a_new_random_auth_key() { + let env = Started::new(&configuration::ephemeral().into()).await; + + let response = Client::new(env.get_connection_info()) + .add_auth_key(AddKeyForm { + opt_key: None, + seconds_valid: Some(60), + }) + .await; - let seconds_valid = 60; + let auth_key_resource = assert_auth_key_utf8(response).await; + + assert!(env + .tracker + .authenticate(&auth_key_resource.key.parse::().unwrap()) + .await + .is_ok()); - let response = Client::new(test_env.get_connection_info()) - .generate_auth_key(seconds_valid) + env.stop().await; +} + +#[tokio::test] +async fn should_allow_uploading_a_preexisting_auth_key() { + let env = Started::new(&configuration::ephemeral().into()).await; + + let response = Client::new(env.get_connection_info()) + .add_auth_key(AddKeyForm { + opt_key: Some("Xc1L4PbQJSFGlrgSRZl8wxSFAuMa21z5".to_string()), + seconds_valid: Some(60), + }) .await; let auth_key_resource = assert_auth_key_utf8(response).await; - // Verify the key with the tracker - assert!(test_env + assert!(env .tracker - .verify_auth_key(&auth_key_resource.key.parse::().unwrap()) + .authenticate(&auth_key_resource.key.parse::().unwrap()) .await .is_ok()); - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_not_allow_generating_a_new_auth_key_for_unauthenticated_users() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; - let seconds_valid = 60; - - let response = Client::new(connection_with_invalid_token( - test_env.get_connection_info().bind_address.as_str(), - )) - .generate_auth_key(seconds_valid) - .await; 
+ let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) + .add_auth_key(AddKeyForm { + opt_key: None, + seconds_valid: Some(60), + }) + .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) - .generate_auth_key(seconds_valid) + let response = Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) + .add_auth_key(AddKeyForm { + opt_key: None, + seconds_valid: Some(60), + }) .await; assert_unauthorized(response).await; - test_env.stop().await; -} - -#[tokio::test] -async fn should_fail_generating_a_new_auth_key_when_the_key_duration_is_invalid() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let invalid_key_durations = [ - // "", it returns 404 - // " ", it returns 404 - "-1", "text", - ]; - - for invalid_key_duration in invalid_key_durations { - let response = Client::new(test_env.get_connection_info()) - .post(&format!("key/{invalid_key_duration}")) - .await; - - assert_invalid_key_duration_param(response, invalid_key_duration).await; - } - - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_when_the_auth_key_cannot_be_generated() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; - force_database_error(&test_env.tracker); + force_database_error(&env.tracker); - let seconds_valid = 60; - let response = Client::new(test_env.get_connection_info()) - .generate_auth_key(seconds_valid) + let response = Client::new(env.get_connection_info()) + .add_auth_key(AddKeyForm { + opt_key: None, + seconds_valid: Some(60), + }) .await; assert_failed_to_generate_key(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_allow_deleting_an_auth_key() { - let test_env = 
running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let seconds_valid = 60; - let auth_key = test_env + let auth_key = env .tracker - .generate_auth_key(Duration::from_secs(seconds_valid)) + .generate_auth_key(Some(Duration::from_secs(seconds_valid))) .await .unwrap(); - let response = Client::new(test_env.get_connection_info()) + let response = Client::new(env.get_connection_info()) .delete_auth_key(&auth_key.key.to_string()) .await; assert_ok(response).await; - test_env.stop().await; + env.stop().await; +} + +#[tokio::test] +async fn should_fail_generating_a_new_auth_key_when_the_provided_key_is_invalid() { + #[derive(Serialize, Debug)] + pub struct InvalidAddKeyForm { + #[serde(rename = "key")] + pub opt_key: Option, + pub seconds_valid: u64, + } + + let env = Started::new(&configuration::ephemeral().into()).await; + + let invalid_keys = [ + // "", it returns 404 + // " ", it returns 404 + "-1", // Not a string + "invalid", // Invalid string + "GQEs2ZNcCm9cwEV9dBpcPB5OwNFWFiR", // Not a 32-char string + "%QEs2ZNcCm9cwEV9dBpcPB5OwNFWFiRd", // Invalid char. 
+ ]; + + for invalid_key in invalid_keys { + let response = Client::new(env.get_connection_info()) + .post_form( + "keys", + &InvalidAddKeyForm { + opt_key: Some(invalid_key.to_string()), + seconds_valid: 60, + }, + ) + .await; + + assert_invalid_auth_key_post_param(response, invalid_key).await; + } + + env.stop().await; +} + +#[tokio::test] +async fn should_fail_generating_a_new_auth_key_when_the_key_duration_is_invalid() { + #[derive(Serialize, Debug)] + pub struct InvalidAddKeyForm { + #[serde(rename = "key")] + pub opt_key: Option, + pub seconds_valid: String, + } + + let env = Started::new(&configuration::ephemeral().into()).await; + + let invalid_key_durations = [ + // "", it returns 404 + // " ", it returns 404 + "-1", "text", + ]; + + for invalid_key_duration in invalid_key_durations { + let response = Client::new(env.get_connection_info()) + .post_form( + "keys", + &InvalidAddKeyForm { + opt_key: None, + seconds_valid: invalid_key_duration.to_string(), + }, + ) + .await; + + assert_unprocessable_auth_key_duration_param(response, invalid_key_duration).await; + } + + env.stop().await; } #[tokio::test] async fn should_fail_deleting_an_auth_key_when_the_key_id_is_invalid() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let invalid_auth_keys = [ // "", it returns a 404 @@ -129,137 +206,217 @@ async fn should_fail_deleting_an_auth_key_when_the_key_id_is_invalid() { ]; for invalid_auth_key in &invalid_auth_keys { - let response = Client::new(test_env.get_connection_info()) - .delete_auth_key(invalid_auth_key) - .await; + let response = Client::new(env.get_connection_info()).delete_auth_key(invalid_auth_key).await; - assert_invalid_auth_key_param(response, invalid_auth_key).await; + assert_invalid_auth_key_get_param(response, invalid_auth_key).await; } - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn 
should_fail_when_the_auth_key_cannot_be_deleted() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let seconds_valid = 60; - let auth_key = test_env + let auth_key = env .tracker - .generate_auth_key(Duration::from_secs(seconds_valid)) + .generate_auth_key(Some(Duration::from_secs(seconds_valid))) .await .unwrap(); - force_database_error(&test_env.tracker); + force_database_error(&env.tracker); - let response = Client::new(test_env.get_connection_info()) + let response = Client::new(env.get_connection_info()) .delete_auth_key(&auth_key.key.to_string()) .await; assert_failed_to_delete_key(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_not_allow_deleting_an_auth_key_for_unauthenticated_users() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let seconds_valid = 60; // Generate new auth key - let auth_key = test_env + let auth_key = env .tracker - .generate_auth_key(Duration::from_secs(seconds_valid)) + .generate_auth_key(Some(Duration::from_secs(seconds_valid))) .await .unwrap(); - let response = Client::new(connection_with_invalid_token( - test_env.get_connection_info().bind_address.as_str(), - )) - .delete_auth_key(&auth_key.key.to_string()) - .await; + let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) + .delete_auth_key(&auth_key.key.to_string()) + .await; assert_token_not_valid(response).await; // Generate new auth key - let auth_key = test_env + let auth_key = env .tracker - .generate_auth_key(Duration::from_secs(seconds_valid)) + .generate_auth_key(Some(Duration::from_secs(seconds_valid))) .await .unwrap(); - let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) + let response = 
Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) .delete_auth_key(&auth_key.key.to_string()) .await; assert_unauthorized(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_allow_reloading_keys() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let seconds_valid = 60; - test_env - .tracker - .generate_auth_key(Duration::from_secs(seconds_valid)) + env.tracker + .generate_auth_key(Some(Duration::from_secs(seconds_valid))) .await .unwrap(); - let response = Client::new(test_env.get_connection_info()).reload_keys().await; + let response = Client::new(env.get_connection_info()).reload_keys().await; assert_ok(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_when_keys_cannot_be_reloaded() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let seconds_valid = 60; - test_env - .tracker - .generate_auth_key(Duration::from_secs(seconds_valid)) + env.tracker + .generate_auth_key(Some(Duration::from_secs(seconds_valid))) .await .unwrap(); - force_database_error(&test_env.tracker); + force_database_error(&env.tracker); - let response = Client::new(test_env.get_connection_info()).reload_keys().await; + let response = Client::new(env.get_connection_info()).reload_keys().await; assert_failed_to_reload_keys(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_not_allow_reloading_keys_for_unauthenticated_users() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let seconds_valid = 60; - test_env - .tracker - .generate_auth_key(Duration::from_secs(seconds_valid)) + env.tracker + 
.generate_auth_key(Some(Duration::from_secs(seconds_valid))) .await .unwrap(); - let response = Client::new(connection_with_invalid_token( - test_env.get_connection_info().bind_address.as_str(), - )) - .reload_keys() - .await; + let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) + .reload_keys() + .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) + let response = Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) .reload_keys() .await; assert_unauthorized(response).await; - test_env.stop().await; + env.stop().await; +} + +mod deprecated_generate_key_endpoint { + + use torrust_tracker::core::auth::Key; + use torrust_tracker_test_helpers::configuration; + + use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; + use crate::servers::api::v1::asserts::{ + assert_auth_key_utf8, assert_failed_to_generate_key, assert_invalid_key_duration_param, assert_token_not_valid, + assert_unauthorized, + }; + use crate::servers::api::v1::client::Client; + use crate::servers::api::{force_database_error, Started}; + + #[tokio::test] + async fn should_allow_generating_a_new_auth_key() { + let env = Started::new(&configuration::ephemeral().into()).await; + + let seconds_valid = 60; + + let response = Client::new(env.get_connection_info()).generate_auth_key(seconds_valid).await; + + let auth_key_resource = assert_auth_key_utf8(response).await; + + assert!(env + .tracker + .authenticate(&auth_key_resource.key.parse::().unwrap()) + .await + .is_ok()); + + env.stop().await; + } + + #[tokio::test] + async fn should_not_allow_generating_a_new_auth_key_for_unauthenticated_users() { + let env = Started::new(&configuration::ephemeral().into()).await; + + let seconds_valid = 60; + + let response = 
Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) + .generate_auth_key(seconds_valid) + .await; + + assert_token_not_valid(response).await; + + let response = Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) + .generate_auth_key(seconds_valid) + .await; + + assert_unauthorized(response).await; + + env.stop().await; + } + + #[tokio::test] + async fn should_fail_generating_a_new_auth_key_when_the_key_duration_is_invalid() { + let env = Started::new(&configuration::ephemeral().into()).await; + + let invalid_key_durations = [ + // "", it returns 404 + // " ", it returns 404 + "-1", "text", + ]; + + for invalid_key_duration in invalid_key_durations { + let response = Client::new(env.get_connection_info()) + .post_empty(&format!("key/{invalid_key_duration}")) + .await; + + assert_invalid_key_duration_param(response, invalid_key_duration).await; + } + + env.stop().await; + } + + #[tokio::test] + async fn should_fail_when_the_auth_key_cannot_be_generated() { + let env = Started::new(&configuration::ephemeral().into()).await; + + force_database_error(&env.tracker); + + let seconds_valid = 60; + let response = Client::new(env.get_connection_info()).generate_auth_key(seconds_valid).await; + + assert_failed_to_generate_key(response).await; + + env.stop().await; + } } diff --git a/tests/servers/api/v1/contract/context/health_check.rs b/tests/servers/api/v1/contract/context/health_check.rs new file mode 100644 index 000000000..d8dc3c030 --- /dev/null +++ b/tests/servers/api/v1/contract/context/health_check.rs @@ -0,0 +1,20 @@ +use torrust_tracker::servers::apis::v1::context::health_check::resources::{Report, Status}; +use torrust_tracker_test_helpers::configuration; + +use crate::servers::api::v1::client::get; +use crate::servers::api::Started; + +#[tokio::test] +async fn health_check_endpoint_should_return_status_ok_if_api_is_running() { + let env = 
Started::new(&configuration::ephemeral().into()).await; + + let url = format!("http://{}/api/health_check", env.get_connection_info().bind_address); + + let response = get(&url, None).await; + + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + assert_eq!(response.json::().await.unwrap(), Report { status: Status::Ok }); + + env.stop().await; +} diff --git a/tests/servers/api/v1/contract/context/mod.rs b/tests/servers/api/v1/contract/context/mod.rs index 6d3fb7566..032e13b0b 100644 --- a/tests/servers/api/v1/contract/context/mod.rs +++ b/tests/servers/api/v1/contract/context/mod.rs @@ -1,4 +1,5 @@ pub mod auth_key; +pub mod health_check; pub mod stats; pub mod torrent; pub mod whitelist; diff --git a/tests/servers/api/v1/contract/context/stats.rs b/tests/servers/api/v1/contract/context/stats.rs index 45f7e604a..c4c992484 100644 --- a/tests/servers/api/v1/contract/context/stats.rs +++ b/tests/servers/api/v1/contract/context/stats.rs @@ -1,27 +1,25 @@ use std::str::FromStr; use torrust_tracker::servers::apis::v1::context::stats::resources::Stats; -use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; -use crate::common::fixtures::PeerBuilder; use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; -use crate::servers::api::test_environment::running_test_environment; use crate::servers::api::v1::asserts::{assert_stats, assert_token_not_valid, assert_unauthorized}; use crate::servers::api::v1::client::Client; +use crate::servers::api::Started; #[tokio::test] async fn should_allow_getting_tracker_statistics() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; - test_env - .add_torrent_peer( - 
&InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(), - &PeerBuilder::default().into(), - ) - .await; + env.add_torrent_peer( + &InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(), + &PeerBuilder::default().into(), + ); - let response = Client::new(test_env.get_connection_info()).get_tracker_statistics().await; + let response = Client::new(env.get_connection_info()).get_tracker_statistics().await; assert_stats( response, @@ -46,26 +44,24 @@ async fn should_allow_getting_tracker_statistics() { ) .await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_not_allow_getting_tracker_statistics_for_unauthenticated_users() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; - let response = Client::new(connection_with_invalid_token( - test_env.get_connection_info().bind_address.as_str(), - )) - .get_tracker_statistics() - .await; + let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) + .get_tracker_statistics() + .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) + let response = Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) .get_tracker_statistics() .await; assert_unauthorized(response).await; - test_env.stop().await; + env.stop().await; } diff --git a/tests/servers/api/v1/contract/context/torrent.rs b/tests/servers/api/v1/contract/context/torrent.rs index ab497787f..7ef35e729 100644 --- a/tests/servers/api/v1/contract/context/torrent.rs +++ b/tests/servers/api/v1/contract/context/torrent.rs @@ -2,13 +2,12 @@ use std::str::FromStr; use torrust_tracker::servers::apis::v1::context::torrent::resources::peer::Peer; use torrust_tracker::servers::apis::v1::context::torrent::resources::torrent::{self, Torrent}; -use 
torrust_tracker::shared::bit_torrent::info_hash::InfoHash; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; -use crate::common::fixtures::PeerBuilder; use crate::common::http::{Query, QueryParam}; use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; -use crate::servers::api::test_environment::running_test_environment; use crate::servers::api::v1::asserts::{ assert_bad_request, assert_invalid_infohash_param, assert_not_found, assert_token_not_valid, assert_torrent_info, assert_torrent_list, assert_torrent_not_known, assert_unauthorized, @@ -17,16 +16,17 @@ use crate::servers::api::v1::client::Client; use crate::servers::api::v1::contract::fixtures::{ invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found, }; +use crate::servers::api::Started; #[tokio::test] -async fn should_allow_getting_torrents() { - let test_env = running_test_environment(configuration::ephemeral()).await; +async fn should_allow_getting_all_torrents() { + let env = Started::new(&configuration::ephemeral().into()).await; let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - test_env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()).await; + env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()); - let response = Client::new(test_env.get_connection_info()).get_torrents(Query::empty()).await; + let response = Client::new(env.get_connection_info()).get_torrents(Query::empty()).await; assert_torrent_list( response, @@ -35,26 +35,25 @@ async fn should_allow_getting_torrents() { seeders: 1, completed: 0, leechers: 0, - peers: None, // Torrent list does not include the peer list for each torrent }], ) .await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_allow_limiting_the_torrents_in_the_result() { - let test_env = 
running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; // torrents are ordered alphabetically by infohashes let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); - test_env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()).await; - test_env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()).await; + env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()); + env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()); - let response = Client::new(test_env.get_connection_info()) + let response = Client::new(env.get_connection_info()) .get_torrents(Query::params([QueryParam::new("limit", "1")].to_vec())) .await; @@ -65,26 +64,25 @@ async fn should_allow_limiting_the_torrents_in_the_result() { seeders: 1, completed: 0, leechers: 0, - peers: None, // Torrent list does not include the peer list for each torrent }], ) .await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_allow_the_torrents_result_pagination() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; // torrents are ordered alphabetically by infohashes let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); - test_env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()).await; - test_env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()).await; + env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()); + env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()); - let response = Client::new(test_env.get_connection_info()) + let response = Client::new(env.get_connection_info()) 
.get_torrents(Query::params([QueryParam::new("offset", "1")].to_vec())) .await; @@ -95,80 +93,140 @@ async fn should_allow_the_torrents_result_pagination() { seeders: 1, completed: 0, leechers: 0, - peers: None, // Torrent list does not include the peer list for each torrent }], ) .await; - test_env.stop().await; + env.stop().await; +} + +#[tokio::test] +async fn should_allow_getting_a_list_of_torrents_providing_infohashes() { + let env = Started::new(&configuration::ephemeral().into()).await; + + let info_hash_1 = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); // DevSkim: ignore DS173237 + let info_hash_2 = InfoHash::from_str("0b3aea4adc213ce32295be85d3883a63bca25446").unwrap(); // DevSkim: ignore DS173237 + + env.add_torrent_peer(&info_hash_1, &PeerBuilder::default().into()); + env.add_torrent_peer(&info_hash_2, &PeerBuilder::default().into()); + + let response = Client::new(env.get_connection_info()) + .get_torrents(Query::params( + [ + QueryParam::new("info_hash", "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d"), // DevSkim: ignore DS173237 + QueryParam::new("info_hash", "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d"), // DevSkim: ignore DS173237 + ] + .to_vec(), + )) + .await; + + assert_torrent_list( + response, + vec![ + torrent::ListItem { + info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), // DevSkim: ignore DS173237 + seeders: 1, + completed: 0, + leechers: 0, + }, + torrent::ListItem { + info_hash: "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_string(), // DevSkim: ignore DS173237 + seeders: 1, + completed: 0, + leechers: 0, + }, + ], + ) + .await; + + env.stop().await; } #[tokio::test] async fn should_fail_getting_torrents_when_the_offset_query_parameter_cannot_be_parsed() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let invalid_offsets = [" ", "-1", "1.1", "INVALID OFFSET"]; for invalid_offset in &invalid_offsets { - 
let response = Client::new(test_env.get_connection_info()) + let response = Client::new(env.get_connection_info()) .get_torrents(Query::params([QueryParam::new("offset", invalid_offset)].to_vec())) .await; assert_bad_request(response, "Failed to deserialize query string: invalid digit found in string").await; } - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_getting_torrents_when_the_limit_query_parameter_cannot_be_parsed() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let invalid_limits = [" ", "-1", "1.1", "INVALID LIMIT"]; for invalid_limit in &invalid_limits { - let response = Client::new(test_env.get_connection_info()) + let response = Client::new(env.get_connection_info()) .get_torrents(Query::params([QueryParam::new("limit", invalid_limit)].to_vec())) .await; assert_bad_request(response, "Failed to deserialize query string: invalid digit found in string").await; } - test_env.stop().await; + env.stop().await; +} + +#[tokio::test] +async fn should_fail_getting_torrents_when_the_info_hash_parameter_is_invalid() { + let env = Started::new(&configuration::ephemeral().into()).await; + + let invalid_info_hashes = [" ", "-1", "1.1", "INVALID INFO_HASH"]; + + for invalid_info_hash in &invalid_info_hashes { + let response = Client::new(env.get_connection_info()) + .get_torrents(Query::params([QueryParam::new("info_hash", invalid_info_hash)].to_vec())) + .await; + + assert_bad_request( + response, + &format!("Invalid URL: invalid infohash param: string \"{invalid_info_hash}\", expected a 40 character long string"), + ) + .await; + } + + env.stop().await; } #[tokio::test] async fn should_not_allow_getting_torrents_for_unauthenticated_users() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; - let response = 
Client::new(connection_with_invalid_token( - test_env.get_connection_info().bind_address.as_str(), - )) - .get_torrents(Query::empty()) - .await; + let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) + .get_torrents(Query::empty()) + .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) + let response = Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) .get_torrents(Query::default()) .await; assert_unauthorized(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_allow_getting_a_torrent_info() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); let peer = PeerBuilder::default().into(); - test_env.add_torrent_peer(&info_hash, &peer).await; + env.add_torrent_peer(&info_hash, &peer); - let response = Client::new(test_env.get_connection_info()) + let response = Client::new(env.get_connection_info()) .get_torrent(&info_hash.to_string()) .await; @@ -184,68 +242,62 @@ async fn should_allow_getting_a_torrent_info() { ) .await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_while_getting_a_torrent_info_when_the_torrent_does_not_exist() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - let response = Client::new(test_env.get_connection_info()) + let response = Client::new(env.get_connection_info()) .get_torrent(&info_hash.to_string()) .await; assert_torrent_not_known(response).await; - test_env.stop().await; + env.stop().await; } 
#[tokio::test] async fn should_fail_getting_a_torrent_info_when_the_provided_infohash_is_invalid() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; for invalid_infohash in &invalid_infohashes_returning_bad_request() { - let response = Client::new(test_env.get_connection_info()) - .get_torrent(invalid_infohash) - .await; + let response = Client::new(env.get_connection_info()).get_torrent(invalid_infohash).await; assert_invalid_infohash_param(response, invalid_infohash).await; } for invalid_infohash in &invalid_infohashes_returning_not_found() { - let response = Client::new(test_env.get_connection_info()) - .get_torrent(invalid_infohash) - .await; + let response = Client::new(env.get_connection_info()).get_torrent(invalid_infohash).await; assert_not_found(response).await; } - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_not_allow_getting_a_torrent_info_for_unauthenticated_users() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let info_hash = InfoHash::from_str("9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d").unwrap(); - test_env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()).await; + env.add_torrent_peer(&info_hash, &PeerBuilder::default().into()); - let response = Client::new(connection_with_invalid_token( - test_env.get_connection_info().bind_address.as_str(), - )) - .get_torrent(&info_hash.to_string()) - .await; + let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) + .get_torrent(&info_hash.to_string()) + .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) + let response = Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) 
.get_torrent(&info_hash.to_string()) .await; assert_unauthorized(response).await; - test_env.stop().await; + env.stop().await; } diff --git a/tests/servers/api/v1/contract/context/whitelist.rs b/tests/servers/api/v1/contract/context/whitelist.rs index 60ab4c901..29064ec9e 100644 --- a/tests/servers/api/v1/contract/context/whitelist.rs +++ b/tests/servers/api/v1/contract/context/whitelist.rs @@ -1,11 +1,9 @@ use std::str::FromStr; -use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; +use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; use crate::servers::api::connection_info::{connection_with_invalid_token, connection_with_no_token}; -use crate::servers::api::force_database_error; -use crate::servers::api::test_environment::running_test_environment; use crate::servers::api::v1::asserts::{ assert_failed_to_reload_whitelist, assert_failed_to_remove_torrent_from_whitelist, assert_failed_to_whitelist_torrent, assert_invalid_infohash_param, assert_not_found, assert_ok, assert_token_not_valid, assert_unauthorized, @@ -14,35 +12,33 @@ use crate::servers::api::v1::client::Client; use crate::servers::api::v1::contract::fixtures::{ invalid_infohashes_returning_bad_request, invalid_infohashes_returning_not_found, }; +use crate::servers::api::{force_database_error, Started}; #[tokio::test] async fn should_allow_whitelisting_a_torrent() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let response = Client::new(test_env.get_connection_info()) - .whitelist_a_torrent(&info_hash) - .await; + let response = Client::new(env.get_connection_info()).whitelist_a_torrent(&info_hash).await; assert_ok(response).await; assert!( - test_env - .tracker + env.tracker .is_info_hash_whitelisted(&InfoHash::from_str(&info_hash).unwrap()) .await ); - test_env.stop().await; 
+ env.stop().await; } #[tokio::test] async fn should_allow_whitelisting_a_torrent_that_has_been_already_whitelisted() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let api_client = Client::new(test_env.get_connection_info()); + let api_client = Client::new(env.get_connection_info()); let response = api_client.whitelist_a_torrent(&info_hash).await; assert_ok(response).await; @@ -50,55 +46,51 @@ async fn should_allow_whitelisting_a_torrent_that_has_been_already_whitelisted() let response = api_client.whitelist_a_torrent(&info_hash).await; assert_ok(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_not_allow_whitelisting_a_torrent_for_unauthenticated_users() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let response = Client::new(connection_with_invalid_token( - test_env.get_connection_info().bind_address.as_str(), - )) - .whitelist_a_torrent(&info_hash) - .await; + let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) + .whitelist_a_torrent(&info_hash) + .await; assert_token_not_valid(response).await; - let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) + let response = Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) .whitelist_a_torrent(&info_hash) .await; assert_unauthorized(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_when_the_torrent_cannot_be_whitelisted() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = 
Started::new(&configuration::ephemeral().into()).await; let info_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - force_database_error(&test_env.tracker); + force_database_error(&env.tracker); - let response = Client::new(test_env.get_connection_info()) - .whitelist_a_torrent(&info_hash) - .await; + let response = Client::new(env.get_connection_info()).whitelist_a_torrent(&info_hash).await; assert_failed_to_whitelist_torrent(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_whitelisting_a_torrent_when_the_provided_infohash_is_invalid() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; for invalid_infohash in &invalid_infohashes_returning_bad_request() { - let response = Client::new(test_env.get_connection_info()) + let response = Client::new(env.get_connection_info()) .whitelist_a_torrent(invalid_infohash) .await; @@ -106,55 +98,55 @@ async fn should_fail_whitelisting_a_torrent_when_the_provided_infohash_is_invali } for invalid_infohash in &invalid_infohashes_returning_not_found() { - let response = Client::new(test_env.get_connection_info()) + let response = Client::new(env.get_connection_info()) .whitelist_a_torrent(invalid_infohash) .await; assert_not_found(response).await; } - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_allow_removing_a_torrent_from_the_whitelist() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); - test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(test_env.get_connection_info()) + let response = 
Client::new(env.get_connection_info()) .remove_torrent_from_whitelist(&hash) .await; assert_ok(response).await; - assert!(!test_env.tracker.is_info_hash_whitelisted(&info_hash).await); + assert!(!env.tracker.is_info_hash_whitelisted(&info_hash).await); - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_not_fail_trying_to_remove_a_non_whitelisted_torrent_from_the_whitelist() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let non_whitelisted_torrent_hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); - let response = Client::new(test_env.get_connection_info()) + let response = Client::new(env.get_connection_info()) .remove_torrent_from_whitelist(&non_whitelisted_torrent_hash) .await; assert_ok(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_removing_a_torrent_from_the_whitelist_when_the_provided_infohash_is_invalid() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; for invalid_infohash in &invalid_infohashes_returning_bad_request() { - let response = Client::new(test_env.get_connection_info()) + let response = Client::new(env.get_connection_info()) .remove_torrent_from_whitelist(invalid_infohash) .await; @@ -162,99 +154,97 @@ async fn should_fail_removing_a_torrent_from_the_whitelist_when_the_provided_inf } for invalid_infohash in &invalid_infohashes_returning_not_found() { - let response = Client::new(test_env.get_connection_info()) + let response = Client::new(env.get_connection_info()) .remove_torrent_from_whitelist(invalid_infohash) .await; assert_not_found(response).await; } - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_when_the_torrent_cannot_be_removed_from_the_whitelist() { - let test_env = 
running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); - test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - force_database_error(&test_env.tracker); + force_database_error(&env.tracker); - let response = Client::new(test_env.get_connection_info()) + let response = Client::new(env.get_connection_info()) .remove_torrent_from_whitelist(&hash) .await; assert_failed_to_remove_torrent_from_whitelist(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_not_allow_removing_a_torrent_from_the_whitelist_for_unauthenticated_users() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); - test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(connection_with_invalid_token( - test_env.get_connection_info().bind_address.as_str(), - )) - .remove_torrent_from_whitelist(&hash) - .await; + env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + let response = Client::new(connection_with_invalid_token(env.get_connection_info().bind_address.as_str())) + .remove_torrent_from_whitelist(&hash) + .await; assert_token_not_valid(response).await; - test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(connection_with_no_token(test_env.get_connection_info().bind_address.as_str())) + env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + let response = Client::new(connection_with_no_token(env.get_connection_info().bind_address.as_str())) 
.remove_torrent_from_whitelist(&hash) .await; assert_unauthorized(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_allow_reload_the_whitelist_from_the_database() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); - test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - let response = Client::new(test_env.get_connection_info()).reload_whitelist().await; + let response = Client::new(env.get_connection_info()).reload_whitelist().await; assert_ok(response).await; /* todo: this assert fails because the whitelist has not been reloaded yet. We could add a new endpoint GET /api/whitelist/:info_hash to check if a torrent is whitelisted and use that endpoint to check if the torrent is still there after reloading. 
assert!( - !(test_env + !(env .tracker .is_info_hash_whitelisted(&InfoHash::from_str(&info_hash).unwrap()) .await) ); */ - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_when_the_whitelist_cannot_be_reloaded_from_the_database() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let hash = "9e0217d0fa71c87332cd8bf9dbeabcb2c2cf3c4d".to_owned(); let info_hash = InfoHash::from_str(&hash).unwrap(); - test_env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); + env.tracker.add_torrent_to_whitelist(&info_hash).await.unwrap(); - force_database_error(&test_env.tracker); + force_database_error(&env.tracker); - let response = Client::new(test_env.get_connection_info()).reload_whitelist().await; + let response = Client::new(env.get_connection_info()).reload_whitelist().await; assert_failed_to_reload_whitelist(response).await; - test_env.stop().await; + env.stop().await; } diff --git a/tests/servers/health_check_api/client.rs b/tests/servers/health_check_api/client.rs new file mode 100644 index 000000000..3d8bdc7d6 --- /dev/null +++ b/tests/servers/health_check_api/client.rs @@ -0,0 +1,5 @@ +use reqwest::Response; + +pub async fn get(path: &str) -> Response { + reqwest::Client::builder().build().unwrap().get(path).send().await.unwrap() +} diff --git a/tests/servers/health_check_api/contract.rs b/tests/servers/health_check_api/contract.rs new file mode 100644 index 000000000..3c3c13151 --- /dev/null +++ b/tests/servers/health_check_api/contract.rs @@ -0,0 +1,327 @@ +use torrust_tracker::servers::health_check_api::resources::{Report, Status}; +use torrust_tracker::servers::registar::Registar; +use torrust_tracker_test_helpers::configuration; + +use crate::servers::health_check_api::client::get; +use crate::servers::health_check_api::Started; + +#[tokio::test] +async fn 
health_check_endpoint_should_return_status_ok_when_there_is_no_services_registered() { + let configuration = configuration::ephemeral_with_no_services(); + + let env = Started::new(&configuration.health_check_api.into(), Registar::default()).await; + + let response = get(&format!("http://{}/health_check", env.state.binding)).await; + + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + + let report = response + .json::() + .await + .expect("it should be able to get the report as json"); + + assert_eq!(report.status, Status::None); + + env.stop().await.expect("it should stop the service"); +} + +mod api { + use std::sync::Arc; + + use torrust_tracker::servers::health_check_api::resources::{Report, Status}; + use torrust_tracker_test_helpers::configuration; + + use crate::servers::api; + use crate::servers::health_check_api::client::get; + use crate::servers::health_check_api::Started; + + #[tokio::test] + pub(crate) async fn it_should_return_good_health_for_api_service() { + let configuration = Arc::new(configuration::ephemeral()); + + let service = api::Started::new(&configuration).await; + + let registar = service.registar.clone(); + + { + let config = configuration.health_check_api.clone(); + let env = Started::new(&config.into(), registar).await; + + let response = get(&format!("http://{}/health_check", env.state.binding)).await; + + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + + let report: Report = response + .json() + .await + .expect("it should be able to get the report from the json"); + + assert_eq!(report.status, Status::Ok); + assert_eq!(report.message, String::new()); + + let details = report.details.first().expect("it should have some details"); + + assert_eq!(details.binding, service.bind_address()); + + assert_eq!(details.result, Ok("200 OK".to_string())); + + assert_eq!( + details.info, + format!( + 
"checking api health check at: http://{}/api/health_check", + service.bind_address() + ) + ); + + env.stop().await.expect("it should stop the service"); + } + + service.stop().await; + } + + #[tokio::test] + pub(crate) async fn it_should_return_error_when_api_service_was_stopped_after_registration() { + let configuration = Arc::new(configuration::ephemeral()); + + let service = api::Started::new(&configuration).await; + + let binding = service.bind_address(); + + let registar = service.registar.clone(); + + service.server.stop().await.expect("it should stop udp server"); + + { + let config = configuration.health_check_api.clone(); + let env = Started::new(&config.into(), registar).await; + + let response = get(&format!("http://{}/health_check", env.state.binding)).await; + + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + + let report: Report = response + .json() + .await + .expect("it should be able to get the report from the json"); + + assert_eq!(report.status, Status::Error); + assert_eq!(report.message, "health check failed".to_string()); + + let details = report.details.first().expect("it should have some details"); + + assert_eq!(details.binding, binding); + assert!( + details + .result + .as_ref() + .is_err_and(|e| e.contains("error sending request for url")), + "Expected to contain, \"error sending request for url\", but have message \"{:?}\".", + details.result + ); + assert_eq!( + details.info, + format!("checking api health check at: http://{binding}/api/health_check") + ); + + env.stop().await.expect("it should stop the service"); + } + } +} + +mod http { + use std::sync::Arc; + + use torrust_tracker::servers::health_check_api::resources::{Report, Status}; + use torrust_tracker_test_helpers::configuration; + + use crate::servers::health_check_api::client::get; + use crate::servers::health_check_api::Started; + use crate::servers::http; + + #[tokio::test] + pub(crate) async fn 
it_should_return_good_health_for_http_service() { + let configuration = Arc::new(configuration::ephemeral()); + + let service = http::Started::new(&configuration).await; + + let registar = service.registar.clone(); + + { + let config = configuration.health_check_api.clone(); + let env = Started::new(&config.into(), registar).await; + + let response = get(&format!("http://{}/health_check", env.state.binding)).await; + + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + + let report: Report = response + .json() + .await + .expect("it should be able to get the report from the json"); + + assert_eq!(report.status, Status::Ok); + assert_eq!(report.message, String::new()); + + let details = report.details.first().expect("it should have some details"); + + assert_eq!(details.binding, *service.bind_address()); + assert_eq!(details.result, Ok("200 OK".to_string())); + + assert_eq!( + details.info, + format!( + "checking http tracker health check at: http://{}/health_check", + service.bind_address() + ) + ); + + env.stop().await.expect("it should stop the service"); + } + + service.stop().await; + } + + #[tokio::test] + pub(crate) async fn it_should_return_error_when_http_service_was_stopped_after_registration() { + let configuration = Arc::new(configuration::ephemeral()); + + let service = http::Started::new(&configuration).await; + + let binding = *service.bind_address(); + + let registar = service.registar.clone(); + + service.server.stop().await.expect("it should stop udp server"); + + { + let config = configuration.health_check_api.clone(); + let env = Started::new(&config.into(), registar).await; + + let response = get(&format!("http://{}/health_check", env.state.binding)).await; + + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + + let report: Report = response + .json() + .await + .expect("it should be able to get the report 
from the json"); + + assert_eq!(report.status, Status::Error); + assert_eq!(report.message, "health check failed".to_string()); + + let details = report.details.first().expect("it should have some details"); + + assert_eq!(details.binding, binding); + assert!( + details + .result + .as_ref() + .is_err_and(|e| e.contains("error sending request for url")), + "Expected to contain, \"error sending request for url\", but have message \"{:?}\".", + details.result + ); + assert_eq!( + details.info, + format!("checking http tracker health check at: http://{binding}/health_check") + ); + + env.stop().await.expect("it should stop the service"); + } + } +} + +mod udp { + use std::sync::Arc; + + use torrust_tracker::servers::health_check_api::resources::{Report, Status}; + use torrust_tracker_test_helpers::configuration; + + use crate::servers::health_check_api::client::get; + use crate::servers::health_check_api::Started; + use crate::servers::udp; + + #[tokio::test] + pub(crate) async fn it_should_return_good_health_for_udp_service() { + let configuration = Arc::new(configuration::ephemeral()); + + let service = udp::Started::new(&configuration).await; + + let registar = service.registar.clone(); + + { + let config = configuration.health_check_api.clone(); + let env = Started::new(&config.into(), registar).await; + + let response = get(&format!("http://{}/health_check", env.state.binding)).await; + + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + + let report: Report = response + .json() + .await + .expect("it should be able to get the report from the json"); + + assert_eq!(report.status, Status::Ok); + assert_eq!(report.message, String::new()); + + let details = report.details.first().expect("it should have some details"); + + assert_eq!(details.binding, service.bind_address()); + assert_eq!(details.result, Ok("Connected".to_string())); + + assert_eq!( + details.info, + format!("checking the udp 
tracker health check at: {}", service.bind_address()) + ); + + env.stop().await.expect("it should stop the service"); + } + + service.stop().await; + } + + #[tokio::test] + pub(crate) async fn it_should_return_error_when_udp_service_was_stopped_after_registration() { + let configuration = Arc::new(configuration::ephemeral()); + + let service = udp::Started::new(&configuration).await; + + let binding = service.bind_address(); + + let registar = service.registar.clone(); + + service.server.stop().await.expect("it should stop udp server"); + + { + let config = configuration.health_check_api.clone(); + let env = Started::new(&config.into(), registar).await; + + let response = get(&format!("http://{}/health_check", env.state.binding)).await; + + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + + let report: Report = response + .json() + .await + .expect("it should be able to get the report from the json"); + + assert_eq!(report.status, Status::Error); + assert_eq!(report.message, "health check failed".to_string()); + + let details = report.details.first().expect("it should have some details"); + + assert_eq!(details.binding, binding); + assert_eq!(details.result, Err("Timed Out".to_string())); + assert_eq!(details.info, format!("checking the udp tracker health check at: {binding}")); + + env.stop().await.expect("it should stop the service"); + } + } +} diff --git a/tests/servers/health_check_api/environment.rs b/tests/servers/health_check_api/environment.rs new file mode 100644 index 000000000..cf0566d67 --- /dev/null +++ b/tests/servers/health_check_api/environment.rs @@ -0,0 +1,99 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use tokio::sync::oneshot::{self, Sender}; +use tokio::task::JoinHandle; +use torrust_tracker::bootstrap::jobs::Started; +use torrust_tracker::servers::health_check_api::{server, HEALTH_CHECK_API_LOG_TARGET}; +use torrust_tracker::servers::registar::Registar; +use 
torrust_tracker::servers::signals::{self, Halted}; +use torrust_tracker_configuration::HealthCheckApi; +use tracing::debug; + +#[derive(Debug)] +pub enum Error { + #[allow(dead_code)] + Error(String), +} + +pub struct Running { + pub binding: SocketAddr, + pub halt_task: Sender, + pub task: JoinHandle, +} + +pub struct Stopped { + pub bind_to: SocketAddr, +} + +pub struct Environment { + pub registar: Registar, + pub state: S, +} + +impl Environment { + pub fn new(config: &Arc, registar: Registar) -> Self { + let bind_to = config.bind_address; + + Self { + registar, + state: Stopped { bind_to }, + } + } + + /// Start the test environment for the Health Check API. + /// It runs the API server. + pub async fn start(self) -> Environment { + let (tx_start, rx_start) = oneshot::channel::(); + let (tx_halt, rx_halt) = tokio::sync::oneshot::channel::(); + + let register = self.registar.entries(); + + debug!(target: HEALTH_CHECK_API_LOG_TARGET, "Spawning task to launch the service ..."); + + let server = tokio::spawn(async move { + debug!(target: HEALTH_CHECK_API_LOG_TARGET, "Starting the server in a spawned task ..."); + + server::start(self.state.bind_to, tx_start, rx_halt, register) + .await + .expect("it should start the health check service"); + + debug!(target: HEALTH_CHECK_API_LOG_TARGET, "Server started. 
Sending the binding {} ...", self.state.bind_to); + + self.state.bind_to + }); + + debug!(target: HEALTH_CHECK_API_LOG_TARGET, "Waiting for spawning task to send the binding ..."); + + let binding = rx_start.await.expect("it should send service binding").address; + + Environment { + registar: self.registar.clone(), + state: Running { + task: server, + halt_task: tx_halt, + binding, + }, + } + } +} + +impl Environment { + pub async fn new(config: &Arc, registar: Registar) -> Self { + Environment::::new(config, registar).start().await + } + + pub async fn stop(self) -> Result, Error> { + self.state + .halt_task + .send(Halted::Normal) + .map_err(|e| Error::Error(e.to_string()))?; + + let bind_to = self.state.task.await.expect("it should shutdown the service"); + + Ok(Environment { + registar: self.registar.clone(), + state: Stopped { bind_to }, + }) + } +} diff --git a/tests/servers/health_check_api/mod.rs b/tests/servers/health_check_api/mod.rs new file mode 100644 index 000000000..9e15c5f62 --- /dev/null +++ b/tests/servers/health_check_api/mod.rs @@ -0,0 +1,5 @@ +pub mod client; +pub mod contract; +pub mod environment; + +pub type Started = environment::Environment; diff --git a/tests/servers/http/client.rs b/tests/servers/http/client.rs index f5cdca398..288987c55 100644 --- a/tests/servers/http/client.rs +++ b/tests/servers/http/client.rs @@ -1,7 +1,7 @@ use std::net::IpAddr; use reqwest::{Client as ReqwestClient, Response}; -use torrust_tracker::tracker::auth::Key; +use torrust_tracker::core::auth::Key; use super::requests::announce::{self, Query}; use super::requests::scrape; @@ -9,7 +9,7 @@ use super::requests::scrape; /// HTTP Tracker Client pub struct Client { server_addr: std::net::SocketAddr, - reqwest_client: ReqwestClient, + reqwest: ReqwestClient, key: Option, } @@ -25,7 +25,7 @@ impl Client { pub fn new(server_addr: std::net::SocketAddr) -> Self { Self { server_addr, - reqwest_client: reqwest::Client::builder().build().unwrap(), + reqwest: 
reqwest::Client::builder().build().unwrap(), key: None, } } @@ -34,7 +34,7 @@ impl Client { pub fn bind(server_addr: std::net::SocketAddr, local_address: IpAddr) -> Self { Self { server_addr, - reqwest_client: reqwest::Client::builder().local_address(local_address).build().unwrap(), + reqwest: reqwest::Client::builder().local_address(local_address).build().unwrap(), key: None, } } @@ -42,7 +42,7 @@ impl Client { pub fn authenticated(server_addr: std::net::SocketAddr, key: Key) -> Self { Self { server_addr, - reqwest_client: reqwest::Client::builder().build().unwrap(), + reqwest: reqwest::Client::builder().build().unwrap(), key: Some(key), } } @@ -60,12 +60,16 @@ impl Client { .await } + pub async fn health_check(&self) -> Response { + self.get(&self.build_path("health_check")).await + } + pub async fn get(&self, path: &str) -> Response { - self.reqwest_client.get(self.build_url(path)).send().await.unwrap() + self.reqwest.get(self.build_url(path)).send().await.unwrap() } pub async fn get_with_header(&self, path: &str, key: &str, value: &str) -> Response { - self.reqwest_client + self.reqwest .get(self.build_url(path)) .header(key, value) .send() diff --git a/tests/servers/http/connection_info.rs b/tests/servers/http/connection_info.rs index 5736271fd..f4081d60e 100644 --- a/tests/servers/http/connection_info.rs +++ b/tests/servers/http/connection_info.rs @@ -1,4 +1,4 @@ -use torrust_tracker::tracker::auth::Key; +use torrust_tracker::core::auth::Key; #[derive(Clone, Debug)] pub struct ConnectionInfo { diff --git a/tests/servers/http/environment.rs b/tests/servers/http/environment.rs new file mode 100644 index 000000000..b6bb21c16 --- /dev/null +++ b/tests/servers/http/environment.rs @@ -0,0 +1,82 @@ +use std::sync::Arc; + +use futures::executor::block_on; +use torrust_tracker::bootstrap::app::initialize_with_configuration; +use torrust_tracker::bootstrap::jobs::make_rust_tls; +use torrust_tracker::core::Tracker; +use 
torrust_tracker::servers::http::server::{HttpServer, Launcher, Running, Stopped}; +use torrust_tracker::servers::registar::Registar; +use torrust_tracker_configuration::{Configuration, HttpTracker}; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::peer; + +pub struct Environment { + pub config: Arc, + pub tracker: Arc, + pub registar: Registar, + pub server: HttpServer, +} + +impl Environment { + /// Add a torrent to the tracker + pub fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &peer::Peer) { + self.tracker.upsert_peer_and_get_stats(info_hash, peer); + } +} + +impl Environment { + #[allow(dead_code)] + pub fn new(configuration: &Arc) -> Self { + let tracker = initialize_with_configuration(configuration); + + let http_tracker = configuration + .http_trackers + .clone() + .expect("missing HTTP tracker configuration"); + + let config = Arc::new(http_tracker[0].clone()); + + let bind_to = config.bind_address; + + let tls = block_on(make_rust_tls(&config.tsl_config)).map(|tls| tls.expect("tls config failed")); + + let server = HttpServer::new(Launcher::new(bind_to, tls)); + + Self { + config, + tracker, + registar: Registar::default(), + server, + } + } + + #[allow(dead_code)] + pub async fn start(self) -> Environment { + Environment { + config: self.config, + tracker: self.tracker.clone(), + registar: self.registar.clone(), + server: self.server.start(self.tracker, self.registar.give_form()).await.unwrap(), + } + } +} + +impl Environment { + pub async fn new(configuration: &Arc) -> Self { + Environment::::new(configuration).start().await + } + + pub async fn stop(self) -> Environment { + Environment { + config: self.config, + tracker: self.tracker, + registar: Registar::default(), + + server: self.server.stop().await.unwrap(), + } + } + + pub fn bind_address(&self) -> &std::net::SocketAddr { + &self.server.state.binding + } +} diff --git a/tests/servers/http/mod.rs b/tests/servers/http/mod.rs index cb2885df0..65affc433 
100644 --- a/tests/servers/http/mod.rs +++ b/tests/servers/http/mod.rs @@ -1,11 +1,14 @@ pub mod asserts; pub mod client; +pub mod environment; pub mod requests; pub mod responses; -pub mod test_environment; pub mod v1; +pub type Started = environment::Environment; + use percent_encoding::NON_ALPHANUMERIC; +use torrust_tracker::servers::http::server; pub type ByteArray20 = [u8; 20]; diff --git a/tests/servers/http/requests/announce.rs b/tests/servers/http/requests/announce.rs index f7f25da3e..061990621 100644 --- a/tests/servers/http/requests/announce.rs +++ b/tests/servers/http/requests/announce.rs @@ -3,8 +3,8 @@ use std::net::{IpAddr, Ipv4Addr}; use std::str::FromStr; use serde_repr::Serialize_repr; -use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; -use torrust_tracker::tracker::peer::Id; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::peer; use crate::servers::http::{percent_encode_byte_array, ByteArray20}; @@ -93,7 +93,7 @@ impl QueryBuilder { peer_addr: IpAddr::V4(Ipv4Addr::new(192, 168, 1, 88)), downloaded: 0, uploaded: 0, - peer_id: Id(*b"-qB00000000000000001").0, + peer_id: peer::Id(*b"-qB00000000000000001").0, port: 17548, left: 0, event: Some(Event::Completed), @@ -109,7 +109,7 @@ impl QueryBuilder { self } - pub fn with_peer_id(mut self, peer_id: &Id) -> Self { + pub fn with_peer_id(mut self, peer_id: &peer::Id) -> Self { self.announce_query.peer_id = peer_id.0; self } diff --git a/tests/servers/http/requests/scrape.rs b/tests/servers/http/requests/scrape.rs index 264c72c33..f66605855 100644 --- a/tests/servers/http/requests/scrape.rs +++ b/tests/servers/http/requests/scrape.rs @@ -1,7 +1,7 @@ use std::fmt; use std::str::FromStr; -use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; +use torrust_tracker_primitives::info_hash::InfoHash; use crate::servers::http::{percent_encode_byte_array, ByteArray20}; diff --git a/tests/servers/http/responses/announce.rs 
b/tests/servers/http/responses/announce.rs index 8a07ebd5e..2b49b4405 100644 --- a/tests/servers/http/responses/announce.rs +++ b/tests/servers/http/responses/announce.rs @@ -1,7 +1,7 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr}; -use serde::{self, Deserialize, Serialize}; -use torrust_tracker::tracker::peer::Peer; +use serde::{Deserialize, Serialize}; +use torrust_tracker_primitives::peer; #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct Announce { @@ -22,8 +22,8 @@ pub struct DictionaryPeer { pub port: u16, } -impl From for DictionaryPeer { - fn from(peer: Peer) -> Self { +impl From for DictionaryPeer { + fn from(peer: peer::Peer) -> Self { DictionaryPeer { peer_id: peer.peer_id.to_bytes().to_vec(), ip: peer.peer_addr.ip().to_string(), diff --git a/tests/servers/http/responses/error.rs b/tests/servers/http/responses/error.rs index 12c53a0cf..00befdb54 100644 --- a/tests/servers/http/responses/error.rs +++ b/tests/servers/http/responses/error.rs @@ -1,4 +1,4 @@ -use serde::{self, Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; #[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct Error { diff --git a/tests/servers/http/responses/scrape.rs b/tests/servers/http/responses/scrape.rs index 221ff0a38..fc741cbf4 100644 --- a/tests/servers/http/responses/scrape.rs +++ b/tests/servers/http/responses/scrape.rs @@ -1,7 +1,7 @@ use std::collections::HashMap; use std::str; -use serde::{self, Deserialize, Serialize}; +use serde::{Deserialize, Serialize}; use serde_bencode::value::Value; use crate::servers::http::{ByteArray20, InfoHash}; @@ -73,9 +73,13 @@ impl ResponseBuilder { #[derive(Debug)] pub enum BencodeParseError { + #[allow(dead_code)] InvalidValueExpectedDict { value: Value }, + #[allow(dead_code)] InvalidValueExpectedInt { value: Value }, + #[allow(dead_code)] InvalidFileField { value: Value }, + #[allow(dead_code)] MissingFileField { field_name: String }, } diff --git a/tests/servers/http/test_environment.rs 
b/tests/servers/http/test_environment.rs deleted file mode 100644 index 8d0aaba02..000000000 --- a/tests/servers/http/test_environment.rs +++ /dev/null @@ -1,120 +0,0 @@ -use std::sync::Arc; - -use torrust_tracker::servers::http::server::{HttpServer, HttpServerLauncher, RunningHttpServer, StoppedHttpServer}; -use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; -use torrust_tracker::tracker::peer::Peer; -use torrust_tracker::tracker::Tracker; - -use crate::common::app::setup_with_configuration; - -#[allow(clippy::module_name_repetitions, dead_code)] -pub type StoppedTestEnvironment = TestEnvironment>; -#[allow(clippy::module_name_repetitions)] -pub type RunningTestEnvironment = TestEnvironment>; - -pub struct TestEnvironment { - pub cfg: Arc, - pub tracker: Arc, - pub state: S, -} - -#[allow(dead_code)] -pub struct Stopped { - http_server: StoppedHttpServer, -} - -pub struct Running { - http_server: RunningHttpServer, -} - -impl TestEnvironment { - /// Add a torrent to the tracker - pub async fn add_torrent_peer(&self, info_hash: &InfoHash, peer: &Peer) { - self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await; - } -} - -impl TestEnvironment> { - #[allow(dead_code)] - pub fn new_stopped(cfg: torrust_tracker_configuration::Configuration) -> Self { - let cfg = Arc::new(cfg); - - let tracker = setup_with_configuration(&cfg); - - let http_server = http_server(cfg.http_trackers[0].clone()); - - Self { - cfg, - tracker, - state: Stopped { http_server }, - } - } - - #[allow(dead_code)] - pub async fn start(self) -> TestEnvironment> { - TestEnvironment { - cfg: self.cfg, - tracker: self.tracker.clone(), - state: Running { - http_server: self.state.http_server.start(self.tracker).await.unwrap(), - }, - } - } - - #[allow(dead_code)] - pub fn config(&self) -> &torrust_tracker_configuration::HttpTracker { - &self.state.http_server.cfg - } - - #[allow(dead_code)] - pub fn config_mut(&mut self) -> &mut torrust_tracker_configuration::HttpTracker { - 
&mut self.state.http_server.cfg - } -} - -impl TestEnvironment> { - pub async fn new_running(cfg: torrust_tracker_configuration::Configuration) -> Self { - let test_env = StoppedTestEnvironment::new_stopped(cfg); - - test_env.start().await - } - - pub async fn stop(self) -> TestEnvironment> { - TestEnvironment { - cfg: self.cfg, - tracker: self.tracker, - state: Stopped { - http_server: self.state.http_server.stop().await.unwrap(), - }, - } - } - - pub fn bind_address(&self) -> &std::net::SocketAddr { - &self.state.http_server.state.bind_addr - } - - #[allow(dead_code)] - pub fn config(&self) -> &torrust_tracker_configuration::HttpTracker { - &self.state.http_server.cfg - } -} - -#[allow(clippy::module_name_repetitions, dead_code)] -pub fn stopped_test_environment( - cfg: torrust_tracker_configuration::Configuration, -) -> StoppedTestEnvironment { - TestEnvironment::new_stopped(cfg) -} - -#[allow(clippy::module_name_repetitions)] -pub async fn running_test_environment( - cfg: torrust_tracker_configuration::Configuration, -) -> RunningTestEnvironment { - TestEnvironment::new_running(cfg).await -} - -pub fn http_server(cfg: torrust_tracker_configuration::HttpTracker) -> StoppedHttpServer { - let http_server = I::new(); - - HttpServer::new(cfg, http_server) -} diff --git a/tests/servers/http/v1/contract.rs b/tests/servers/http/v1/contract.rs index 2e24af6b7..14c237984 100644 --- a/tests/servers/http/v1/contract.rs +++ b/tests/servers/http/v1/contract.rs @@ -1,56 +1,72 @@ use torrust_tracker_test_helpers::configuration; -use crate::servers::http::test_environment::running_test_environment; - -pub type V1 = torrust_tracker::servers::http::v1::launcher::Launcher; +use crate::servers::http::Started; #[tokio::test] -async fn test_environment_should_be_started_and_stopped() { - let test_env = running_test_environment::(configuration::ephemeral()).await; +async fn environment_should_be_started_and_stopped() { + let env = 
Started::new(&configuration::ephemeral().into()).await; - test_env.stop().await; + env.stop().await; } mod for_all_config_modes { + use torrust_tracker::servers::http::v1::handlers::health_check::{Report, Status}; + use torrust_tracker_test_helpers::configuration; + + use crate::servers::http::client::Client; + use crate::servers::http::Started; + + #[tokio::test] + async fn health_check_endpoint_should_return_ok_if_the_http_tracker_is_running() { + let env = Started::new(&configuration::ephemeral_with_reverse_proxy().into()).await; + + let response = Client::new(*env.bind_address()).health_check().await; + + assert_eq!(response.status(), 200); + assert_eq!(response.headers().get("content-type").unwrap(), "application/json"); + assert_eq!(response.json::().await.unwrap(), Report { status: Status::Ok }); + + env.stop().await; + } + mod and_running_on_reverse_proxy { use torrust_tracker_test_helpers::configuration; use crate::servers::http::asserts::assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response; use crate::servers::http::client::Client; use crate::servers::http::requests::announce::QueryBuilder; - use crate::servers::http::test_environment::running_test_environment; - use crate::servers::http::v1::contract::V1; + use crate::servers::http::Started; #[tokio::test] async fn should_fail_when_the_http_request_does_not_include_the_xff_http_request_header() { // If the tracker is running behind a reverse proxy, the peer IP is the // right most IP in the `X-Forwarded-For` HTTP header, which is the IP of the proxy's client. 
- let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; + let env = Started::new(&configuration::ephemeral_with_reverse_proxy().into()).await; let params = QueryBuilder::default().query().params(); - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + let response = Client::new(*env.bind_address()).get(&format!("announce?{params}")).await; assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_when_the_xff_http_request_header_contains_an_invalid_ip() { - let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; + let env = Started::new(&configuration::ephemeral_with_reverse_proxy().into()).await; let params = QueryBuilder::default().query().params(); - let response = Client::new(*test_env.bind_address()) + let response = Client::new(*env.bind_address()) .get_with_header(&format!("announce?{params}"), "X-Forwarded-For", "INVALID IP") .await; assert_could_not_find_remote_address_on_x_forwarded_for_header_error_response(response).await; - test_env.stop().await; + env.stop().await; } } @@ -71,13 +87,14 @@ mod for_all_config_modes { use std::str::FromStr; use local_ip_address::local_ip; - use reqwest::Response; + use reqwest::{Response, StatusCode}; use tokio::net::TcpListener; - use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; - use torrust_tracker::tracker::peer; + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::peer; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; - use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; + use crate::common::fixtures::invalid_info_hashes; use crate::servers::http::asserts::{ assert_announce_response, assert_bad_announce_request_error_response, 
assert_cannot_parse_query_param_error_response, assert_cannot_parse_query_params_error_response, assert_compact_announce_response, assert_empty_announce_response, @@ -85,55 +102,59 @@ mod for_all_config_modes { }; use crate::servers::http::client::Client; use crate::servers::http::requests::announce::{Compact, QueryBuilder}; - use crate::servers::http::responses; use crate::servers::http::responses::announce::{Announce, CompactPeer, CompactPeerList, DictionaryPeer}; - use crate::servers::http::test_environment::running_test_environment; - use crate::servers::http::v1::contract::V1; + use crate::servers::http::{responses, Started}; + + #[tokio::test] + async fn it_should_start_and_stop() { + let env = Started::new(&configuration::ephemeral_public().into()).await; + env.stop().await; + } #[tokio::test] async fn should_respond_if_only_the_mandatory_fields_are_provided() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let mut params = QueryBuilder::default().query().params(); params.remove_optional_params(); - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + let response = Client::new(*env.bind_address()).get(&format!("announce?{params}")).await; assert_is_announce_response(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_when_the_url_query_component_is_empty() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; - let response = Client::new(*test_env.bind_address()).get("announce").await; + let response = Client::new(*env.bind_address()).get("announce").await; assert_missing_query_params_for_announce_request_error_response(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_when_url_query_parameters_are_invalid() { - let test_env 
= running_test_environment::(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let invalid_query_param = "a=b=c"; - let response = Client::new(*test_env.bind_address()) + let response = Client::new(*env.bind_address()) .get(&format!("announce?{invalid_query_param}")) .await; assert_cannot_parse_query_param_error_response(response, "invalid param a=b=c").await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_when_a_mandatory_field_is_missing() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; // Without `info_hash` param @@ -141,7 +162,7 @@ mod for_all_config_modes { params.info_hash = None; - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + let response = Client::new(*env.bind_address()).get(&format!("announce?{params}")).await; assert_bad_announce_request_error_response(response, "missing param info_hash").await; @@ -151,7 +172,7 @@ mod for_all_config_modes { params.peer_id = None; - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + let response = Client::new(*env.bind_address()).get(&format!("announce?{params}")).await; assert_bad_announce_request_error_response(response, "missing param peer_id").await; @@ -161,28 +182,28 @@ mod for_all_config_modes { params.port = None; - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + let response = Client::new(*env.bind_address()).get(&format!("announce?{params}")).await; assert_bad_announce_request_error_response(response, "missing param port").await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_when_the_info_hash_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + let env = 
Started::new(&configuration::ephemeral().into()).await; let mut params = QueryBuilder::default().query().params(); for invalid_value in &invalid_info_hashes() { params.set("info_hash", invalid_value); - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + let response = Client::new(*env.bind_address()).get(&format!("announce?{params}")).await; assert_cannot_parse_query_params_error_response(response, "").await; } - test_env.stop().await; + env.stop().await; } #[tokio::test] @@ -192,22 +213,22 @@ mod for_all_config_modes { // 1. If tracker is NOT running `on_reverse_proxy` from the remote client IP. // 2. If tracker is running `on_reverse_proxy` from `X-Forwarded-For` request HTTP header. - let test_env = running_test_environment::(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let mut params = QueryBuilder::default().query().params(); params.peer_addr = Some("INVALID-IP-ADDRESS".to_string()); - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + let response = Client::new(*env.bind_address()).get(&format!("announce?{params}")).await; assert_is_announce_response(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_when_the_downloaded_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let mut params = QueryBuilder::default().query().params(); @@ -216,17 +237,17 @@ mod for_all_config_modes { for invalid_value in invalid_values { params.set("downloaded", invalid_value); - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + let response = Client::new(*env.bind_address()).get(&format!("announce?{params}")).await; assert_bad_announce_request_error_response(response, "invalid param value").await; } - test_env.stop().await; + 
env.stop().await; } #[tokio::test] async fn should_fail_when_the_uploaded_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let mut params = QueryBuilder::default().query().params(); @@ -235,17 +256,17 @@ mod for_all_config_modes { for invalid_value in invalid_values { params.set("uploaded", invalid_value); - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + let response = Client::new(*env.bind_address()).get(&format!("announce?{params}")).await; assert_bad_announce_request_error_response(response, "invalid param value").await; } - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_when_the_peer_id_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let mut params = QueryBuilder::default().query().params(); @@ -261,17 +282,17 @@ mod for_all_config_modes { for invalid_value in invalid_values { params.set("peer_id", invalid_value); - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + let response = Client::new(*env.bind_address()).get(&format!("announce?{params}")).await; assert_bad_announce_request_error_response(response, "invalid param value").await; } - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_when_the_port_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let mut params = QueryBuilder::default().query().params(); @@ -280,17 +301,17 @@ mod for_all_config_modes { for invalid_value in invalid_values { params.set("port", invalid_value); - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + let response = 
Client::new(*env.bind_address()).get(&format!("announce?{params}")).await; assert_bad_announce_request_error_response(response, "invalid param value").await; } - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_when_the_left_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let mut params = QueryBuilder::default().query().params(); @@ -299,17 +320,17 @@ mod for_all_config_modes { for invalid_value in invalid_values { params.set("left", invalid_value); - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + let response = Client::new(*env.bind_address()).get(&format!("announce?{params}")).await; assert_bad_announce_request_error_response(response, "invalid param value").await; } - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_when_the_event_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let mut params = QueryBuilder::default().query().params(); @@ -326,17 +347,17 @@ mod for_all_config_modes { for invalid_value in invalid_values { params.set("event", invalid_value); - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + let response = Client::new(*env.bind_address()).get(&format!("announce?{params}")).await; assert_bad_announce_request_error_response(response, "invalid param value").await; } - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_when_the_compact_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; let mut params = QueryBuilder::default().query().params(); @@ -345,19 +366,19 @@ mod for_all_config_modes { for invalid_value in 
invalid_values { params.set("compact", invalid_value); - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + let response = Client::new(*env.bind_address()).get(&format!("announce?{params}")).await; assert_bad_announce_request_error_response(response, "invalid param value").await; } - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_return_no_peers_if_the_announced_peer_is_the_first_one() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let env = Started::new(&configuration::ephemeral_public().into()).await; - let response = Client::new(*test_env.bind_address()) + let response = Client::new(*env.bind_address()) .announce( &QueryBuilder::default() .with_info_hash(&InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap()) @@ -365,24 +386,26 @@ mod for_all_config_modes { ) .await; + let announce_policy = env.tracker.get_announce_policy(); + assert_announce_response( response, &Announce { complete: 1, // the peer for this test incomplete: 0, - interval: test_env.tracker.config.announce_interval, - min_interval: test_env.tracker.config.min_announce_interval, + interval: announce_policy.interval, + min_interval: announce_policy.interval_min, peers: vec![], }, ) .await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_return_the_list_of_previously_announced_peers() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let env = Started::new(&configuration::ephemeral_public().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -392,10 +415,10 @@ mod for_all_config_modes { .build(); // Add the Peer 1 - test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; + env.add_torrent_peer(&info_hash, &previously_announced_peer); // Announce the new Peer 2. 
This new peer is non included on the response peer list - let response = Client::new(*test_env.bind_address()) + let response = Client::new(*env.bind_address()) .announce( &QueryBuilder::default() .with_info_hash(&info_hash) @@ -404,25 +427,27 @@ mod for_all_config_modes { ) .await; + let announce_policy = env.tracker.get_announce_policy(); + // It should only contain the previously announced peer assert_announce_response( response, &Announce { complete: 2, incomplete: 0, - interval: test_env.tracker.config.announce_interval, - min_interval: test_env.tracker.config.min_announce_interval, + interval: announce_policy.interval, + min_interval: announce_policy.interval_min, peers: vec![DictionaryPeer::from(previously_announced_peer)], }, ) .await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_return_the_list_of_previously_announced_peers_including_peers_using_ipv4_and_ipv6() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let env = Started::new(&configuration::ephemeral_public().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -431,7 +456,7 @@ mod for_all_config_modes { .with_peer_id(&peer::Id(*b"-qB00000000000000001")) .with_peer_addr(&SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0x69, 0x69, 0x69, 0x69)), 8080)) .build(); - test_env.add_torrent_peer(&info_hash, &peer_using_ipv4).await; + env.add_torrent_peer(&info_hash, &peer_using_ipv4); // Announce a peer using IPV6 let peer_using_ipv6 = PeerBuilder::default() @@ -441,10 +466,10 @@ mod for_all_config_modes { 8080, )) .build(); - test_env.add_torrent_peer(&info_hash, &peer_using_ipv6).await; + env.add_torrent_peer(&info_hash, &peer_using_ipv6); // Announce the new Peer. 
- let response = Client::new(*test_env.bind_address()) + let response = Client::new(*env.bind_address()) .announce( &QueryBuilder::default() .with_info_hash(&info_hash) @@ -453,6 +478,8 @@ mod for_all_config_modes { ) .await; + let announce_policy = env.tracker.get_announce_policy(); + // The newly announced peer is not included on the response peer list, // but all the previously announced peers should be included regardless the IP version they are using. assert_announce_response( @@ -460,25 +487,25 @@ mod for_all_config_modes { &Announce { complete: 3, incomplete: 0, - interval: test_env.tracker.config.announce_interval, - min_interval: test_env.tracker.config.min_announce_interval, + interval: announce_policy.interval, + min_interval: announce_policy.interval_min, peers: vec![DictionaryPeer::from(peer_using_ipv4), DictionaryPeer::from(peer_using_ipv6)], }, ) .await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_consider_two_peers_to_be_the_same_when_they_have_the_same_peer_id_even_if_the_ip_is_different() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let env = Started::new(&configuration::ephemeral_public().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let peer = PeerBuilder::default().build(); // Add a peer - test_env.add_torrent_peer(&info_hash, &peer).await; + env.add_torrent_peer(&info_hash, &peer); let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) @@ -487,11 +514,11 @@ mod for_all_config_modes { assert_ne!(peer.peer_addr.ip(), announce_query.peer_addr); - let response = Client::new(*test_env.bind_address()).announce(&announce_query).await; + let response = Client::new(*env.bind_address()).announce(&announce_query).await; assert_empty_announce_response(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] @@ -499,7 +526,7 @@ mod for_all_config_modes { // Tracker Returns 
Compact Peer Lists // https://www.bittorrent.org/beps/bep_0023.html - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let env = Started::new(&configuration::ephemeral_public().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -509,10 +536,10 @@ mod for_all_config_modes { .build(); // Add the Peer 1 - test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; + env.add_torrent_peer(&info_hash, &previously_announced_peer); // Announce the new Peer 2 accepting compact responses - let response = Client::new(*test_env.bind_address()) + let response = Client::new(*env.bind_address()) .announce( &QueryBuilder::default() .with_info_hash(&info_hash) @@ -532,7 +559,7 @@ mod for_all_config_modes { assert_compact_announce_response(response, &expected_response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] @@ -540,7 +567,7 @@ mod for_all_config_modes { // code-review: the HTTP tracker does not return the compact response by default if the "compact" // param is not provided in the announce URL. The BEP 23 suggest to do so. 
- let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let env = Started::new(&configuration::ephemeral_public().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); @@ -550,12 +577,12 @@ mod for_all_config_modes { .build(); // Add the Peer 1 - test_env.add_torrent_peer(&info_hash, &previously_announced_peer).await; + env.add_torrent_peer(&info_hash, &previously_announced_peer); // Announce the new Peer 2 without passing the "compact" param // By default it should respond with the compact peer list // https://www.bittorrent.org/beps/bep_0023.html - let response = Client::new(*test_env.bind_address()) + let response = Client::new(*env.bind_address()) .announce( &QueryBuilder::default() .with_info_hash(&info_hash) @@ -567,7 +594,7 @@ mod for_all_config_modes { assert!(!is_a_compact_announce_response(response).await); - test_env.stop().await; + env.stop().await; } async fn is_a_compact_announce_response(response: Response) -> bool { @@ -578,19 +605,19 @@ mod for_all_config_modes { #[tokio::test] async fn should_increase_the_number_of_tcp4_connections_handled_in_statistics() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let env = Started::new(&configuration::ephemeral_public().into()).await; - Client::new(*test_env.bind_address()) + Client::new(*env.bind_address()) .announce(&QueryBuilder::default().query()) .await; - let stats = test_env.tracker.get_stats().await; + let stats = env.tracker.get_stats().await; assert_eq!(stats.tcp4_connections_handled, 1); drop(stats); - test_env.stop().await; + env.stop().await; } #[tokio::test] @@ -602,28 +629,28 @@ mod for_all_config_modes { return; // we cannot bind to a ipv6 socket, so we will skip this test } - let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; + let env = Started::new(&configuration::ephemeral_ipv6().into()).await; - 
Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) + Client::bind(*env.bind_address(), IpAddr::from_str("::1").unwrap()) .announce(&QueryBuilder::default().query()) .await; - let stats = test_env.tracker.get_stats().await; + let stats = env.tracker.get_stats().await; assert_eq!(stats.tcp6_connections_handled, 1); drop(stats); - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_not_increase_the_number_of_tcp6_connections_handled_if_the_client_is_not_using_an_ipv6_ip() { // The tracker ignores the peer address in the request param. It uses the client remote ip address. - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let env = Started::new(&configuration::ephemeral_public().into()).await; - Client::new(*test_env.bind_address()) + Client::new(*env.bind_address()) .announce( &QueryBuilder::default() .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) @@ -631,30 +658,30 @@ mod for_all_config_modes { ) .await; - let stats = test_env.tracker.get_stats().await; + let stats = env.tracker.get_stats().await; assert_eq!(stats.tcp6_connections_handled, 0); drop(stats); - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_increase_the_number_of_tcp4_announce_requests_handled_in_statistics() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let env = Started::new(&configuration::ephemeral_public().into()).await; - Client::new(*test_env.bind_address()) + Client::new(*env.bind_address()) .announce(&QueryBuilder::default().query()) .await; - let stats = test_env.tracker.get_stats().await; + let stats = env.tracker.get_stats().await; assert_eq!(stats.tcp4_announces_handled, 1); drop(stats); - test_env.stop().await; + env.stop().await; } #[tokio::test] @@ -666,28 +693,28 @@ mod for_all_config_modes { return; // we cannot bind to a ipv6 socket, so we will skip this test } - let test_env = 
running_test_environment::(configuration::ephemeral_ipv6()).await; + let env = Started::new(&configuration::ephemeral_ipv6().into()).await; - Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) + Client::bind(*env.bind_address(), IpAddr::from_str("::1").unwrap()) .announce(&QueryBuilder::default().query()) .await; - let stats = test_env.tracker.get_stats().await; + let stats = env.tracker.get_stats().await; assert_eq!(stats.tcp6_announces_handled, 1); drop(stats); - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_not_increase_the_number_of_tcp6_announce_requests_handled_if_the_client_is_not_using_an_ipv6_ip() { // The tracker ignores the peer address in the request param. It uses the client remote ip address. - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let env = Started::new(&configuration::ephemeral_public().into()).await; - Client::new(*test_env.bind_address()) + Client::new(*env.bind_address()) .announce( &QueryBuilder::default() .with_peer_addr(&IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1))) @@ -695,38 +722,41 @@ mod for_all_config_modes { ) .await; - let stats = test_env.tracker.get_stats().await; + let stats = env.tracker.get_stats().await; assert_eq!(stats.tcp6_announces_handled, 0); drop(stats); - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_assign_to_the_peer_ip_the_remote_client_ip_instead_of_the_peer_address_in_the_request_param() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let env = Started::new(&configuration::ephemeral_public().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let client_ip = local_ip().unwrap(); - let client = Client::bind(*test_env.bind_address(), client_ip); - let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) .query(); - 
client.announce(&announce_query).await; + { + let client = Client::bind(*env.bind_address(), client_ip); + let status = client.announce(&announce_query).await.status(); + + assert_eq!(status, StatusCode::OK); + } - let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; + let peers = env.tracker.get_torrent_peers(&info_hash); let peer_addr = peers[0].peer_addr; assert_eq!(peer_addr.ip(), client_ip); assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); - test_env.stop().await; + env.stop().await; } #[tokio::test] @@ -737,32 +767,32 @@ mod for_all_config_modes { client <-> tracker <-> Internet 127.0.0.1 external_ip = "2.137.87.41" */ - - let test_env = running_test_environment::(configuration::ephemeral_with_external_ip( - IpAddr::from_str("2.137.87.41").unwrap(), - )) - .await; + let env = + Started::new(&configuration::ephemeral_with_external_ip(IpAddr::from_str("2.137.87.41").unwrap()).into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); let client_ip = loopback_ip; - let client = Client::bind(*test_env.bind_address(), client_ip); - let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) .query(); - client.announce(&announce_query).await; + { + let client = Client::bind(*env.bind_address(), client_ip); + let status = client.announce(&announce_query).await.status(); + + assert_eq!(status, StatusCode::OK); + } - let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; + let peers = env.tracker.get_torrent_peers(&info_hash); let peer_addr = peers[0].peer_addr; - assert_eq!(peer_addr.ip(), test_env.tracker.config.get_ext_ip().unwrap()); + assert_eq!(peer_addr.ip(), env.tracker.get_maybe_external_ip().unwrap()); assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); - test_env.stop().await; + env.stop().await; } #[tokio::test] @@ -774,31 
+804,35 @@ mod for_all_config_modes { ::1 external_ip = "2345:0425:2CA1:0000:0000:0567:5673:23b5" */ - let test_env = running_test_environment::(configuration::ephemeral_with_external_ip( - IpAddr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap(), - )) + let env = Started::new( + &configuration::ephemeral_with_external_ip(IpAddr::from_str("2345:0425:2CA1:0000:0000:0567:5673:23b5").unwrap()) + .into(), + ) .await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let loopback_ip = IpAddr::from_str("127.0.0.1").unwrap(); let client_ip = loopback_ip; - let client = Client::bind(*test_env.bind_address(), client_ip); - let announce_query = QueryBuilder::default() .with_info_hash(&info_hash) .with_peer_addr(&IpAddr::from_str("2.2.2.2").unwrap()) .query(); - client.announce(&announce_query).await; + { + let client = Client::bind(*env.bind_address(), client_ip); + let status = client.announce(&announce_query).await.status(); + + assert_eq!(status, StatusCode::OK); + } - let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; + let peers = env.tracker.get_torrent_peers(&info_hash); let peer_addr = peers[0].peer_addr; - assert_eq!(peer_addr.ip(), test_env.tracker.config.get_ext_ip().unwrap()); + assert_eq!(peer_addr.ip(), env.tracker.get_maybe_external_ip().unwrap()); assert_ne!(peer_addr.ip(), IpAddr::from_str("2.2.2.2").unwrap()); - test_env.stop().await; + env.stop().await; } #[tokio::test] @@ -810,28 +844,32 @@ mod for_all_config_modes { 145.254.214.256 X-Forwarded-For = 145.254.214.256 on_reverse_proxy = true 145.254.214.256 */ - let test_env = running_test_environment::(configuration::ephemeral_with_reverse_proxy()).await; + let env = Started::new(&configuration::ephemeral_with_reverse_proxy().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let client = Client::new(*test_env.bind_address()); - let announce_query = 
QueryBuilder::default().with_info_hash(&info_hash).query(); - client - .announce_with_header( - &announce_query, - "X-Forwarded-For", - "203.0.113.195,2001:db8:85a3:8d3:1319:8a2e:370:7348,150.172.238.178", - ) - .await; + { + let client = Client::new(*env.bind_address()); + let status = client + .announce_with_header( + &announce_query, + "X-Forwarded-For", + "203.0.113.195,2001:db8:85a3:8d3:1319:8a2e:370:7348,150.172.238.178", + ) + .await + .status(); + + assert_eq!(status, StatusCode::OK); + } - let peers = test_env.tracker.get_all_torrent_peers(&info_hash).await; + let peers = env.tracker.get_torrent_peers(&info_hash); let peer_addr = peers[0].peer_addr; assert_eq!(peer_addr.ip(), IpAddr::from_str("150.172.238.178").unwrap()); - test_env.stop().await; + env.stop().await; } } @@ -849,67 +887,64 @@ mod for_all_config_modes { use std::str::FromStr; use tokio::net::TcpListener; - use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; - use torrust_tracker::tracker::peer; + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::peer; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; - use crate::common::fixtures::{invalid_info_hashes, PeerBuilder}; + use crate::common::fixtures::invalid_info_hashes; use crate::servers::http::asserts::{ assert_cannot_parse_query_params_error_response, assert_missing_query_params_for_scrape_request_error_response, assert_scrape_response, }; use crate::servers::http::client::Client; - use crate::servers::http::requests; use crate::servers::http::requests::scrape::QueryBuilder; use crate::servers::http::responses::scrape::{self, File, ResponseBuilder}; - use crate::servers::http::test_environment::running_test_environment; - use crate::servers::http::v1::contract::V1; + use crate::servers::http::{requests, Started}; //#[tokio::test] #[allow(dead_code)] async fn should_fail_when_the_request_is_empty() { - let test_env = 
running_test_environment::(configuration::ephemeral_mode_public()).await; - let response = Client::new(*test_env.bind_address()).get("scrape").await; + let env = Started::new(&configuration::ephemeral_public().into()).await; + let response = Client::new(*env.bind_address()).get("scrape").await; assert_missing_query_params_for_scrape_request_error_response(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_when_the_info_hash_param_is_invalid() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let env = Started::new(&configuration::ephemeral_public().into()).await; let mut params = QueryBuilder::default().query().params(); for invalid_value in &invalid_info_hashes() { params.set_one_info_hash_param(invalid_value); - let response = Client::new(*test_env.bind_address()).get(&format!("announce?{params}")).await; + let response = Client::new(*env.bind_address()).get(&format!("announce?{params}")).await; assert_cannot_parse_query_params_error_response(response, "").await; } - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_return_the_file_with_the_incomplete_peer_when_there_is_one_peer_with_bytes_pending_to_download() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let env = Started::new(&configuration::ephemeral_public().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - .await; + env.add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ); - let response = Client::new(*test_env.bind_address()) + let response = Client::new(*env.bind_address()) .scrape( 
&requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -930,26 +965,24 @@ mod for_all_config_modes { assert_scrape_response(response, &expected_scrape_response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_return_the_file_with_the_complete_peer_when_there_is_one_peer_with_no_bytes_pending_to_download() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let env = Started::new(&configuration::ephemeral_public().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_no_bytes_pending_to_download() - .build(), - ) - .await; + env.add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_no_bytes_pending_to_download() + .build(), + ); - let response = Client::new(*test_env.bind_address()) + let response = Client::new(*env.bind_address()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -970,16 +1003,16 @@ mod for_all_config_modes { assert_scrape_response(response, &expected_scrape_response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_return_a_file_with_zeroed_values_when_there_are_no_peers() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let env = Started::new(&configuration::ephemeral_public().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let response = Client::new(*test_env.bind_address()) + let response = Client::new(*env.bind_address()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -989,17 +1022,17 @@ mod for_all_config_modes { assert_scrape_response(response, 
&scrape::Response::with_one_file(info_hash.bytes(), File::zeroed())).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_accept_multiple_infohashes() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let env = Started::new(&configuration::ephemeral_public().into()).await; let info_hash1 = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); let info_hash2 = InfoHash::from_str("3b245504cf5f11bbdbe1201cea6a6bf45aee1bc0").unwrap(); - let response = Client::new(*test_env.bind_address()) + let response = Client::new(*env.bind_address()) .scrape( &requests::scrape::QueryBuilder::default() .add_info_hash(&info_hash1) @@ -1015,16 +1048,16 @@ mod for_all_config_modes { assert_scrape_response(response, &expected_scrape_response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_increase_the_number_ot_tcp4_scrape_requests_handled_in_statistics() { - let test_env = running_test_environment::(configuration::ephemeral_mode_public()).await; + let env = Started::new(&configuration::ephemeral_public().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - Client::new(*test_env.bind_address()) + Client::new(*env.bind_address()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -1032,13 +1065,13 @@ mod for_all_config_modes { ) .await; - let stats = test_env.tracker.get_stats().await; + let stats = env.tracker.get_stats().await; assert_eq!(stats.tcp4_scrapes_handled, 1); drop(stats); - test_env.stop().await; + env.stop().await; } #[tokio::test] @@ -1050,11 +1083,11 @@ mod for_all_config_modes { return; // we cannot bind to a ipv6 socket, so we will skip this test } - let test_env = running_test_environment::(configuration::ephemeral_ipv6()).await; + let env = Started::new(&configuration::ephemeral_ipv6().into()).await; let info_hash = 
InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - Client::bind(*test_env.bind_address(), IpAddr::from_str("::1").unwrap()) + Client::bind(*env.bind_address(), IpAddr::from_str("::1").unwrap()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -1062,13 +1095,13 @@ mod for_all_config_modes { ) .await; - let stats = test_env.tracker.get_stats().await; + let stats = env.tracker.get_stats().await; assert_eq!(stats.tcp6_scrapes_handled, 1); drop(stats); - test_env.stop().await; + env.stop().await; } } } @@ -1078,84 +1111,78 @@ mod configured_as_whitelisted { mod and_receiving_an_announce_request { use std::str::FromStr; - use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; + use torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; use crate::servers::http::asserts::{assert_is_announce_response, assert_torrent_not_in_whitelist_error_response}; use crate::servers::http::client::Client; use crate::servers::http::requests::announce::QueryBuilder; - use crate::servers::http::test_environment::running_test_environment; - use crate::servers::http::v1::contract::V1; + use crate::servers::http::Started; #[tokio::test] async fn should_fail_if_the_torrent_is_not_in_the_whitelist() { - let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; + let env = Started::new(&configuration::ephemeral_listed().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let response = Client::new(*test_env.bind_address()) + let response = Client::new(*env.bind_address()) .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) .await; assert_torrent_not_in_whitelist_error_response(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_allow_announcing_a_whitelisted_torrent() { - let test_env = 
running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; + let env = Started::new(&configuration::ephemeral_listed().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - test_env - .tracker + env.tracker .add_torrent_to_whitelist(&info_hash) .await .expect("should add the torrent to the whitelist"); - let response = Client::new(*test_env.bind_address()) + let response = Client::new(*env.bind_address()) .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) .await; assert_is_announce_response(response).await; - test_env.stop().await; + env.stop().await; } } mod receiving_an_scrape_request { use std::str::FromStr; - use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; - use torrust_tracker::tracker::peer; + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::peer; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; - use crate::common::fixtures::PeerBuilder; use crate::servers::http::asserts::assert_scrape_response; use crate::servers::http::client::Client; - use crate::servers::http::requests; use crate::servers::http::responses::scrape::{File, ResponseBuilder}; - use crate::servers::http::test_environment::running_test_environment; - use crate::servers::http::v1::contract::V1; + use crate::servers::http::{requests, Started}; #[tokio::test] async fn should_return_the_zeroed_file_when_the_requested_file_is_not_whitelisted() { - let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; + let env = Started::new(&configuration::ephemeral_listed().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - .await; + 
env.add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ); - let response = Client::new(*test_env.bind_address()) + let response = Client::new(*env.bind_address()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -1167,32 +1194,29 @@ mod configured_as_whitelisted { assert_scrape_response(response, &expected_scrape_response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_return_the_file_stats_when_the_requested_file_is_whitelisted() { - let test_env = running_test_environment::(configuration::ephemeral_mode_whitelisted()).await; + let env = Started::new(&configuration::ephemeral_listed().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - .await; + env.add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ); - test_env - .tracker + env.tracker .add_torrent_to_whitelist(&info_hash) .await .expect("should add the torrent to the whitelist"); - let response = Client::new(*test_env.bind_address()) + let response = Client::new(*env.bind_address()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -1213,7 +1237,7 @@ mod configured_as_whitelisted { assert_scrape_response(response, &expected_scrape_response).await; - test_env.stop().await; + env.stop().await; } } } @@ -1224,53 +1248,52 @@ mod configured_as_private { use std::str::FromStr; use std::time::Duration; - use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; - use torrust_tracker::tracker::auth::Key; + use torrust_tracker::core::auth::Key; + use 
torrust_tracker_primitives::info_hash::InfoHash; use torrust_tracker_test_helpers::configuration; use crate::servers::http::asserts::{assert_authentication_error_response, assert_is_announce_response}; use crate::servers::http::client::Client; use crate::servers::http::requests::announce::QueryBuilder; - use crate::servers::http::test_environment::running_test_environment; - use crate::servers::http::v1::contract::V1; + use crate::servers::http::Started; #[tokio::test] async fn should_respond_to_authenticated_peers() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + let env = Started::new(&configuration::ephemeral_private().into()).await; - let expiring_key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); + let expiring_key = env.tracker.generate_auth_key(Some(Duration::from_secs(60))).await.unwrap(); - let response = Client::authenticated(*test_env.bind_address(), expiring_key.key()) + let response = Client::authenticated(*env.bind_address(), expiring_key.key()) .announce(&QueryBuilder::default().query()) .await; assert_is_announce_response(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_if_the_peer_has_not_provided_the_authentication_key() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + let env = Started::new(&configuration::ephemeral_private().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - let response = Client::new(*test_env.bind_address()) + let response = Client::new(*env.bind_address()) .announce(&QueryBuilder::default().with_info_hash(&info_hash).query()) .await; assert_authentication_error_response(response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_fail_if_the_key_query_param_cannot_be_parsed() { - let test_env = 
running_test_environment::(configuration::ephemeral_mode_private()).await; + let env = Started::new(&configuration::ephemeral_private().into()).await; let invalid_key = "INVALID_KEY"; - let response = Client::new(*test_env.bind_address()) + let response = Client::new(*env.bind_address()) .get(&format!( "announce/{invalid_key}?info_hash=%81%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00%00&peer_addr=2.137.87.41&downloaded=0&uploaded=0&peer_id=-qB00000000000000001&port=17548&left=0&event=completed&compact=0" )) @@ -1281,18 +1304,18 @@ mod configured_as_private { #[tokio::test] async fn should_fail_if_the_peer_cannot_be_authenticated_with_the_provided_key() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + let env = Started::new(&configuration::ephemeral_private().into()).await; // The tracker does not have this key let unregistered_key = Key::from_str("YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ").unwrap(); - let response = Client::authenticated(*test_env.bind_address(), unregistered_key) + let response = Client::authenticated(*env.bind_address(), unregistered_key) .announce(&QueryBuilder::default().query()) .await; assert_authentication_error_response(response).await; - test_env.stop().await; + env.stop().await; } } @@ -1301,26 +1324,24 @@ mod configured_as_private { use std::str::FromStr; use std::time::Duration; - use torrust_tracker::shared::bit_torrent::info_hash::InfoHash; - use torrust_tracker::tracker::auth::Key; - use torrust_tracker::tracker::peer; + use torrust_tracker::core::auth::Key; + use torrust_tracker_primitives::info_hash::InfoHash; + use torrust_tracker_primitives::peer; + use torrust_tracker_primitives::peer::fixture::PeerBuilder; use torrust_tracker_test_helpers::configuration; - use crate::common::fixtures::PeerBuilder; use crate::servers::http::asserts::{assert_authentication_error_response, assert_scrape_response}; use crate::servers::http::client::Client; - use crate::servers::http::requests; use 
crate::servers::http::responses::scrape::{File, ResponseBuilder}; - use crate::servers::http::test_environment::running_test_environment; - use crate::servers::http::v1::contract::V1; + use crate::servers::http::{requests, Started}; #[tokio::test] async fn should_fail_if_the_key_query_param_cannot_be_parsed() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + let env = Started::new(&configuration::ephemeral_private().into()).await; let invalid_key = "INVALID_KEY"; - let response = Client::new(*test_env.bind_address()) + let response = Client::new(*env.bind_address()) .get(&format!( "scrape/{invalid_key}?info_hash=%3B%24U%04%CF%5F%11%BB%DB%E1%20%1C%EAjk%F4Z%EE%1B%C0" )) @@ -1331,21 +1352,19 @@ mod configured_as_private { #[tokio::test] async fn should_return_the_zeroed_file_when_the_client_is_not_authenticated() { - let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + let env = Started::new(&configuration::ephemeral_private().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - .await; + env.add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ); - let response = Client::new(*test_env.bind_address()) + let response = Client::new(*env.bind_address()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -1357,28 +1376,26 @@ mod configured_as_private { assert_scrape_response(response, &expected_scrape_response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] async fn should_return_the_real_file_stats_when_the_client_is_authenticated() { - let test_env = 
running_test_environment::(configuration::ephemeral_mode_private()).await; + let env = Started::new(&configuration::ephemeral_private().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - .await; + env.add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ); - let expiring_key = test_env.tracker.generate_auth_key(Duration::from_secs(60)).await.unwrap(); + let expiring_key = env.tracker.generate_auth_key(Some(Duration::from_secs(60))).await.unwrap(); - let response = Client::authenticated(*test_env.bind_address(), expiring_key.key()) + let response = Client::authenticated(*env.bind_address(), expiring_key.key()) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -1399,7 +1416,7 @@ mod configured_as_private { assert_scrape_response(response, &expected_scrape_response).await; - test_env.stop().await; + env.stop().await; } #[tokio::test] @@ -1407,23 +1424,21 @@ mod configured_as_private { // There is not authentication error // code-review: should this really be this way? 
- let test_env = running_test_environment::(configuration::ephemeral_mode_private()).await; + let env = Started::new(&configuration::ephemeral_private().into()).await; let info_hash = InfoHash::from_str("9c38422213e30bff212b30c360d26f9a02136422").unwrap(); - test_env - .add_torrent_peer( - &info_hash, - &PeerBuilder::default() - .with_peer_id(&peer::Id(*b"-qB00000000000000001")) - .with_bytes_pending_to_download(1) - .build(), - ) - .await; + env.add_torrent_peer( + &info_hash, + &PeerBuilder::default() + .with_peer_id(&peer::Id(*b"-qB00000000000000001")) + .with_bytes_pending_to_download(1) + .build(), + ); let false_key: Key = "YZSl4lMZupRuOpSRC3krIKR5BPB14nrJ".parse().unwrap(); - let response = Client::authenticated(*test_env.bind_address(), false_key) + let response = Client::authenticated(*env.bind_address(), false_key) .scrape( &requests::scrape::QueryBuilder::default() .with_one_info_hash(&info_hash) @@ -1435,7 +1450,7 @@ mod configured_as_private { assert_scrape_response(response, &expected_scrape_response).await; - test_env.stop().await; + env.stop().await; } } } diff --git a/tests/servers/mod.rs b/tests/servers/mod.rs index 7c30b6f40..65e9a665b 100644 --- a/tests/servers/mod.rs +++ b/tests/servers/mod.rs @@ -1,3 +1,4 @@ mod api; +pub mod health_check_api; mod http; mod udp; diff --git a/tests/servers/udp/client.rs b/tests/servers/udp/client.rs deleted file mode 100644 index d267adaba..000000000 --- a/tests/servers/udp/client.rs +++ /dev/null @@ -1,84 +0,0 @@ -use std::io::Cursor; -use std::sync::Arc; - -use aquatic_udp_protocol::{Request, Response}; -use tokio::net::UdpSocket; -use torrust_tracker::servers::udp::MAX_PACKET_SIZE; - -use crate::servers::udp::source_address; - -#[allow(clippy::module_name_repetitions)] -pub struct UdpClient { - pub socket: Arc, -} - -impl UdpClient { - pub async fn bind(local_address: &str) -> Self { - let socket = UdpSocket::bind(local_address).await.unwrap(); - Self { - socket: Arc::new(socket), - } - } - - pub async fn 
connect(&self, remote_address: &str) { - self.socket.connect(remote_address).await.unwrap(); - } - - pub async fn send(&self, bytes: &[u8]) -> usize { - self.socket.writable().await.unwrap(); - self.socket.send(bytes).await.unwrap() - } - - pub async fn receive(&self, bytes: &mut [u8]) -> usize { - self.socket.readable().await.unwrap(); - self.socket.recv(bytes).await.unwrap() - } -} - -/// Creates a new `UdpClient` connected to a Udp server -pub async fn new_udp_client_connected(remote_address: &str) -> UdpClient { - let port = 0; // Let OS choose an unused port. - let client = UdpClient::bind(&source_address(port)).await; - client.connect(remote_address).await; - client -} - -#[allow(clippy::module_name_repetitions)] -pub struct UdpTrackerClient { - pub udp_client: UdpClient, -} - -impl UdpTrackerClient { - pub async fn send(&self, request: Request) -> usize { - // Write request into a buffer - let request_buffer = vec![0u8; MAX_PACKET_SIZE]; - let mut cursor = Cursor::new(request_buffer); - - let request_data = match request.write(&mut cursor) { - Ok(()) => { - #[allow(clippy::cast_possible_truncation)] - let position = cursor.position() as usize; - let inner_request_buffer = cursor.get_ref(); - // Return slice which contains written request data - &inner_request_buffer[..position] - } - Err(e) => panic!("could not write request to bytes: {e}."), - }; - - self.udp_client.send(request_data).await - } - - pub async fn receive(&self) -> Response { - let mut response_buffer = [0u8; MAX_PACKET_SIZE]; - - let payload_size = self.udp_client.receive(&mut response_buffer).await; - - Response::from_bytes(&response_buffer[..payload_size], true).unwrap() - } -} - -/// Creates a new `UdpTrackerClient` connected to a Udp Tracker server -pub async fn new_udp_tracker_client_connected(remote_address: &str) -> UdpTrackerClient { - let udp_client = new_udp_client_connected(remote_address).await; - UdpTrackerClient { udp_client } -} diff --git a/tests/servers/udp/contract.rs 
b/tests/servers/udp/contract.rs index 3187d9871..e37ef7bf0 100644 --- a/tests/servers/udp/contract.rs +++ b/tests/servers/udp/contract.rs @@ -6,27 +6,30 @@ use core::panic; use aquatic_udp_protocol::{ConnectRequest, ConnectionId, Response, TransactionId}; -use torrust_tracker::servers::udp::MAX_PACKET_SIZE; +use torrust_tracker::shared::bit_torrent::tracker::udp::client::UdpTrackerClient; +use torrust_tracker::shared::bit_torrent::tracker::udp::MAX_PACKET_SIZE; +use torrust_tracker_configuration::DEFAULT_TIMEOUT; use torrust_tracker_test_helpers::configuration; use crate::servers::udp::asserts::is_error_response; -use crate::servers::udp::client::{new_udp_client_connected, UdpTrackerClient}; -use crate::servers::udp::test_environment::running_test_environment; +use crate::servers::udp::Started; fn empty_udp_request() -> [u8; MAX_PACKET_SIZE] { [0; MAX_PACKET_SIZE] } -fn empty_buffer() -> [u8; MAX_PACKET_SIZE] { - [0; MAX_PACKET_SIZE] -} - async fn send_connection_request(transaction_id: TransactionId, client: &UdpTrackerClient) -> ConnectionId { let connect_request = ConnectRequest { transaction_id }; - client.send(connect_request.into()).await; + match client.send(connect_request.into()).await { + Ok(_) => (), + Err(err) => panic!("{err}"), + }; - let response = client.receive().await; + let response = match client.receive().await { + Ok(response) => response, + Err(err) => panic!("{err}"), + }; match response { Response::Connect(connect_response) => connect_response.connection_id, @@ -36,42 +39,65 @@ async fn send_connection_request(transaction_id: TransactionId, client: &UdpTrac #[tokio::test] async fn should_return_a_bad_request_response_when_the_client_sends_an_empty_request() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; + + let client = match UdpTrackerClient::new(env.bind_address(), DEFAULT_TIMEOUT).await { + Ok(udp_client) => udp_client, + Err(err) => 
panic!("{err}"), + }; - let client = new_udp_client_connected(&test_env.bind_address().to_string()).await; + match client.client.send(&empty_udp_request()).await { + Ok(_) => (), + Err(err) => panic!("{err}"), + }; - client.send(&empty_udp_request()).await; + let response = match client.client.receive().await { + Ok(response) => response, + Err(err) => panic!("{err}"), + }; - let mut buffer = empty_buffer(); - client.receive(&mut buffer).await; - let response = Response::from_bytes(&buffer, true).unwrap(); + let response = Response::parse_bytes(&response, true).unwrap(); assert!(is_error_response(&response, "bad request")); + + env.stop().await; } mod receiving_a_connection_request { use aquatic_udp_protocol::{ConnectRequest, TransactionId}; + use torrust_tracker::shared::bit_torrent::tracker::udp::client::UdpTrackerClient; + use torrust_tracker_configuration::DEFAULT_TIMEOUT; use torrust_tracker_test_helpers::configuration; use crate::servers::udp::asserts::is_connect_response; - use crate::servers::udp::client::new_udp_tracker_client_connected; - use crate::servers::udp::test_environment::running_test_environment; + use crate::servers::udp::Started; #[tokio::test] async fn should_return_a_connect_response() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; - let client = new_udp_tracker_client_connected(&test_env.bind_address().to_string()).await; + let client = match UdpTrackerClient::new(env.bind_address(), DEFAULT_TIMEOUT).await { + Ok(udp_tracker_client) => udp_tracker_client, + Err(err) => panic!("{err}"), + }; let connect_request = ConnectRequest { - transaction_id: TransactionId(123), + transaction_id: TransactionId::new(123), + }; + + match client.send(connect_request.into()).await { + Ok(_) => (), + Err(err) => panic!("{err}"), }; - client.send(connect_request.into()).await; + let response = match client.receive().await { + Ok(response) => response, + 
Err(err) => panic!("{err}"), + }; - let response = client.receive().await; + assert!(is_connect_response(&response, TransactionId::new(123))); - assert!(is_connect_response(&response, TransactionId(123))); + env.stop().await; } } @@ -79,82 +105,137 @@ mod receiving_an_announce_request { use std::net::Ipv4Addr; use aquatic_udp_protocol::{ - AnnounceEvent, AnnounceRequest, ConnectionId, InfoHash, NumberOfBytes, NumberOfPeers, PeerId, PeerKey, Port, - TransactionId, + AnnounceActionPlaceholder, AnnounceEvent, AnnounceRequest, ConnectionId, InfoHash, NumberOfBytes, NumberOfPeers, PeerId, + PeerKey, Port, TransactionId, }; + use torrust_tracker::shared::bit_torrent::tracker::udp::client::UdpTrackerClient; + use torrust_tracker_configuration::DEFAULT_TIMEOUT; use torrust_tracker_test_helpers::configuration; use crate::servers::udp::asserts::is_ipv4_announce_response; - use crate::servers::udp::client::new_udp_tracker_client_connected; use crate::servers::udp::contract::send_connection_request; - use crate::servers::udp::test_environment::running_test_environment; - - #[tokio::test] - async fn should_return_an_announce_response() { - let test_env = running_test_environment(configuration::ephemeral()).await; - - let client = new_udp_tracker_client_connected(&test_env.bind_address().to_string()).await; - - let connection_id = send_connection_request(TransactionId(123), &client).await; + use crate::servers::udp::Started; + pub async fn send_and_get_announce(tx_id: TransactionId, c_id: ConnectionId, client: &UdpTrackerClient) { // Send announce request let announce_request = AnnounceRequest { - connection_id: ConnectionId(connection_id.0), - transaction_id: TransactionId(123i32), + connection_id: ConnectionId(c_id.0), + action_placeholder: AnnounceActionPlaceholder::default(), + transaction_id: tx_id, info_hash: InfoHash([0u8; 20]), peer_id: PeerId([255u8; 20]), - bytes_downloaded: NumberOfBytes(0i64), - bytes_uploaded: NumberOfBytes(0i64), - bytes_left: NumberOfBytes(0i64), 
- event: AnnounceEvent::Started, - ip_address: Some(Ipv4Addr::new(0, 0, 0, 0)), - key: PeerKey(0u32), - peers_wanted: NumberOfPeers(1i32), - port: Port(client.udp_client.socket.local_addr().unwrap().port()), + bytes_downloaded: NumberOfBytes(0i64.into()), + bytes_uploaded: NumberOfBytes(0i64.into()), + bytes_left: NumberOfBytes(0i64.into()), + event: AnnounceEvent::Started.into(), + ip_address: Ipv4Addr::new(0, 0, 0, 0).into(), + key: PeerKey::new(0i32), + peers_wanted: NumberOfPeers(1i32.into()), + port: Port(client.client.socket.local_addr().unwrap().port().into()), + }; + + match client.send(announce_request.into()).await { + Ok(_) => (), + Err(err) => panic!("{err}"), }; - client.send(announce_request.into()).await; + let response = match client.receive().await { + Ok(response) => response, + Err(err) => panic!("{err}"), + }; - let response = client.receive().await; + println!("test response {response:?}"); assert!(is_ipv4_announce_response(&response)); } + + #[tokio::test] + async fn should_return_an_announce_response() { + let env = Started::new(&configuration::ephemeral().into()).await; + + let client = match UdpTrackerClient::new(env.bind_address(), DEFAULT_TIMEOUT).await { + Ok(udp_tracker_client) => udp_tracker_client, + Err(err) => panic!("{err}"), + }; + + let tx_id = TransactionId::new(123); + + let c_id = send_connection_request(tx_id, &client).await; + + send_and_get_announce(tx_id, c_id, &client).await; + + env.stop().await; + } + + #[tokio::test] + async fn should_return_many_announce_response() { + let env = Started::new(&configuration::ephemeral().into()).await; + + let client = match UdpTrackerClient::new(env.bind_address(), DEFAULT_TIMEOUT).await { + Ok(udp_tracker_client) => udp_tracker_client, + Err(err) => panic!("{err}"), + }; + + let tx_id = TransactionId::new(123); + + let c_id = send_connection_request(tx_id, &client).await; + + for x in 0..1000 { + tracing::info!("req no: {x}"); + send_and_get_announce(tx_id, c_id, &client).await; + } + 
+ env.stop().await; + } } mod receiving_an_scrape_request { use aquatic_udp_protocol::{ConnectionId, InfoHash, ScrapeRequest, TransactionId}; + use torrust_tracker::shared::bit_torrent::tracker::udp::client::UdpTrackerClient; + use torrust_tracker_configuration::DEFAULT_TIMEOUT; use torrust_tracker_test_helpers::configuration; use crate::servers::udp::asserts::is_scrape_response; - use crate::servers::udp::client::new_udp_tracker_client_connected; use crate::servers::udp::contract::send_connection_request; - use crate::servers::udp::test_environment::running_test_environment; + use crate::servers::udp::Started; #[tokio::test] async fn should_return_a_scrape_response() { - let test_env = running_test_environment(configuration::ephemeral()).await; + let env = Started::new(&configuration::ephemeral().into()).await; - let client = new_udp_tracker_client_connected(&test_env.bind_address().to_string()).await; + let client = match UdpTrackerClient::new(env.bind_address(), DEFAULT_TIMEOUT).await { + Ok(udp_tracker_client) => udp_tracker_client, + Err(err) => panic!("{err}"), + }; - let connection_id = send_connection_request(TransactionId(123), &client).await; + let connection_id = send_connection_request(TransactionId::new(123), &client).await; // Send scrape request // Full scrapes are not allowed you need to pass an array of info hashes otherwise // it will return "bad request" error with empty vector - let info_hashes = vec![InfoHash([0u8; 20])]; + + let empty_info_hash = vec![InfoHash([0u8; 20])]; let scrape_request = ScrapeRequest { connection_id: ConnectionId(connection_id.0), - transaction_id: TransactionId(123i32), - info_hashes, + transaction_id: TransactionId::new(123i32), + info_hashes: empty_info_hash, }; - client.send(scrape_request.into()).await; + match client.send(scrape_request.into()).await { + Ok(_) => (), + Err(err) => panic!("{err}"), + }; - let response = client.receive().await; + let response = match client.receive().await { + Ok(response) => 
response, + Err(err) => panic!("{err}"), + }; assert!(is_scrape_response(&response)); + + env.stop().await; } } diff --git a/tests/servers/udp/environment.rs b/tests/servers/udp/environment.rs new file mode 100644 index 000000000..cfc4390c9 --- /dev/null +++ b/tests/servers/udp/environment.rs @@ -0,0 +1,103 @@ +use std::net::SocketAddr; +use std::sync::Arc; + +use torrust_tracker::bootstrap::app::initialize_with_configuration; +use torrust_tracker::core::Tracker; +use torrust_tracker::servers::registar::Registar; +use torrust_tracker::servers::udp::server::spawner::Spawner; +use torrust_tracker::servers::udp::server::states::{Running, Stopped}; +use torrust_tracker::servers::udp::server::Server; +use torrust_tracker_configuration::{Configuration, UdpTracker, DEFAULT_TIMEOUT}; +use torrust_tracker_primitives::info_hash::InfoHash; +use torrust_tracker_primitives::peer; + +pub struct Environment { + pub config: Arc, + pub tracker: Arc, + pub registar: Registar, + pub server: Server, +} + +impl Environment { + /// Add a torrent to the tracker + #[allow(dead_code)] + pub fn add_torrent(&self, info_hash: &InfoHash, peer: &peer::Peer) { + self.tracker.upsert_peer_and_get_stats(info_hash, peer); + } +} + +impl Environment { + #[allow(dead_code)] + pub fn new(configuration: &Arc) -> Self { + let tracker = initialize_with_configuration(configuration); + + let udp_tracker = configuration.udp_trackers.clone().expect("missing UDP tracker configuration"); + + let config = Arc::new(udp_tracker[0].clone()); + + let bind_to = config.bind_address; + + let server = Server::new(Spawner::new(bind_to)); + + Self { + config, + tracker, + registar: Registar::default(), + server, + } + } + + #[allow(dead_code)] + pub async fn start(self) -> Environment { + Environment { + config: self.config, + tracker: self.tracker.clone(), + registar: self.registar.clone(), + server: self.server.start(self.tracker, self.registar.give_form()).await.unwrap(), + } + } +} + +impl Environment { + pub async fn 
new(configuration: &Arc) -> Self { + tokio::time::timeout(DEFAULT_TIMEOUT, Environment::::new(configuration).start()) + .await + .expect("it should create an environment within the timeout") + } + + #[allow(dead_code)] + pub async fn stop(self) -> Environment { + let stopped = tokio::time::timeout(DEFAULT_TIMEOUT, self.server.stop()) + .await + .expect("it should stop the environment within the timeout"); + + Environment { + config: self.config, + tracker: self.tracker, + registar: Registar::default(), + server: stopped.expect("it stop the udp tracker service"), + } + } + + pub fn bind_address(&self) -> SocketAddr { + self.server.state.binding + } +} + +#[cfg(test)] +mod tests { + use std::time::Duration; + + use tokio::time::sleep; + use torrust_tracker_test_helpers::configuration; + + use crate::servers::udp::Started; + + #[tokio::test] + async fn it_should_make_and_stop_udp_server() { + let env = Started::new(&configuration::ephemeral().into()).await; + sleep(Duration::from_secs(1)).await; + env.stop().await; + sleep(Duration::from_secs(1)).await; + } +} diff --git a/tests/servers/udp/mod.rs b/tests/servers/udp/mod.rs index d39c37153..7eea8683f 100644 --- a/tests/servers/udp/mod.rs +++ b/tests/servers/udp/mod.rs @@ -1,9 +1,7 @@ +use torrust_tracker::servers::udp::server::states::Running; + pub mod asserts; -pub mod client; pub mod contract; -pub mod test_environment; +pub mod environment; -/// Generates the source address for the UDP client -fn source_address(port: u16) -> String { - format!("127.0.0.1:{port}") -} +pub type Started = environment::Environment; diff --git a/tests/servers/udp/test_environment.rs b/tests/servers/udp/test_environment.rs deleted file mode 100644 index 15266d881..000000000 --- a/tests/servers/udp/test_environment.rs +++ /dev/null @@ -1,100 +0,0 @@ -use std::net::SocketAddr; -use std::sync::Arc; - -use torrust_tracker::servers::udp::server::{RunningUdpServer, StoppedUdpServer, UdpServer}; -use 
torrust_tracker::shared::bit_torrent::info_hash::InfoHash; -use torrust_tracker::tracker::peer::Peer; -use torrust_tracker::tracker::Tracker; - -use crate::common::app::setup_with_configuration; - -#[allow(clippy::module_name_repetitions, dead_code)] -pub type StoppedTestEnvironment = TestEnvironment; -#[allow(clippy::module_name_repetitions)] -pub type RunningTestEnvironment = TestEnvironment; - -pub struct TestEnvironment { - pub cfg: Arc, - pub tracker: Arc, - pub state: S, -} - -#[allow(dead_code)] -pub struct Stopped { - udp_server: StoppedUdpServer, -} - -pub struct Running { - udp_server: RunningUdpServer, -} - -impl TestEnvironment { - /// Add a torrent to the tracker - #[allow(dead_code)] - pub async fn add_torrent(&self, info_hash: &InfoHash, peer: &Peer) { - self.tracker.update_torrent_with_peer_and_get_stats(info_hash, peer).await; - } -} - -impl TestEnvironment { - #[allow(dead_code)] - pub fn new_stopped(cfg: torrust_tracker_configuration::Configuration) -> Self { - let cfg = Arc::new(cfg); - - let tracker = setup_with_configuration(&cfg); - - let udp_server = udp_server(cfg.udp_trackers[0].clone()); - - Self { - cfg, - tracker, - state: Stopped { udp_server }, - } - } - - #[allow(dead_code)] - pub async fn start(self) -> TestEnvironment { - TestEnvironment { - cfg: self.cfg, - tracker: self.tracker.clone(), - state: Running { - udp_server: self.state.udp_server.start(self.tracker).await.unwrap(), - }, - } - } -} - -impl TestEnvironment { - pub async fn new_running(cfg: torrust_tracker_configuration::Configuration) -> Self { - StoppedTestEnvironment::new_stopped(cfg).start().await - } - - #[allow(dead_code)] - pub async fn stop(self) -> TestEnvironment { - TestEnvironment { - cfg: self.cfg, - tracker: self.tracker, - state: Stopped { - udp_server: self.state.udp_server.stop().await.unwrap(), - }, - } - } - - pub fn bind_address(&self) -> SocketAddr { - self.state.udp_server.state.bind_address - } -} - -#[allow(clippy::module_name_repetitions, 
dead_code)] -pub fn stopped_test_environment(cfg: torrust_tracker_configuration::Configuration) -> StoppedTestEnvironment { - TestEnvironment::new_stopped(cfg) -} - -#[allow(clippy::module_name_repetitions)] -pub async fn running_test_environment(cfg: torrust_tracker_configuration::Configuration) -> RunningTestEnvironment { - TestEnvironment::new_running(cfg).await -} - -pub fn udp_server(cfg: torrust_tracker_configuration::UdpTracker) -> StoppedUdpServer { - UdpServer::new(cfg) -} diff --git a/tests/wrk_benchmark_announce.lua b/tests/wrk_benchmark_announce.lua deleted file mode 100644 index c182f8e68..000000000 --- a/tests/wrk_benchmark_announce.lua +++ /dev/null @@ -1,53 +0,0 @@ --- else the randomness would be the same every run -math.randomseed(os.time()) - -local charset = "0123456789ABCDEF" - -function hexToChar(hex) - local n = tonumber(hex, 16) - local f = string.char(n) - return f -end - -function hexStringToCharString(hex) - local ret = {} - local r - for i = 0, 19 do - local x = i * 2 - r = hex:sub(x+1, x+2) - local f = hexToChar(r) - table.insert(ret, f) - end - return table.concat(ret) -end - -function urlEncode(str) - str = string.gsub (str, "([^0-9a-zA-Z !'()*._~-])", -- locale independent - function (c) return string.format ("%%%02X", string.byte(c)) end) - str = string.gsub (str, " ", "+") - return str -end - -function genHexString(length) - local ret = {} - local r - for i = 1, length do - r = math.random(1, #charset) - table.insert(ret, charset:sub(r, r)) - end - return table.concat(ret) -end - -function randomInfoHash() - local hexString = genHexString(40) - local str = hexStringToCharString(hexString) - return urlEncode(str) -end - --- the request function that will run at each request -request = function() - path = "/announce?info_hash=" .. randomInfoHash() .. 
"&peer_id=-lt0D80-a%D4%10%19%99%A6yh%9A%E1%CD%96&port=54434&uploaded=885&downloaded=0&left=0&corrupt=0&key=A78381BD&numwant=200&compact=1&no_peer_id=1&supportcrypto=1&redundant=0" - headers = {} - headers["X-Forwarded-For"] = "1.1.1.1" - return wrk.format("GET", path, headers) -end