-rw-r--r--  .github/dependabot.yml             |  10
-rw-r--r--  .github/workflows/ci.yml           | 100
-rw-r--r--  .github/workflows/commit-lint.yml  |  18
-rw-r--r--  .github/workflows/flake-update.yml |  28
-rw-r--r--  .gitlint                           | 138
-rw-r--r--  Cargo.toml                         |  21
-rw-r--r--  README.md                          |  42
-rw-r--r--  bors.toml                          |   9
-rw-r--r--  rustfmt.toml                       |   1
-rw-r--r--  src/async_dag.rs                   | 542
-rw-r--r--  src/dag_backend.rs                 | 106
-rw-r--r--  src/error.rs                       |  11
-rw-r--r--  src/id.rs                          |  18
-rw-r--r--  src/lib.rs                         |  20
-rw-r--r--  src/node.rs                        |  36
-rw-r--r--  src/node_id.rs                     |  10
-rw-r--r--  src/repository.rs                  |  26
-rw-r--r--  src/test_impl.rs                   |  79
18 files changed, 1130 insertions(+), 85 deletions(-)
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 0000000..3b65667
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,10 @@
+version: 2
+updates:
+- package-ecosystem: cargo
+ directory: "/"
+ schedule:
+ interval: monthly
+- package-ecosystem: github-actions
+ directory: "/"
+ schedule:
+ interval: monthly
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
new file mode 100644
index 0000000..5a93167
--- /dev/null
+++ b/.github/workflows/ci.yml
@@ -0,0 +1,100 @@
+name: CI
+
+on:
+ push:
+ branches: [master, staging, trying, release-*]
+ pull_request:
+ branches: [master, release-*]
+
+env:
+ CARGO_TERM_COLOR: always
+
+jobs:
+ check:
+ name: check
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ rust:
+ - 1.63.0
+ - stable
+ - beta
+ # - nightly
+
+ steps:
+ - name: Checkout sources
+ uses: actions/checkout@v3
+ - name: Install toolchain
+ uses: dtolnay/rust-toolchain@master
+ with:
+ toolchain: ${{ matrix.rust }}
+ - uses: swatinem/rust-cache@v2
+ - name: cargo-check
+ run: cargo check --all-features
+
+
+ fmt:
+ name: format
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v3
+ - uses: dtolnay/rust-toolchain@master
+ with:
+ toolchain: 1.63.0
+ components: rustfmt
+ - name: cargo-fmt
+ run: cargo fmt -- --check
+
+
+ test:
+ name: test
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ rust:
+ - 1.63.0
+ - stable
+ - beta
+ # - nightly
+ steps:
+ - name: Checkout sources
+ uses: actions/checkout@v3
+ - name: Install toolchain
+ uses: dtolnay/rust-toolchain@master
+ with:
+ toolchain: ${{ matrix.rust }}
+ - uses: swatinem/rust-cache@v2
+ - name: cargo-test
+ run: cargo test --all --all-features
+
+
+ clippy:
+ name: clippy
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v3
+ - uses: dtolnay/rust-toolchain@master
+ with:
+ toolchain: 1.63.0
+ components: clippy
+ - uses: swatinem/rust-cache@v2
+ - name: cargo-clippy
+ run: cargo clippy --all --all-targets --all-features -- -D warnings
+
+
+ # We need an "accumulation" job here because bors fails (times out) when
+ # listening on matrix builds.
+ # Hence we have this dummy job that bors can listen on (see bors.toml).
+ ci-success:
+ name: CI
+ if: ${{ success() }}
+ needs:
+ - check
+ - clippy
+ - fmt
+ - test
+ runs-on: ubuntu-latest
+ steps:
+ - name: CI succeeded
+ run: exit 0
diff --git a/.github/workflows/commit-lint.yml b/.github/workflows/commit-lint.yml
new file mode 100644
index 0000000..309d637
--- /dev/null
+++ b/.github/workflows/commit-lint.yml
@@ -0,0 +1,18 @@
+on:
+ pull_request:
+
+name: Pull Request Checks
+
+jobs:
+ commit-lint:
+ runs-on: ubuntu-latest
+ if: github.event_name == 'pull_request'
+ steps:
+ - uses: actions/checkout@v3
+ with:
+ fetch-depth: 0
+ - uses: actions/setup-python@v4
+ with:
+ python-version: '3.x'
+ - run: pip install gitlint
+ - run: gitlint --commits $(git merge-base origin/master HEAD)..HEAD
diff --git a/.github/workflows/flake-update.yml b/.github/workflows/flake-update.yml
new file mode 100644
index 0000000..efe57c5
--- /dev/null
+++ b/.github/workflows/flake-update.yml
@@ -0,0 +1,28 @@
+name: "Update flakes"
+
+on:
+ repository_dispatch:
+ workflow_dispatch:
+ schedule:
+ # 01:15 every monday
+ - cron: '15 1 * * 1'
+
+jobs:
+ lockfile:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v3
+ - name: Install Nix
+ uses: cachix/install-nix-action@v22
+ with:
+ extra_nix_config: |
+ access-tokens = github.com=${{ secrets.GITHUB_TOKEN }}
+ - name: Update flake.lock
+ uses: DeterminateSystems/update-flake-lock@v20
+ with:
+ pr-title: "Update flake.lock" # Title of PR to be created
+ # pr-labels: | # Labels to be set on the PR
+ # dependencies
+ # automated
+
diff --git a/.gitlint b/.gitlint
new file mode 100644
index 0000000..2844509
--- /dev/null
+++ b/.gitlint
@@ -0,0 +1,138 @@
+# Edit this file as you like.
+#
+# All these sections are optional. Each section with the exception of [general] represents
+# one rule and each key in it is an option for that specific rule.
+#
+# Rules and sections can be referenced by their full name or by id. For example
+# section "[body-max-line-length]" could also be written as "[B1]". Full section names are
+# used in here for clarity.
+#
+[general]
+# Ignore certain rules, referenced by their full name or id
+ignore=body-is-missing
+
+# verbosity should be a value between 1 and 3; the command-line -v flags take precedence over this
+# verbosity = 2
+
+# By default gitlint will ignore merge, revert, fixup and squash commits.
+ignore-merge-commits=true
+# ignore-revert-commits=true
+ignore-fixup-commits=false
+ignore-squash-commits=false
+
+# Ignore any data sent to gitlint via stdin
+# ignore-stdin=true
+
+# Fetch additional meta-data from the local repository when manually passing a
+# commit message to gitlint via stdin or --commit-msg. Disabled by default.
+# staged=true
+
+# Hard fail when the target commit range is empty. Note that gitlint will
+# already fail by default on invalid commit ranges. This option is specifically
+# to tell gitlint to fail on *valid but empty* commit ranges.
+# Disabled by default.
+# fail-without-commits=true
+
+# Enable debug mode (prints more output). Disabled by default.
+# debug=true
+
+# Enable community contributed rules
+# See http://jorisroovers.github.io/gitlint/contrib_rules for details
+contrib=CC1, CC2
+
+# Set the extra-path where gitlint will search for user defined rules
+# See http://jorisroovers.github.io/gitlint/user_defined_rules for details
+# extra-path=examples/
+
+# This is an example of how to configure the "title-max-length" rule and
+# set the line-length it enforces to 50
+# [title-max-length]
+# line-length=50
+
+# Conversely, you can also enforce minimal length of a title with the
+# "title-min-length" rule:
+# [title-min-length]
+# min-length=5
+
+# [title-must-not-contain-word]
+# Comma-separated list of words that should not occur in the title. Matching is case
+# insensitive. It's fine if the keyword occurs as part of a larger word (so "WIPING"
+# will not cause a violation, but "WIP: my title" will).
+# words=wip
+
+# [title-match-regex]
+# python-style regex that the commit-msg title must match
+# Note that the regex can conflict with other rules if not used correctly
+# (e.g. title-must-not-contain-word).
+# regex=^US[0-9]*
+
+# [body-max-line-length]
+# line-length=72
+
+# [body-min-length]
+# min-length=5
+
+# [body-is-missing]
+# Whether to ignore this rule on merge commits (which typically only have a title)
+# default = True
+# ignore-merge-commits=false
+
+# [body-changed-file-mention]
+# List of files that need to be explicitly mentioned in the body when they are changed
+# This is useful when developers often erroneously edit certain files or git submodules.
+# By specifying this rule, developers can only change the file when they explicitly reference
+# it in the commit message.
+# files=gitlint-core/gitlint/rules.py,README.md
+
+# [body-match-regex]
+# python-style regex that the commit-msg body must match.
+# E.g. body must end in My-Commit-Tag: foo
+# regex=My-Commit-Tag: foo$
+
+# [author-valid-email]
+# python-style regex that the commit author email address must match.
+# For example, use the following regex if you only want to allow email addresses from foo.com
+# regex=[^@]+@foo.com
+
+# [ignore-by-title]
+# Ignore certain rules for commits of which the title matches a regex
+# E.g. Match commit titles that start with "Release"
+# regex=^Release(.*)
+
+# Ignore certain rules, you can reference them by their id or by their full name
+# Use 'all' to ignore all rules
+# ignore=T1,body-min-length
+
+# [ignore-by-body]
+# Ignore certain rules for commits of which the body has a line that matches a regex
+# E.g. Match bodies that have a line that contains "release"
+# regex=(.*)release(.*)
+#
+# Ignore certain rules, you can reference them by their id or by their full name
+# Use 'all' to ignore all rules
+# ignore=T1,body-min-length
+
+# [ignore-body-lines]
+# Ignore certain lines in a commit body that match a regex.
+# E.g. Ignore all lines that start with 'Co-Authored-By'
+# regex=^Co-Authored-By
+
+[ignore-by-author-name]
+# Ignore certain rules for commits of which the author name matches a regex
+# E.g. Match commits made by dependabot
+regex=(.*)dependabot(.*)
+#
+# Ignore certain rules, you can reference them by their id or by their full name
+# Use 'all' to ignore all rules
+# ignore=T1,body-min-length
+
+# This is a contrib rule - a community contributed rule. These are disabled by default.
+# You need to explicitly enable them one-by-one by adding them to the "contrib" option
+# under [general] section above.
+# [contrib-title-conventional-commits]
+# Specify allowed commit types. For details see: https://www.conventionalcommits.org/
+# types = bugfix,user-story,epic
+[contrib-body-requires-signed-off-by]
+
+[contrib-disallow-cleanup-commits]
+
diff --git a/Cargo.toml b/Cargo.toml
index b460961..149e191 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,8 +1,19 @@
[package]
-name = "daglib"
-version = "0.1.0"
-authors = ["Matthias Beyer <mail@beyermatthias.de>"]
-edition = "2018"
+name = "daglib"
+version = "0.1.0"
+authors = ["Matthias Beyer <mail@beyermatthias.de>"]
+edition = "2021"
+description = "Generic async DAG library"
+
+license = "MPL-2.0"
+repository = "https://github.com/matthiasbeyer/daglib"
+documentation = "https://docs.rs/daglib"
[dependencies]
-futures = "0.1"
+async-trait = "0.1"
+futures = "0.3"
+thiserror = "1"
+
+[dev-dependencies]
+tokio-test = "0.4"
+
diff --git a/README.md b/README.md
index 7923f86..50bad1a 100644
--- a/README.md
+++ b/README.md
@@ -1,13 +1,51 @@
# daglib
-Generic library for working with DAG data structures. Based on futures, so that
-it can be used in a async context.
+Async DAG library, generic over the node type, the node identifier type, and
+the backend (the storage of the DAG).
+
## Status
Experimental. Do not use.
+## What is this?
+
+My problem was that I wanted to implement a DAG on IPFS/IPLD. The problem,
+though, is that the existing IPFS/IPLD libraries do not offer functionality for
+building a DAG easily.
+There are libraries for DAGs, but they all implement the storage functionality
+themselves. I needed a backend that is _async_ and _generic_, so that I can use
+IPFS (or anything else that works asynchronously) as the backend. So this lib
+was born.
+
+This library defines a simple interface for the underlying backend:
+
+```rust
+#[async_trait]
+pub trait DagBackend<Id, N>
+where
+    N: Node,
+    Id: NodeId + Send,
+{
+    type Error;
+
+    async fn get(&self, id: Id) -> Result<Option<(Id, N)>, Self::Error>;
+    async fn put(&mut self, node: N) -> Result<Id, Self::Error>;
+}
+```
+
+And that's it. The `AsyncDag` type implements the DAG data structure on top of
+that.
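+
+For illustration, a toy in-memory backend could look like the following sketch.
+`MyId`, `MyNode`, and `InMemory` are hypothetical types (the required
+`Node`/`NodeId` impls are omitted; see `src/node.rs` and `src/node_id.rs` for
+the actual trait definitions), and we assume `DagBackend` and the crate's
+generic `Error` type are re-exported at the crate root:
+
+```rust
+use async_trait::async_trait;
+use daglib::DagBackend;
+
+// Hypothetical id/node types, for illustration only.
+#[derive(Clone, Copy, Debug, PartialEq, Eq)]
+struct MyId(usize);
+
+#[derive(Clone, Debug)]
+struct MyNode {
+    parents: Vec<MyId>,
+    data: u8,
+}
+
+// A toy backend that keeps all nodes in a Vec; an id is simply the index of
+// the node in that Vec.
+struct InMemory(Vec<MyNode>);
+
+#[async_trait]
+impl DagBackend<MyId, MyNode> for InMemory {
+    // Assumes the crate's generic error type is re-exported.
+    type Error = daglib::Error<MyId>;
+
+    async fn get(&self, id: MyId) -> Result<Option<(MyId, MyNode)>, Self::Error> {
+        Ok(self.0.get(id.0).cloned().map(|node| (id, node)))
+    }
+
+    async fn put(&mut self, node: MyNode) -> Result<MyId, Self::Error> {
+        self.0.push(node);
+        Ok(MyId(self.0.len() - 1))
+    }
+}
+```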
+
+
+## Limitations
+
+Because this library was initially designed to be used with IPFS as the
+backend, it has some limitations:
+
+* It assumes that you can never delete nodes. You can
+  rewrite the DAG, but this does not necessarily delete nodes.
+
+This list will be extended in the future. Do not assume it is complete!
+
+
## License
MPL-2.0
diff --git a/bors.toml b/bors.toml
new file mode 100644
index 0000000..c50591c
--- /dev/null
+++ b/bors.toml
@@ -0,0 +1,9 @@
+# Must pass on the merge with the master branch
+status = [
+ "CI"
+]
+
+cut_body_after = "<details>"
+
+delete_merged_branches = true
+
diff --git a/rustfmt.toml b/rustfmt.toml
new file mode 100644
index 0000000..36755d2
--- /dev/null
+++ b/rustfmt.toml
@@ -0,0 +1 @@
+# default
diff --git a/src/async_dag.rs b/src/async_dag.rs
new file mode 100644
index 0000000..7abc6c5
--- /dev/null
+++ b/src/async_dag.rs
@@ -0,0 +1,542 @@
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+//
+
+use std::pin::Pin;
+
+use futures::stream::StreamExt;
+use futures::task::Poll;
+
+use crate::DagBackend;
+use crate::Node;
+use crate::NodeId;
+
+/// An async DAG, generic over Node, Node identifier and Backend implementation
+pub struct AsyncDag<Id, N, Backend>
+where
+ Id: NodeId + Send,
+ N: Node<Id = Id>,
+ Backend: DagBackend<Id, N>,
+{
+ head: Id,
+ backend: Backend,
+ _node: std::marker::PhantomData<N>,
+}
+
+impl<Id, N, Backend, Error> AsyncDag<Id, N, Backend>
+where
+ Id: NodeId + Send,
+ N: Node<Id = Id>,
+ Backend: DagBackend<Id, N, Error = Error>,
+ Error: From<crate::Error<Id>>,
+{
+ /// Create a new DAG with a backend and a HEAD node
+ ///
+ /// The head node is stored in the backend (via `DagBackend::put`) before the `AsyncDag` object is created.
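+ ///
+ /// ```ignore
+ /// // Illustrative only: `backend` is whatever `DagBackend` implementation
+ /// // you use, and `first_node` becomes the HEAD.
+ /// let dag = AsyncDag::new(backend, first_node).await?;
+ /// ```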
+ pub async fn new(mut backend: Backend, head: N) -> Result<Self, Error> {
+ backend.put(head).await.map(|id| AsyncDag {
+ head: id,
+ backend,
+ _node: std::marker::PhantomData,
+ })
+ }
+
+ /// Load an AsyncDag object using `head` as the HEAD node.
+ ///
+ /// # Warning
+ ///
+ /// This fails if `backend.get(head)` fails, or if there is no node with id
+ /// `head` (in which case `crate::Error::NodeNotFound` is returned).
+ pub async fn load(backend: Backend, head: Id) -> Result<Self, Error> {
+ backend
+ .get(head.clone())
+ .await?
+ .map(|(id, _)| AsyncDag {
+ head: id,
+ backend,
+ _node: std::marker::PhantomData,
+ })
+ .ok_or_else(|| crate::Error::NodeNotFound(head).into())
+ }
+
+ pub fn head(&self) -> &Id {
+ &self.head
+ }
+
+ pub fn backend(&self) -> &Backend {
+ &self.backend
+ }
+
+ pub fn backend_mut(&mut self) -> &mut Backend {
+ &mut self.backend
+ }
+
+ /// Check whether an `id` is in the DAG.
+ pub async fn has_id(&self, id: &Id) -> Result<bool, Error> {
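+ // Walk the whole DAG from HEAD, mapping every node to "is this the id we
+ // are looking for?" and OR-ing the answers; the first backend error
+ // encountered, if any, is propagated instead.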
+ self.stream()
+ .map(|r| -> Result<bool, _> { r.map(|(ref node_id, _)| node_id == id) })
+ .collect::<Vec<Result<bool, _>>>()
+ .await
+ .into_iter()
+ .fold(Ok(false), |acc, e| match (acc, e) {
+ (Err(e), _) => Err(e),
+ (Ok(_), Err(e)) => Err(e),
+ (Ok(a), Ok(b)) => Ok(a || b),
+ })
+ }
+
+ /// Iterate over the DAG
+ ///
+ /// This function returns a Stream over all nodes in the DAG.
+ ///
+ /// # Warning
+ ///
+ /// The order of the nodes is not (yet) guaranteed.
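+ ///
+ /// ```ignore
+ /// // Illustrative; requires `futures::StreamExt` in scope. The items are
+ /// // `Result<(Id, N), Error>`.
+ /// let all = dag.stream().collect::<Vec<_>>().await;
+ /// ```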
+ pub fn stream(&self) -> Stream<Id, N, Backend, Error> {
+ Stream {
+ dag: self,
+ backlog: {
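+ // Seed the traversal: initially the only fetch in the backlog is the
+ // one for the HEAD node itself.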
+ let mut v = Vec::with_capacity(2);
+ v.push(self.backend.get(self.head.clone()));
+ v
+ },
+ }
+ }
+
+ /// Update the HEAD pointer of the DAG
+ ///
+ /// # Warning
+ ///
+ /// Fails if the passed node does not list the current HEAD among its parents.
+ pub async fn update_head(&mut self, node: N) -> Result<Id, Error> {
+ if node.parent_ids().iter().any(|id| id == &self.head) {
+ self.update_head_unchecked(node).await
+ } else {
+ Err(Error::from(crate::Error::HeadNotAParent))
+ }
+ }
+
+ /// Update the HEAD pointer of the DAG, unchecked
+ ///
+ /// # Warning
+ ///
+ /// Does not check whether the passed `node` points to the current (soon to be
+ /// old) HEAD in its parents. Be careful not to lose nodes from the DAG with
+ /// this.
+ pub async fn update_head_unchecked(&mut self, node: N) -> Result<Id, Error> {
+ let id = self.backend.put(node).await?;
+ self.head = id.clone();
+ Ok(id)
+ }
+
+ /// Branch from the current head.
+ ///
+ /// This function creates a new AsyncDag object with the same backend (thus the backend must be
+ /// `Clone` in this case).
+ pub fn branch(&self) -> AsyncDag<Id, N, Backend>
+ where
+ Backend: Clone,
+ {
+ AsyncDag {
+ head: self.head.clone(),
+ backend: self.backend.clone(),
+ _node: std::marker::PhantomData,
+ }
+ }
+
+ /// Merge another AsyncDag into this one
+ ///
+ /// Use the `merger` (a `Merger` implementation or a matching closure) to
+ /// combine the two HEAD ids into a new Node instance, which becomes the new
+ /// HEAD of `self`.
+ pub async fn merge<M>(
+ &mut self,
+ other: &AsyncDag<Id, N, Backend>,
+ merger: M,
+ ) -> Result<Id, Error>
+ where
+ M: Merger<Id, N, Error = Error>,
+ {
+ let node = merger.create_merge_node(&self.head, &other.head)?;
+ let id = self.backend.put(node).await?;
+ self.head = id.clone();
+ Ok(id)
+ }
+}
+
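+/// Strategy for creating the merge node when two DAGs are merged.
+///
+/// Given the ids of the two HEADs, a `Merger` produces the node that becomes
+/// the new HEAD. A sketch with illustrative (non-crate) types, mirroring the
+/// tests below:
+///
+/// ```ignore
+/// struct M;
+///
+/// impl Merger<MyId, MyNode> for M {
+///     type Error = MyError;
+///
+///     fn create_merge_node(&self, left: &MyId, right: &MyId) -> Result<MyNode, MyError> {
+///         Ok(MyNode { parents: vec![*left, *right], data: 0 })
+///     }
+/// }
+/// ```
+///
+/// Alternatively, any `Fn(&Id, &Id) -> Result<N, Error>` closure can serve as
+/// a `Merger` via the blanket impl below.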
+pub trait Merger<Id, N>
+where
+ Id: NodeId,
+ N: Node<Id = Id>,
+{
+ type Error;
+
+ fn create_merge_node(&self, left_id: &Id, right_id: &Id) -> Result<N, Self::Error>;
+}
+
+impl<Id, N, F, Error> Merger<Id, N> for F
+where
+ Id: NodeId,
+ N: Node<Id = Id>,
+ F: Fn(&Id, &Id) -> Result<N, Error>,
+ Error: From<crate::Error<Id>>,
+{
+ type Error = Error;
+
+ fn create_merge_node(&self, left_id: &Id, right_id: &Id) -> Result<N, Self::Error> {
+ (self)(left_id, right_id)
+ }
+}
+
+/// Stream adapter for streaming all nodes in a DAG
+pub struct Stream<'a, Id, N, Backend, Error>
+where
+ Id: NodeId + Send,
+ N: Node<Id = Id>,
+ Backend: DagBackend<Id, N>,
+ Error: From<crate::Error<Id>>,
+{
+ dag: &'a AsyncDag<Id, N, Backend>,
+ backlog: Vec<Pin<Backlog<'a, Id, N, Error>>>,
+}
+
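+/// The pending `DagBackend::get` futures that a `Stream` still has to drive to
+/// completion before it is exhausted.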
+pub type Backlog<'a, Id, N, Error> = Box<
+ (dyn futures::future::Future<Output = Result<Option<(Id, N)>, Error>> + std::marker::Send + 'a),
+>;
+
+impl<'a, Id, N, Backend, Error> futures::stream::Stream for Stream<'a, Id, N, Backend, Error>
+where
+ Id: NodeId + Send,
+ N: Node<Id = Id>,
+ Backend: DagBackend<Id, N, Error = Error>,
+ Error: From<crate::Error<Id>>,
+{
+ type Item = Result<(Id, N), Error>;
+
+ fn poll_next(
+ mut self: std::pin::Pin<&mut Self>,
+ cx: &mut futures::task::Context<'_>,
+ ) -> futures::task::Poll<Option<Self::Item>> {
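+ // Pop the most recently queued fetch and poll it. Whenever a node
+ // resolves, fetches for all of its parents are queued, so the whole DAG
+ // is eventually visited. The stream ends once the backlog is empty.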
+ if let Some(mut fut) = self.as_mut().backlog.pop() {
+ match fut.as_mut().poll(cx) {
+ Poll::Ready(Err(e)) => Poll::Ready(Some(Err(e))),
+ Poll::Ready(Ok(Some((node_id, node)))) => {
+ for parent in node.parent_ids().into_iter() {
+ let fut = self.dag.backend.get(parent.clone());
+ self.as_mut().backlog.push(fut);
+ }
+ Poll::Ready(Some(Ok((node_id, node))))
+ }
+ Poll::Ready(Ok(None)) => {
+ // backend.get() returned Ok(None), so the referenced node seems not to exist
+ //
+ // TODO: Decide whether we should return an error here.
+ cx.waker().wake_by_ref();
+ Poll::Pending
+ }
+ Poll::Pending => {
+ // The fetch is not ready yet: re-queue it, since it was popped
+ // above and dropping it here would cancel the fetch and silently
+ // lose that node (and its ancestors) from the stream.
+ self.as_mut().backlog.push(fut);
+ cx.waker().wake_by_ref();
+ Poll::Pending
+ }
+ }
+ } else {
+ Poll::Ready(None)
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use futures::StreamExt;
+
+ use crate::test_impl as test;
+ use crate::test_impl::TestError;
+ use crate::AsyncDag;
+ use crate::DagBackend;
+
+ #[test]
+ fn test_dag_two_nodes() {
+ let head = test::Node {
+ parents: vec![test::Id(0)],
+ data: 1,
+ };
+
+ let b = test::Backend::new(vec![
+ {
+ Some(test::Node {
+ parents: vec![],
+ data: 0,
+ })
+ },
+ { Some(head.clone()) },
+ ]);
+
+ {
+ let node = tokio_test::block_on(b.get(test::Id(1))).unwrap().unwrap();
+ assert_eq!(node.1.data, 1);
+ assert!(!node.1.parents.is_empty()); // to check whether the parent is set
+ }
+
+ let dag = tokio_test::block_on(AsyncDag::new(b, head));
+ assert!(dag.is_ok());
+ let dag = dag.unwrap();
+
+ {
+ let has_id = tokio_test::block_on(dag.has_id(&test::Id(0)));
+ assert!(has_id.is_ok());
+ let has_id = has_id.unwrap();
+ assert!(has_id);
+ }
+ {
+ let has_id = tokio_test::block_on(dag.has_id(&test::Id(1)));
+ assert!(has_id.is_ok());
+ let has_id = has_id.unwrap();
+ assert!(has_id);
+ }
+ }
+
+ #[test]
+ fn test_dag_two_nodes_stream() {
+ let head = test::Node {
+ parents: vec![test::Id(0)],
+ data: 1,
+ };
+
+ let b = test::Backend::new(vec![
+ {
+ Some(test::Node {
+ parents: vec![],
+ data: 0,
+ })
+ },
+ { Some(head.clone()) },
+ ]);
+
+ let dag = tokio_test::block_on(AsyncDag::new(b, head));
+ assert!(dag.is_ok());
+ let dag = dag.unwrap();
+
+ let v = tokio_test::block_on(dag.stream().collect::<Vec<_>>());
+
+ assert_eq!(v.len(), 2, "Expected two nodes: {:?}", v);
+ assert_eq!(v[0].as_ref().unwrap().0, test::Id(1));
+ assert_eq!(v[1].as_ref().unwrap().0, test::Id(0));
+ }
+
+ #[test]
+ fn test_adding_head() {
+ let head = test::Node {
+ parents: vec![],
+ data: 0,
+ };
+ let b = test::Backend::new(vec![Some(head.clone())]);
+
+ let dag = tokio_test::block_on(AsyncDag::new(b, head));
+ assert!(dag.is_ok());
+ let mut dag = dag.unwrap();
+
+ let new_head = test::Node {
+ parents: vec![test::Id(0)],
+ data: 1,
+ };
+
+ assert_eq!(dag.backend.0.read().unwrap().len(), 1);
+ assert_eq!(dag.head, test::Id(0));
+
+ let id = tokio_test::block_on(dag.update_head(new_head));
+ assert!(id.is_ok());
+ let _ = id.unwrap();
+
+ assert_eq!(dag.backend.0.read().unwrap().len(), 2);
+ assert_eq!(dag.head, test::Id(1));
+
+ assert_eq!(dag.backend.0.read().unwrap()[0].as_ref().unwrap().data, 0);
+ assert!(dag.backend.0.read().unwrap()[0]
+ .as_ref()
+ .unwrap()
+ .parents
+ .is_empty());
+
+ assert_eq!(dag.backend.0.read().unwrap()[1].as_ref().unwrap().data, 1);
+ assert_eq!(
+ dag.backend.0.read().unwrap()[1]
+ .as_ref()
+ .unwrap()
+ .parents
+ .len(),
+ 1
+ );
+ assert_eq!(
+ dag.backend.0.read().unwrap()[1].as_ref().unwrap().parents[0],
+ test::Id(0)
+ );
+ }
+
+ #[test]
+ fn test_branch() {
+ let mut dag = {
+ let head = test::Node {
+ parents: vec![],
+ data: 0,
+ };
+ let b = test::Backend::new(vec![Some(head.clone())]);
+ let dag = tokio_test::block_on(AsyncDag::new(b, head));
+ assert!(dag.is_ok());
+ dag.unwrap()
+ };
+
+ let branched = dag.branch();
+
+ {
+ assert_eq!(dag.backend.0.read().unwrap().len(), 1);
+ assert_eq!(dag.head, test::Id(0));
+ let new_head = test::Node {
+ parents: vec![test::Id(0)],
+ data: 1,
+ };
+
+ let id = tokio_test::block_on(dag.update_head(new_head));
+ assert!(id.is_ok());
+ let _ = id.unwrap();
+
+ assert_eq!(dag.backend.0.read().unwrap().len(), 2);
+ assert_eq!(dag.head, test::Id(1));
+ }
+
+ assert_eq!(branched.backend.0.read().unwrap().len(), 2);
+ assert_eq!(branched.head, test::Id(0));
+ }
+
+ #[test]
+ fn test_merging() {
+ let mut dag = {
+ let head = test::Node {
+ parents: vec![],
+ data: 0,
+ };
+ let b = test::Backend::new(vec![Some(head.clone())]);
+ let dag = tokio_test::block_on(AsyncDag::new(b, head));
+ assert!(dag.is_ok());
+ dag.unwrap()
+ };
+
+ let mut branched = dag.branch();
+
+ {
+ assert_eq!(dag.backend.0.read().unwrap().len(), 1);
+ assert_eq!(dag.head, test::Id(0));
+ let new_head = test::Node {
+ parents: vec![test::Id(0)],
+ data: 1,
+ };
+
+ let id = tokio_test::block_on(dag.update_head(new_head));
+ assert!(id.is_ok());
+ let _ = id.unwrap();
+
+ assert_eq!(dag.backend.0.read().unwrap().len(), 2);
+ assert_eq!(dag.head, test::Id(1));
+ }
+
+ {
+ assert_eq!(branched.backend.0.read().unwrap().len(), 2);
+ assert_eq!(branched.head, test::Id(0));
+ let new_head = test::Node {
+ parents: vec![test::Id(0)],
+ data: 2,
+ };
+
+ let id = tokio_test::block_on(branched.update_head(new_head));
+ assert!(id.is_ok());
+ let _ = id.unwrap();
+
+ assert_eq!(branched.backend.0.read().unwrap().len(), 3);
+ assert_eq!(branched.head, test::Id(2));
+ }
+
+ struct M;
+ impl super::Merger<test::Id, test::Node> for M {
+ type Error = TestError;
+
+ fn create_merge_node(
+ &self,
+ left_id: &test::Id,
+ right_id: &test::Id,
+ ) -> Result<test::Node, Self::Error> {
+ Ok(test::Node {
+ parents: vec![*left_id, *right_id],
+ data: 3,
+ })
+ }
+ }
+
+ let merge = tokio_test::block_on(dag.merge(&branched, M));
+ assert!(merge.is_ok());
+ let _ = merge.unwrap();
+
+ assert_eq!(dag.backend.0.read().unwrap().len(), 4);
+ assert_eq!(dag.head, test::Id(3));
+ }
+
+ #[test]
+ fn test_merging_merge_fn() {
+ let mut dag = {
+ let head = test::Node {
+ parents: vec![],
+ data: 0,
+ };
+ let b = test::Backend::new(vec![Some(head.clone())]);
+ let dag = tokio_test::block_on(AsyncDag::new(b, head));
+ assert!(dag.is_ok());
+ dag.unwrap()
+ };
+
+ let mut branched = dag.branch();
+
+ {
+ assert_eq!(dag.backend.0.read().unwrap().len(), 1);
+ assert_eq!(dag.head, test::Id(0));
+ let new_head = test::Node {
+ parents: vec![test::Id(0)],
+ data: 1,
+ };
+
+ let id = tokio_test::block_on(dag.update_head(new_head));
+ assert!(id.is_ok());
+ let _ = id.unwrap();
+
+ assert_eq!(dag.backend.0.read().unwrap().len(), 2);
+ assert_eq!(dag.head, test::Id(1));
+ }
+
+ {
+ assert_eq!(branched.backend.0.read().unwrap().len(), 2);
+ assert_eq!(branched.head, test::Id(0));
+ let new_head = test::Node {
+ parents: vec![test::Id(0)],
+ data: 2,
+ };
+
+ let id = tokio_test::block_on(branched.update_head(new_head));
+ assert!(id.is_ok());
+ let _ = id.unwrap();
+
+ assert_eq!(branched.backend.0.read().unwrap().len(), 3);
+ assert_eq!(branched.head, test::Id(2));
+ }
+
+ let merge = tokio_test::block_on(dag.merge(
+ &branched,
+ |left_id: &test::Id, right_id: &test::Id| {
+ Ok(test::Node {
+ parents: vec![*left_id, *right_id],
+ data: 3,
+ })
+ },
+ ));
+ assert!(merge.is_ok());
+ let _ = merge.unwrap();
+
+ assert_eq!(dag.backend.0.read().unwrap().len(), 4);
+ assert_eq!(dag.head, test::Id(3));
+ }
+}
diff --git a/src/dag_backend.rs b/src/dag_backend.rs
new file mode 100644
index 0000000..09dc428
--- /dev/null
+++ b/src/dag_backend.rs
@@ -0,0 +1,106 @@
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+//
+
+use async_trait::async_trait;
+
+use crate::Node;
+use crate::NodeId;
+
+/// An interface to a DAG backend storage
+///
+/// A DAG backend storage is nothing more than a thing that can store (`DagBackend::put`) and load
+/// (`DagBackend::get`) nodes.
+#[async_trait]
+pub trait DagBackend<Id, N>
+where
+ N: Node,
+ Id: NodeId + Send,
+{
+ type Error;
+
+ /// Get a `Node` from the backend that is identified by `id`
+ ///
+ /// # Returns
+ ///
+ /// * Should return Err(_) if the operation failed.
+ /// * Should return Ok(None) if there is no node that is identified by `id`
+ /// * Otherwise return the Id along with the node identified by it
+ async fn get(&self, id: Id) -> Result<Option<(Id, N)>, Self::Error>;
+
+ /// Store a `node` in the backend, returning its `Id`
+ ///
+ /// This function should store the `node` in the backend and return the `id` the node has.
+ async fn put(&mut self, node: N) -> Result<Id, Self::Error>;
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::test_impl as test;
+ use crate::*;
+
+ #[test]
+ fn test_backend_get() {
+ let b = test::Backend::new(vec![Some(test::Node {
+ parents: vec![],
+ data: 0,
+ })]);
+
+ let node =