-rw-r--r--   .builds/debian.yml    16
-rw-r--r--   Cargo.toml            20
-rw-r--r--   README.md             42
-rw-r--r--   src/async_dag.rs     511
-rw-r--r--   src/dag_backend.rs   106
-rw-r--r--   src/id.rs             18
-rw-r--r--   src/lib.rs            18
-rw-r--r--   src/node.rs           35
-rw-r--r--   src/node_id.rs        11
-rw-r--r--   src/repository.rs     26
-rw-r--r--   src/test_impl.rs      67
11 files changed, 786 insertions, 84 deletions
diff --git a/.builds/debian.yml b/.builds/debian.yml
new file mode 100644
index 0000000..111f38d
--- /dev/null
+++ b/.builds/debian.yml
@@ -0,0 +1,16 @@
+image: debian/buster
+sources:
+ - https://git.sr.ht/~matthiasbeyer/daglib
+tasks:
+ - install: curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain 1.50.0
+ - build: |
+ cd daglib
+ PATH="$HOME/.cargo/bin:$PATH" cargo build --all --all-features
+ - test: |
+ cd daglib
+ PATH="$HOME/.cargo/bin:$PATH" cargo test --all --all-features
+triggers:
+ - action: email
+ condition: always
+ to: mail@beyermatthias.de
+
diff --git a/Cargo.toml b/Cargo.toml
index b460961..fafb950 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,8 +1,18 @@
[package]
-name = "daglib"
-version = "0.1.0"
-authors = ["Matthias Beyer <mail@beyermatthias.de>"]
-edition = "2018"
+name = "daglib"
+version = "0.1.0"
+authors = ["Matthias Beyer <mail@beyermatthias.de>"]
+edition = "2018"
+description = "Generic async DAG library"
[dependencies]
-futures = "0.1"
+# TODO: Replace with thiserror
+anyhow = "1"
+
+async-trait = "0.1"
+futures = "0.3"
+tokio = { version = "1", features = ["full"] }
+
+[dev-dependencies]
+tokio-test = "0.4"
+
diff --git a/README.md b/README.md
index 7923f86..50bad1a 100644
--- a/README.md
+++ b/README.md
@@ -1,13 +1,51 @@
# daglib
-Generic library for working with DAG data structures. Based on futures, so that
-it can be used in a async context.
+Async DAG library that is generic over the node type, the node identifier type, and
+the backend (the storage of the DAG).
+
## Status
Experimental. Do not use.
+## What is this?
+
+My problem was that I wanted to implement a DAG on IPFS/IPLD. The problem,
+though, is that the existing IPFS/IPLD libraries do not offer functionality to
+build a DAG easily.
+There are libraries for DAGs, but they all implement the storage functionality
+themselves. I needed a backend that is _async_ and _generic_, so that I can use
+IPFS (or anything else that works asynchronously) as the backend. So this lib was born.
+
+This library defines a simple interface for the underlying backend:
+
+```rust
+#[async_trait]
+pub trait DagBackend<Id, N>
+ where N: Node,
+ Id: NodeId + Send
+{
+ async fn get(&self, id: Id) -> Result<Option<N>>;
+ async fn put(&mut self, node: N) -> Result<Id>;
+}
+```
+
+and that's it. The `AsyncDag` type implements the DAG data structure on top of
+that.
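+
+As a rough usage sketch (assuming hypothetical `MyBackend` and `MyNode` types that
+implement the `DagBackend` and `Node` traits of this crate), building and extending
+a DAG looks roughly like this:
+
+```rust
+use anyhow::Result;
+use futures::StreamExt;
+use daglib::AsyncDag;
+
+// `MyBackend` and `MyNode` are placeholder types implementing `DagBackend` and `Node`.
+async fn example(backend: MyBackend, root: MyNode, next: MyNode) -> Result<()> {
+    // `new()` puts the root node into the backend and uses it as HEAD.
+    let mut dag = AsyncDag::new(backend, root).await?;
+
+    // `update_head()` appends a node; it must list the current HEAD among its parents.
+    let _new_head_id = dag.update_head(next).await?;
+
+    // `stream()` yields every reachable node as a `Result<(Id, Node)>`.
+    let all_nodes = dag.stream().collect::<Vec<_>>().await;
+    println!("DAG contains {} nodes", all_nodes.len());
+    Ok(())
+}
+```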
+
+
+## Limitations
+
+Because this library was initially built to be used with IPFS as the backend, it has
+some limitations:
+
+* It assumes that nodes can never be deleted. You can rewrite the DAG, but this
+  does not necessarily delete the old nodes.
+
+This list will be extended in the future. Do not assume it is complete!
+
+
## License
MPL-2.0
diff --git a/src/async_dag.rs b/src/async_dag.rs
new file mode 100644
index 0000000..57ecc16
--- /dev/null
+++ b/src/async_dag.rs
@@ -0,0 +1,511 @@
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+//
+
+use std::pin::Pin;
+
+use anyhow::Result;
+use anyhow::anyhow;
+use futures::stream::StreamExt;
+use futures::task::Poll;
+
+use crate::Node;
+use crate::NodeId;
+use crate::DagBackend;
+
+/// An async DAG, generic over Node, Node identifier and Backend implementation
+pub struct AsyncDag<Id, N, Backend>
+ where Id: NodeId + Send,
+ N: Node<Id = Id>,
+ Backend: DagBackend<Id, N>
+{
+ head: Id,
+ backend: Backend,
+ _node: std::marker::PhantomData<N>,
+}
+
+impl<Id, N, Backend> AsyncDag<Id, N, Backend>
+ where Id: NodeId + Send,
+ N: Node<Id = Id>,
+ Backend: DagBackend<Id, N>
+{
+ /// Create a new DAG with a backend and a HEAD node
+ ///
+    /// The head node is stored in the backend (via `DagBackend::put`) before the AsyncDag object is created.
+ pub async fn new(mut backend: Backend, head: N) -> Result<Self> {
+ backend
+ .put(head)
+ .await
+ .map(|id| {
+ AsyncDag {
+ head: id,
+ backend: backend,
+ _node: std::marker::PhantomData,
+ }
+ })
+ }
+
+    /// Load an AsyncDag object using `head` as HEAD node.
+ ///
+ /// # Warning
+ ///
+ /// This fails if backend.get(head) fails.
+ pub async fn load(backend: Backend, head: Id) -> Result<Self> {
+ backend.get(head)
+ .await?
+ .map(|(id, _)| {
+ AsyncDag {
+ head: id,
+ backend: backend,
+ _node: std::marker::PhantomData,
+ }
+ })
+ .ok_or_else(|| anyhow!("Node not found"))
+ }
+
+ pub fn head(&self) -> &Id {
+ &self.head
+ }
+
+ pub fn backend(&self) -> &Backend {
+ &self.backend
+ }
+
+ pub fn backend_mut(&mut self) -> &mut Backend {
+ &mut self.backend
+ }
+
+ /// Check whether an `id` is in the DAG.
+ pub async fn has_id(&self, id: &Id) -> Result<bool> {
+ self.stream()
+ .map(|r| -> Result<bool> {
+ r.map(|(ref node_id, _)| node_id == id)
+ })
+ .collect::<Vec<Result<bool>>>()
+ .await
+ .into_iter()
+ .fold(Ok(false), |acc, e| {
+ match (acc, e) {
+ (Err(e), _) => Err(e),
+ (Ok(_), Err(e)) => Err(e),
+ (Ok(a), Ok(b)) => Ok(a || b),
+ }
+ })
+ }
+
+ /// Iterate over the DAG
+ ///
+ /// This function returns a Stream over all nodes in the DAG.
+ ///
+ /// # Warning
+ ///
+ /// The order of the nodes is not (yet) guaranteed.
+ pub fn stream(&self) -> Stream<Id, N, Backend> {
+ Stream {
+ dag: self,
+ backlog: {
+ let mut v = Vec::with_capacity(2);
+ v.push(self.backend.get(self.head.clone()));
+ v
+ }
+ }
+ }
+
+ /// Update the HEAD pointer of the DAG
+ ///
+ /// # Warning
+ ///
+    /// Fails if the passed node does not point to the current HEAD in its parents.
+ pub async fn update_head(&mut self, node: N) -> Result<Id> {
+ if node.parent_ids().iter().any(|id| id == &self.head) {
+ self.update_head_unchecked(node).await
+ } else {
+ Err(anyhow!("Node does not have HEAD as parent"))
+ }
+ }
+
+ /// Update the HEAD pointer of the DAG, unchecked
+ ///
+ /// # Warning
+ ///
+    /// Does not check whether the passed `node` points to the current (soon-to-be old) HEAD in its
+    /// parents. Be careful not to lose nodes from the DAG with this.
+ pub async fn update_head_unchecked(&mut self, node: N) -> Result<Id> {
+ let id = self.backend.put(node).await?;
+ self.head = id.clone();
+ Ok(id)
+ }
+
+ /// Branch from the current head.
+ ///
+ /// This function creates a new AsyncDag object with the same backend (thus the backend must be
+ /// `Clone` in this case).
+ pub fn branch(&self) -> AsyncDag<Id, N, Backend>
+ where Backend: Clone
+ {
+ AsyncDag {
+ head: self.head.clone(),
+ backend: self.backend.clone(),
+ _node: std::marker::PhantomData,
+ }
+ }
+
+ /// Merge another AsyncDag into this one
+ ///
+ /// Use the `merger` function to merge the two head IDs and generate a new Node instance for
+ /// the new HEAD of `self`.
+ pub async fn merge<M>(&mut self, other: &AsyncDag<Id, N, Backend>, merger: M) -> Result<Id>
+ where M: Merger<Id, N>
+ {
+ let node = merger.create_merge_node(&self.head, &other.head)?;
+ let id = self.backend.put(node).await?;
+ self.head = id.clone();
+ Ok(id)
+ }
+}
+
+pub trait Merger<Id, N>
+ where Id: NodeId,
+ N: Node<Id = Id>
+{
+ fn create_merge_node(&self, left_id: &Id, right_id: &Id) -> Result<N>;
+}
+
+impl<Id, N, F> Merger<Id, N> for F
+ where Id: NodeId,
+ N: Node<Id = Id>,
+ F: Fn(&Id, &Id) -> Result<N>,
+{
+ fn create_merge_node(&self, left_id: &Id, right_id: &Id) -> Result<N> {
+ (self)(left_id, right_id)
+ }
+}
+
+/// Stream adapter for streaming all nodes in a DAG
+pub struct Stream<'a, Id, N, Backend>
+ where Id: NodeId + Send,
+ N: Node<Id = Id>,
+ Backend: DagBackend<Id, N>
+{
+ dag: &'a AsyncDag<Id, N, Backend>,
+ backlog: Vec<Pin<Box<(dyn futures::future::Future<Output = Result<Option<(Id, N)>>> + std::marker::Send + 'a)>>>,
+}
+
+impl<'a, Id, N, Backend> futures::stream::Stream for Stream<'a, Id, N, Backend>
+ where Id: NodeId + Send,
+ N: Node<Id = Id>,
+ Backend: DagBackend<Id, N>
+{
+ type Item = Result<(Id, N)>;
+
+ fn poll_next(mut self: std::pin::Pin<&mut Self>, cx: &mut futures::task::Context<'_>) -> futures::task::Poll<Option<Self::Item>> {
+ if let Some(mut fut) = self.as_mut().backlog.pop() {
+ match fut.as_mut().poll(cx) {
+ Poll::Ready(Err(e)) => Poll::Ready(Some(Err(e))),
+ Poll::Ready(Ok(Some((node_id, node)))) => {
+ for parent in node.parent_ids().into_iter() {
+ let fut = self.dag.backend.get(parent.clone());
+ self.as_mut().backlog.push(fut);
+ }
+ Poll::Ready(Some(Ok((node_id, node))))
+ },
+ Poll::Ready(Ok(None)) => {
+ // backend.get() returned Ok(None), so the referenced node seems not to exist
+ //
+ // TODO: Decide whether we should return an error here.
+ cx.waker().wake_by_ref();
+ Poll::Pending
+ },
+ Poll::Pending => {
+ cx.waker().wake_by_ref();
+ Poll::Pending
+ }
+ }
+ } else {
+ Poll::Ready(None)
+ }
+ }
+}
+
+
+#[cfg(test)]
+mod tests {
+
+ use anyhow::Result;
+ use futures::StreamExt;
+
+ use crate::DagBackend;
+ use crate::AsyncDag;
+ use crate::test_impl as test;
+
+ #[test]
+ fn test_dag_two_nodes() {
+ let head = test::Node {
+ parents: vec![test::Id(0)],
+ data: 1,
+ };
+
+ let b = test::Backend::new(vec![
+ {
+ Some(test::Node {
+ parents: vec![],
+ data: 0,
+ })
+ },
+ {
+ Some(head.clone())
+ },
+ ]);
+
+ {
+ let node = tokio_test::block_on(b.get(test::Id(1))).unwrap().unwrap();
+ assert_eq!(node.1.data, 1);
+ assert!(!node.1.parents.is_empty()); // to check whether the parent is set
+ }
+
+ let dag = tokio_test::block_on(AsyncDag::new(b, head));
+ assert!(dag.is_ok());
+ let dag = dag.unwrap();
+
+ {
+ let has_id = tokio_test::block_on(dag.has_id(&test::Id(0)));
+ assert!(has_id.is_ok());
+ let has_id = has_id.unwrap();
+ assert!(has_id);
+ }
+ {
+ let has_id = tokio_test::block_on(dag.has_id(&test::Id(1)));
+ assert!(has_id.is_ok());
+ let has_id = has_id.unwrap();
+ assert!(has_id);
+ }
+ }
+
+ #[test]
+ fn test_dag_two_nodes_stream() {
+ let head = test::Node {
+ parents: vec![test::Id(0)],
+ data: 1,
+ };
+
+ let b = test::Backend::new(vec![
+ {
+ Some(test::Node {
+ parents: vec![],
+ data: 0,
+ })
+ },
+ {
+ Some(head.clone())
+ },
+ ]);
+
+ let dag = tokio_test::block_on(AsyncDag::new(b, head));
+ assert!(dag.is_ok());
+ let dag = dag.unwrap();
+
+ let v = tokio_test::block_on(dag.stream().collect::<Vec<_>>());
+
+ assert_eq!(v.len(), 2, "Expected two nodes: {:?}", v);
+ assert_eq!(v[0].as_ref().unwrap().0, test::Id(1));
+ assert_eq!(v[1].as_ref().unwrap().0, test::Id(0));
+ }
+
+ #[test]
+ fn test_adding_head() {
+ let head = test::Node {
+ parents: vec![],
+ data: 0,
+ };
+ let b = test::Backend::new(vec![Some(head.clone())]);
+
+ let dag = tokio_test::block_on(AsyncDag::new(b, head));
+ assert!(dag.is_ok());
+ let mut dag = dag.unwrap();
+
+ let new_head = test::Node {
+ parents: vec![test::Id(0)],
+ data: 1,
+ };
+
+ assert_eq!(dag.backend.0.read().unwrap().len(), 1);
+ assert_eq!(dag.head, test::Id(0));
+
+ let id = tokio_test::block_on(dag.update_head(new_head));
+ assert!(id.is_ok());
+ let _ = id.unwrap();
+
+ assert_eq!(dag.backend.0.read().unwrap().len(), 2);
+ assert_eq!(dag.head, test::Id(1));
+
+ assert_eq!(dag.backend.0.read().unwrap()[0].as_ref().unwrap().data, 0);
+ assert!(dag.backend.0.read().unwrap()[0].as_ref().unwrap().parents.is_empty());
+
+ assert_eq!(dag.backend.0.read().unwrap()[1].as_ref().unwrap().data, 1);
+ assert_eq!(dag.backend.0.read().unwrap()[1].as_ref().unwrap().parents.len(), 1);
+ assert_eq!(dag.backend.0.read().unwrap()[1].as_ref().unwrap().parents[0], test::Id(0));
+ }
+
+ #[test]
+ fn test_branch() {
+ let mut dag = {
+ let head = test::Node {
+ parents: vec![],
+ data: 0,
+ };
+ let b = test::Backend::new(vec![Some(head.clone())]);
+ let dag = tokio_test::block_on(AsyncDag::new(b, head));
+ assert!(dag.is_ok());
+ dag.unwrap()
+ };
+
+ let branched = dag.branch();
+
+ {
+ assert_eq!(dag.backend.0.read().unwrap().len(), 1);
+ assert_eq!(dag.head, test::Id(0));
+ let new_head = test::Node {
+ parents: vec![test::Id(0)],
+ data: 1,
+ };
+
+ let id = tokio_test::block_on(dag.update_head(new_head));
+ assert!(id.is_ok());
+ let _ = id.unwrap();
+
+ assert_eq!(dag.backend.0.read().unwrap().len(), 2);
+ assert_eq!(dag.head, test::Id(1));
+ }
+
+ assert_eq!(branched.backend.0.read().unwrap().len(), 2);
+ assert_eq!(branched.head, test::Id(0));
+ }
+
+ #[test]
+ fn test_merging() {
+ let mut dag = {
+ let head = test::Node {
+ parents: vec![],
+ data: 0,
+ };
+ let b = test::Backend::new(vec![Some(head.clone())]);
+ let dag = tokio_test::block_on(AsyncDag::new(b, head));
+ assert!(dag.is_ok());
+ dag.unwrap()
+ };
+
+ let mut branched = dag.branch();
+
+ {
+ assert_eq!(dag.backend.0.read().unwrap().len(), 1);
+ assert_eq!(dag.head, test::Id(0));
+ let new_head = test::Node {
+ parents: vec![test::Id(0)],
+ data: 1,
+ };
+
+ let id = tokio_test::block_on(dag.update_head(new_head));
+ assert!(id.is_ok());
+ let _ = id.unwrap();
+
+ assert_eq!(dag.backend.0.read().unwrap().len(), 2);
+ assert_eq!(dag.head, test::Id(1));
+ }
+
+ {
+ assert_eq!(branched.backend.0.read().unwrap().len(), 2);
+ assert_eq!(branched.head, test::Id(0));
+ let new_head = test::Node {
+ parents: vec![test::Id(0)],
+ data: 2,
+ };
+
+ let id = tokio_test::block_on(branched.update_head(new_head));
+ assert!(id.is_ok());
+ let _ = id.unwrap();
+
+ assert_eq!(branched.backend.0.read().unwrap().len(), 3);
+ assert_eq!(branched.head, test::Id(2));
+ }
+
+ struct M;
+ impl super::Merger<test::Id, test::Node> for M {
+ fn create_merge_node(&self, left_id: &test::Id, right_id: &test::Id) -> Result<test::Node> {
+ Ok(test::Node {
+ parents: vec![left_id.clone(), right_id.clone()],
+ data: 3,
+ })
+ }
+ }
+
+ let merge = tokio_test::block_on(dag.merge(&branched, M));
+ assert!(merge.is_ok());
+ let _ = merge.unwrap();
+
+ assert_eq!(dag.backend.0.read().unwrap().len(), 4);
+ assert_eq!(dag.head, test::Id(3));
+ }
+
+ #[test]
+ fn test_merging_merge_fn() {
+ let mut dag = {
+ let head = test::Node {
+ parents: vec![],
+ data: 0,
+ };
+ let b = test::Backend::new(vec![Some(head.clone())]);
+ let dag = tokio_test::block_on(AsyncDag::new(b, head));
+ assert!(dag.is_ok());
+ dag.unwrap()
+ };
+
+ let mut branched = dag.branch();
+
+ {
+ assert_eq!(dag.backend.0.read().unwrap().len(), 1);
+ assert_eq!(dag.head, test::Id(0));
+ let new_head = test::Node {
+ parents: vec![test::Id(0)],
+ data: 1,
+ };
+
+ let id = tokio_test::block_on(dag.update_head(new_head));
+ assert!(id.is_ok());
+ let _ = id.unwrap();
+
+ assert_eq!(dag.backend.0.read().unwrap().len(), 2);
+ assert_eq!(dag.head, test::Id(1));
+ }
+
+ {
+ assert_eq!(branched.backend.0.read().unwrap().len(), 2);
+ assert_eq!(branched.head, test::Id(0));
+ let new_head = test::Node {
+ parents: vec![test::Id(0)],
+ data: 2,
+ };
+
+ let id = tokio_test::block_on(branched.update_head(new_head));
+ assert!(id.is_ok());
+ let _ = id.unwrap();
+
+ assert_eq!(branched.backend.0.read().unwrap().len(), 3);
+ assert_eq!(branched.head, test::Id(2));
+ }
+
+ let merge = tokio_test::block_on(dag.merge(&branched, |left_id: &test::Id, right_id: &test::Id| {
+ Ok(test::Node {
+ parents: vec![left_id.clone(), right_id.clone()],
+ data: 3,
+ })
+ }));
+ assert!(merge.is_ok());
+ let _ = merge.unwrap();
+
+ assert_eq!(dag.backend.0.read().unwrap().len(), 4);
+ assert_eq!(dag.head, test::Id(3));
+ }
+}
+
diff --git a/src/dag_backend.rs b/src/dag_backend.rs
new file mode 100644
index 0000000..b3077c4
--- /dev/null
+++ b/src/dag_backend.rs
@@ -0,0 +1,106 @@
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+//
+
+use anyhow::Result;
+use async_trait::async_trait;
+
+use crate::NodeId;
+use crate::Node;
+
+/// An interface to a DAG backend storage
+///
+/// A DAG backend storage is nothing more than a thing that can store (`DagBackend::put`) and load
+/// (`DagBackend::get`) nodes.
+#[async_trait]
+pub trait DagBackend<Id, N>
+ where N: Node,
+ Id: NodeId + Send
+{
+
+ /// Get a `Node` from the backend that is identified by `id`
+ ///
+ /// # Returns
+ ///
+ /// * Should return Err(_) if the operation failed.
+ /// * Should return Ok(None) if there is no node that is identified by `id`
+ /// * Otherwise return the Id along with the node identified by it
+ async fn get(&self, id: Id) -> Result<Option<(Id, N)>>;
+
+ /// Store a `node` in the backend, returning its `Id`
+ ///
+ /// This function should store the `node` in the backend and return the `id` the node has.
+ async fn put(&mut self, node: N) -> Result<Id>;
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::test_impl as test;
+ use crate::*;
+
+ #[test]
+ fn test_backend_get() {
+ let b = test::Backend::new(vec![Some(test::Node {
+ parents: vec![],
+ data: 0,
+ })]);
+
+ let node = tokio_test::block_on(b.get(test::Id(0)));
+ assert!(node.is_ok());
+ let node = node.unwrap();
+
+ assert!(node.is_some());
+ let node = node.unwrap();
+
+ assert_eq!(node.0, test::Id(0));
+ assert!(node.1.parents.is_empty());
+ }
+
+ #[test]
+ fn test_backend_put() {
+ let mut b = test::Backend::new(vec![Some(test::Node {
+ parents: vec![],
+ data: 0,
+ })]);
+
+ let _ = tokio_test::block_on(b.put({
+ test::Node {
+ parents: vec![],
+ data: 1,
+ }
+ }));
+
+ {
+ let node = tokio_test::block_on(b.get(test::Id(0)));
+ assert!(node.is_ok());
+ let node = node.unwrap();
+
+ assert!(node.is_some());
+ let node = node.unwrap();
+
+ assert_eq!(node.0, test::Id(0));
+ assert!(node.1.parents.is_empty());
+ }
+ {
+ let node = tokio_test::block_on(b.get(test::Id(1)));
+ assert!(node.is_ok());
+ let node = node.unwrap();
+
+ assert!(node.is_some());
+ let node = node.unwrap();
+
+ assert_eq!(node.0, test::Id(1));
+ assert!(node.1.parents.is_empty());
+ }
+ {
+ let node = tokio_test::block_on(b.get(test::Id(2)));
+ assert!(node.is_ok());
+ let node = node.unwrap();
+
+ assert!(node.is_none());
+ }
+ }
+
+}
diff --git a/src/id.rs b/src/id.rs
deleted file mode 100644
index 018ae60..0000000
--- a/src/id.rs
+++ /dev/null
@@ -1,18 +0,0 @@
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this
-// file, You can obtain one at http://mozilla.org/MPL/2.0/.
-//
-
-use std::fmt::Debug;
-
-/// An "ID" is an object that can be used to uniquely identify a node in the DAG.
-///
-/// In git-speak, this would be a SHA1 hash.
-///
-pub trait Id : Clone + Debug + PartialEq + Eq {
-
- /* empty */
-
-}
-
diff --git a/src/lib.rs b/src/lib.rs
index bda0996..655cef1 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -4,6 +4,18 @@
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
//
-pub mod id;
-pub mod node;
-pub mod repository;
+mod async_dag;
+pub use async_dag::*;
+
+mod dag_backend;
+pub use dag_backend::*;
+
+mod node;
+pub use node::*;
+
+mod node_id;
+pub use node_id::*;
+
+#[cfg(test)]
+mod test_impl;
+
diff --git a/src/node.rs b/src/node.rs
index b6a088b..794c4af 100644
--- a/src/node.rs
+++ b/src/node.rs
@@ -4,36 +4,11 @@
// file, You can obtain one at http://mozilla.org/MPL/2.0/.
//
-use std::fmt::Debug;
+use crate::NodeId;
-use crate::id::Id;
-
-use futures::future::Future;
-use futures::stream::Stream;
-
-/// A "Node" is an object of the (DA)Graph
-///
-/// In git-speak, this would be an "Object".
-///
-///
-/// # Equality
-///
-/// A node might be compared to another node. In git, for example, equality would be the equality
-/// of the nodes ids, because objects are non-mutable.
-///
-pub trait Node: Debug + PartialEq + Eq {
- type Id: Id;
- type NodePayload: Debug;
- type Error: Debug;
- type Payload: Future<Item = Self::NodePayload, Error = Self::Error>;
-
- /// It should be trivial to get the Id of a Node.
- fn id(&self) -> Self::Id;
-
- /// Fetch the payload of the node.
- fn payload(&self) -> Self::Payload;
-
- /// Fetch the Ids of the parents of this node
- fn parent_ids(&self) -> Stream<Item = Self::Id, Error = Self::Error>;
+/// A Node in the DAG, holding the data.
+pub trait Node {
+ type Id: NodeId;
+ fn parent_ids(&self) -> Vec<Self::Id>;
}
diff --git a/src/node_id.rs b/src/node_id.rs
new file mode 100644
index 0000000..6211e19
--- /dev/null
+++ b/src/node_id.rs
@@ -0,0 +1,11 @@
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+//
+
+/// A unique identifier for a `Node`
+///
+/// The `NodeId` should be cheap to clone (for example a UUID or some form of a hash value).
+pub trait NodeId: Clone + Eq + PartialEq {
+}
diff --git a/src/repository.rs b/src/repository.rs
deleted file mode 100644
index 57f2d63..0000000
--- a/src/repository.rs
+++ /dev/null
@@ -1,26 +0,0 @@
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this
-// file, You can obtain one at http://mozilla.org/MPL/2.0/.
-//
-
-use std::fmt::Debug;
-
-use crate::node;
-use crate::id;
-
-use futures::future::Future;
-
-///
-pub trait Repository: Debug {
- type Id: id::Id;
- type Error: Debug;
- type Node: node::Node<Id = Self::Id, Error = Self::Error>;
-
- type Get: Future<Item = Self::Node, Error = Self::Error>;
-
- /// It should be trivial to get the Id of a Node.
- fn get<ID>(id: ID) -> Result<Self::Get, Self::Error>
- where ID: id::Id;
-}
-
diff --git a/src/test_impl.rs b/src/test_impl.rs
new file mode 100644
index 0000000..672c3fc
--- /dev/null
+++ b/src/test_impl.rs
@@ -0,0 +1,67 @@
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this
+// file, You can obtain one at http://mozilla.org/MPL/2.0/.
+//
+
+use std::sync::Arc;
+use std::sync::RwLock;
+
+use anyhow::Result;
+use async_trait::async_trait;
+
+#[derive(Copy, Clone, Eq, PartialEq, std::hash::Hash, Debug)]
+pub struct Id(pub(crate) usize);
+
+impl crate::NodeId for Id {}
+
+#[derive(Clone, Debug)]
+pub struct Node {
+ pub(crate) parents: Vec<Id>,
+    // Data the node holds; in the tests it also serves as the node's unique ID (a stand-in for hashing)
+ pub(crate) data: usize,
+}
+
+impl crate::Node for Node {
+ type Id = Id;
+
+ fn parent_ids(&self) -> Vec<Self::Id> {
+ self.parents.clone()
+ }
+}
+
+/// The backend for the tests
+///
+/// This is `Clone` because we can test branching only with a clonable backend.
+/// A real backend would not implement the storage itself, but rather a way to retrieve the data
+/// from some storage mechanism (think IPFS), and thus `Clone`ing a backend is nothing esoteric.
+#[derive(Clone, Debug)]
+pub struct Backend(pub(crate) Arc<RwLock<Vec<Option<Node>>>>);
+
+impl Backend {
+ pub fn new(v: Vec<Option<Node>>) -> Self {
+ Backend(Arc::new(RwLock::new(v)))
+ }
+}
+
+#[async_trait]
+impl crate::DagBackend<Id, Node> for Backend {
+ async fn get(&self, id: Id) -> Result<Option<(Id, Node)>> {
+ if self.0.read().unwrap().len() < id.0 + 1 {
+ Ok(None)
+ } else {
+ Ok(self.0.read().unwrap()[id.0].clone().map(|node| (id, node)))
+ }
+ }
+
+ async fn put(&mut self, node: Node) -> Result<Id> {
+ while self.0.read().unwrap().len() < node.data + 1 {
+ self.0.write().unwrap().push(None)
+ }
+
+ let idx = node.data;
+ self.0.write().unwrap()[idx] = Some(node);
+ Ok(Id(idx))
+ }
+}
+