reorganize crates

commit 6087ab253b (pull/19/head)
parent 5486f894f4
Niko PLP, 2 years ago
78 files changed:

.gitignore | 6
Cargo.lock | 43
Cargo.toml | 9
README.md | 20
flake.nix | 11
ng-app-js/Cargo.toml | 4
ng-app-js/LICENSE-APACHE2 | 0
ng-app-js/LICENSE-MIT | 0
ng-app-js/README.md | 104
ng-app-js/app-node/.gitignore | 1
ng-app-js/app-node/index.js | 12
ng-app-js/app-node/package.json | 18
ng-app-js/app-react/.babelrc | 3
ng-app-js/app-react/.gitignore | 3
ng-app-js/app-react/README.md | 41
ng-app-js/app-react/package-lock.json | 11325
ng-app-js/app-react/package.json | 31
ng-app-js/app-react/public/index.html | 23
ng-app-js/app-react/src/index.jsx | 38
ng-app-js/app-react/webpack.config.js | 40
ng-app-js/app-web/.gitignore | 2
ng-app-js/app-web/LICENSE-APACHE | 201
ng-app-js/app-web/LICENSE-MIT | 25
ng-app-js/app-web/README.md | 25
ng-app-js/app-web/bootstrap.js | 15
ng-app-js/app-web/index.html | 22
ng-app-js/app-web/index.js | 12
ng-app-js/app-web/package-lock.json | 5845
ng-app-js/app-web/package.json | 35
ng-app-js/app-web/webpack.config.js | 14
ng-app-js/index.html | 27
ng-app-js/prepare-node.js | 9
ng-app-js/src/lib.rs | 16
ng-app-web/index.html | 15
ng-app-web/src/lib.rs | 11
ngcli/Cargo.toml | 24
ngcli/src/main.rs | 636
ngd/Cargo.toml | 12
ngd/src/main.rs | 18
p2p-broker/Cargo.toml | 27
p2p-broker/src/auth.rs | 178
p2p-broker/src/broker_store_account.rs | 197
p2p-broker/src/broker_store_config.rs | 103
p2p-broker/src/broker_store_overlay.rs | 219
p2p-broker/src/broker_store_peer.rs | 163
p2p-broker/src/broker_store_repostoreinfo.rs | 103
p2p-broker/src/broker_store_topic.rs | 136
p2p-broker/src/connection_local.rs | 148
p2p-broker/src/lib.rs | 20
p2p-broker/src/server.rs | 891
p2p-broker/src/server_ws.rs | 160
p2p-client/Cargo.toml | 24
p2p-client/src/connection_remote.rs | 596
p2p-client/src/connection_ws.rs | 95
p2p-client/src/lib.rs | 47
p2p-net/Cargo.toml | 20
p2p-net/src/broker_connection.rs | 337
p2p-net/src/errors.rs | 162
p2p-net/src/lib.rs | 5
p2p-net/src/types.rs | 1736
p2p-repo/Cargo.toml | 20
p2p-repo/src/block.rs | 116
p2p-repo/src/branch.rs | 581
p2p-repo/src/broker_store.rs | 64
p2p-repo/src/commit.rs | 454
p2p-repo/src/errors.rs | 29
p2p-repo/src/lib.rs | 19
p2p-repo/src/object.rs | 909
p2p-repo/src/repo.rs | 46
p2p-repo/src/store.rs | 109
p2p-repo/src/types.rs | 530
p2p-repo/src/utils.rs | 79
p2p-stores-lmdb/Cargo.toml | 5
p2p-stores-lmdb/src/broker_store.rs | 233
p2p-stores-lmdb/src/lib.rs | 3
p2p-stores-lmdb/src/repo_store.rs | 997
p2p-verifier/Cargo.toml | 17
p2p-verifier/src/lib.rs | 0

.gitignore (vendored)

@ -0,0 +1,6 @@
*~
!.github
\#*
/target
/result*
.DS_Store

Cargo.lock (generated)

@ -947,7 +947,7 @@ dependencies = [
]
[[package]]
name = "ng-app-web"
name = "ng-app-js-sdk"
version = "0.1.0"
dependencies = [
"wasm-bindgen",
@ -959,12 +959,12 @@ version = "0.1.0"
dependencies = [
"assert_cmd",
"async-std",
"async-tungstenite",
"debug_print",
"ed25519-dalek",
"fastbloom-rs",
"futures",
"p2p-broker",
"p2p-client",
"p2p-net",
"p2p-repo",
"p2p-stores-lmdb",
@ -978,14 +978,7 @@ name = "ngd"
version = "0.1.0"
dependencies = [
"async-std",
"async-tungstenite",
"debug_print",
"futures",
"p2p-broker",
"p2p-net",
"p2p-repo",
"p2p-stores-lmdb",
"tempfile",
]
[[package]]
@ -1088,12 +1081,10 @@ dependencies = [
name = "p2p-broker"
version = "0.1.0"
dependencies = [
"async-broadcast",
"async-channel",
"async-oneshot",
"async-std",
"async-trait",
"blake3",
"async-tungstenite",
"chacha20",
"debug_print",
"futures",
@ -1107,26 +1098,37 @@ dependencies = [
"serde_bare",
"serde_bytes",
"tempfile",
"xactor",
]
[[package]]
name = "p2p-client"
version = "0.1.0"
dependencies = [
"blake3",
"async-channel",
"async-oneshot",
"async-std",
"async-trait",
"async-tungstenite",
"chacha20",
"debug_print",
"futures",
"p2p-net",
"p2p-repo",
"serde",
"serde_bare",
"serde_bytes",
"xactor",
]
[[package]]
name = "p2p-net"
version = "0.1.0"
dependencies = [
"async-broadcast",
"async-trait",
"blake3",
"debug_print",
"futures",
"num_enum",
"p2p-repo",
"serde",
@ -1163,6 +1165,19 @@ dependencies = [
"tempfile",
]
[[package]]
name = "p2p-verifier"
version = "0.1.0"
dependencies = [
"blake3",
"chacha20",
"p2p-net",
"p2p-repo",
"serde",
"serde_bare",
"serde_bytes",
]
[[package]]
name = "parking"
version = "2.0.0"

Cargo.toml
@ -4,8 +4,13 @@ members = [
"p2p-net",
"p2p-broker",
"p2p-client",
"p2p-verifier",
"p2p-stores-lmdb",
"ngcli",
"ngd",
"ng-app-web",
]
"ng-app-js",
]
[profile.release]
lto = true
opt-level = 's'

README.md
@ -37,6 +37,21 @@ cd nextgraph-rs
nix develop
cargo build
```
### Packages
The crates are organized as follows:
- p2p-repo : all the common types, traits and structs for the P2P repositories
- p2p-net : all the common types, traits and structs for the P2P networks
- p2p-broker : the broker code (as server and as core peer)
- p2p-client : the client connecting to a broker; used by the apps and the verifier
- p2p-stores-lmdb : LMDB-backed stores for the P2P layer
- p2p-verifier : the code of the verifier
- ngcli : CLI tool to manipulate the repos
- ngd : binary executable of the daemon (which can run a broker, a verifier and/or Rust services)
- ng-app-js : the JS SDK, the web app, the React app, and some node services
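These layers stack as in the following minimal sketch (not part of the commit; it mirrors the `test_local_connection` flow in `ngcli/src/main.rs` further down this diff):

```rust
// Sketch: wiring the reorganized crates together, ngcli-style.
// p2p-stores-lmdb supplies the storage backend, p2p-broker runs on top of it,
// and p2p-repo provides the shared types and utils.
use p2p_broker::broker_store_config::ConfigMode;
use p2p_broker::server::BrokerServer;
use p2p_repo::utils::generate_keypair;
use p2p_stores_lmdb::broker_store::LmdbBrokerStore;

fn main() {
    let root = tempfile::tempdir().unwrap();
    let master_key: [u8; 32] = [0; 32];
    let store = LmdbBrokerStore::open(root.path(), master_key);
    let mut server = BrokerServer::new(store, ConfigMode::Local).expect("starting broker");
    let (_priv_key, pub_key) = generate_keypair();
    // the connection handed back implements p2p-net's BrokerConnection trait
    let _cnx = server.local_connection(pub_key);
}
```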
### Run
Build & run executables:
@ -60,6 +75,11 @@ Test a single module:
cargo test --package p2p-repo --lib -- branch::test --nocapture
```
Test end-to-end client and server:
```
cargo test --package ngcli -- --nocapture
```
### Build a package
Build the default package (`.#ngd`):

flake.nix
@ -29,6 +29,7 @@
.buildRustPackage;
myNativeBuildInputs = with pkgs;
[
nodejs
pkgconfig
(rust-bin.stable.latest.default.override {
targets = [ "wasm32-unknown-unknown" ];
@ -82,6 +83,10 @@
pname = "p2p-client";
buildAndTestSubdir = "./p2p-client";
};
p2p-verifier = myBuildRustPackage rec {
pname = "p2p-verifier";
buildAndTestSubdir = "./p2p-verifier";
};
p2p-stores-lmdb = myBuildRustPackage rec {
pname = "stores-lmdb";
buildAndTestSubdir = "./stores-lmdb";
@ -94,9 +99,9 @@
pname = "ngd";
buildAndTestSubdir = "./ngd";
};
ng-app-web = myBuildRustPackage rec {
pname = "ng-app-web";
buildAndTestSubdir = "./ng-app-web";
ng-app-js = myBuildRustPackage rec {
pname = "ng-app-js";
buildAndTestSubdir = "./ng-app-js";
};
default = ngd;
};

ng-app-js/Cargo.toml
@ -1,10 +1,10 @@
[package]
name = "ng-app-web"
name = "ng-app-js-sdk"
version = "0.1.0"
edition = "2021"
license = "MIT/Apache-2.0"
authors = ["Niko PLP <niko@nextgraph.org>"]
description = "Web app client of NextGraph"
description = "JS app sdk of NextGraph"
repository = "https://git.nextgraph.org/NextGraph/nextgraph-rs"
[package.metadata.wasm-pack.profile.release]

ng-app-js/README.md
@ -0,0 +1,104 @@
# ng-app-js
JS/WASM module of NextGraph (SDK and apps)
## NextGraph
> NextGraph brings about the convergence between P2P and Semantic Web technologies, towards a decentralized, secure and privacy-preserving cloud, based on CRDTs.
>
> This open source ecosystem provides solutions for end-users and software developers alike, wishing to use or create **decentralized** apps featuring: **live collaboration** on rich-text documents, peer to peer communication with end-to-end encryption, offline-first, **local-first**, portable and interoperable data, total ownership of data and software, security and privacy. Centered on repositories containing **semantic data** (RDF), **rich text**, and structured data formats like **JSON**, synced between peers belonging to permissioned groups of users, it offers strong eventual consistency, thanks to the use of **CRDTs**. Documents can be linked together, signed, shared securely, queried using the **SPARQL** language and organized into sites and containers.
>
> More info here [https://nextgraph.org](https://nextgraph.org)
## JS/WASM module
This module is part of the SDK of NextGraph.
It is composed of
- the npm package `ng-app-js` which is the SDK
- the plain JS web app `app-web`
- the React web app `app-react`
- the node-js app `app-node`
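Under the hood, the SDK's Rust side is a thin wasm-bindgen layer (see `ng-app-js/src/lib.rs` later in this commit); as a minimal sketch, an export like the one below becomes `ng.change(...)` in each of these apps:

```rust
use wasm_bindgen::prelude::*;

// wasm-pack compiles this into the bundler (`pkg`) and nodejs (`pkg-node`) packages.
#[wasm_bindgen]
pub fn change(name: &str) -> JsValue {
    JsValue::from_str(&format!("Hellooo, {}!", name))
}
```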
## Support
Documentation can be found here [https://docs.nextgraph.org](https://docs.nextgraph.org)
And our community forum where you can ask questions is here [https://forum.nextgraph.org](https://forum.nextgraph.org)
## For developers
Read our [getting started guide](https://docs.nextgraph.org/en/getting-started/).
```
npm i ng-app-js-sdk
```
## For contributors
```
wasm-pack build --target bundler
cd pkg
// if you have access to npm registry and want to publish the package
// npm publish --access=public
cd ..
wasm-pack build -t nodejs -d pkg-node
node prepare-node.js
cd pkg-node
// if you have access to npm registry and want to publish the package
// npm publish --access=public
```
### Plain JS web app
```
cd ../app-web
// for local development
npm install --no-save ../pkg
// or, for install from npm registry: npm install
npm start
```
Open this URL in the browser: [http://localhost:8080](http://localhost:8080)
### React web app
```
cd ../app-react
// for local development
npm install --no-save ../pkg
// or, for install from npm registry: npm install
npm run dev
```
Open this URL in the browser: [http://localhost:8080](http://localhost:8080)
### NodeJS app
```
cd ../app-node
// for local development
npm install --no-save ../pkg-node
// or, for install from npm registry: npm install
npm run start
```
### Contributions license
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in the work by you shall be dual licensed as below, without any
additional terms or conditions.
## License
Licensed under either of
* Apache License, Version 2.0 ([LICENSE-APACHE2](LICENSE-APACHE2) or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
at your option.
`SPDX-License-Identifier: Apache-2.0 OR MIT`
---
NextGraph received funding through the [NGI Assure Fund](https://nlnet.nl/project/NextGraph/index.html), a fund established by [NLnet](https://nlnet.nl/) with financial support from the European Commission's [Next Generation Internet](https://ngi.eu/) programme, under the aegis of DG Communications Networks, Content and Technology under grant agreement No 957073.

ng-app-js/app-node/.gitignore
@ -0,0 +1 @@
node_modules

ng-app-js/app-node/index.js
@ -0,0 +1,12 @@
// Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// All rights reserved.
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
const ng = require("ng-app-node-sdk");
console.log(ng.change("you"));

ng-app-js/app-node/package.json
@ -0,0 +1,18 @@
{
"name": "ng-app-node",
"version": "0.1.0",
"description": "NodeJS app for NextGraph",
"main": "index.js",
"scripts": {
"start": "node index.js"
},
"repository": {
"type": "git",
"url": "git+https://git.nextgraph.org/NextGraph/nextgraph-rs.git"
},
"author": "Niko PLP <niko@nextgraph.org>",
"license": "(MIT OR Apache-2.0)",
"dependencies": {
"ng-app-node-sdk": "^0.1.0"
}
}

ng-app-js/app-react/.babelrc
@ -0,0 +1,3 @@
{
"presets": ["@babel/preset-env", "@babel/preset-react"]
}

ng-app-js/app-react/.gitignore
@ -0,0 +1,3 @@
node_modules
dist
build

ng-app-js/app-react/README.md
@ -0,0 +1,41 @@
# ng-app-react
React app client of NextGraph
## NextGraph
> NextGraph brings about the convergence between P2P and Semantic Web technologies, towards a decentralized, secure and privacy-preserving cloud, based on CRDTs.
>
> This open source ecosystem provides solutions for end-users and software developers alike, wishing to use or create **decentralized** apps featuring: **live collaboration** on rich-text documents, peer to peer communication with end-to-end encryption, offline-first, **local-first**, portable and interoperable data, total ownership of data and software, security and privacy. Centered on repositories containing **semantic data** (RDF), **rich text**, and structured data formats like **JSON**, synced between peers belonging to permissioned groups of users, it offers strong eventual consistency, thanks to the use of **CRDTs**. Documents can be linked together, signed, shared securely, queried using the **SPARQL** language and organized into sites and containers.
>
> More info here [https://nextgraph.org](https://nextgraph.org)
## For contributors
Build the JS SDK
```
cd ..
wasm-pack build --target bundler
```
```
cd app-react
npm install --no-save ../pkg
npm run dev
```
Open this URL in the browser: [http://localhost:8080](http://localhost:8080)
## License
Licensed under either of
* Apache License, Version 2.0 ([LICENSE-APACHE2](LICENSE-APACHE2) or http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
at your option.
`SPDX-License-Identifier: Apache-2.0 OR MIT`
---
NextGraph received funding through the [NGI Assure Fund](https://nlnet.nl/project/NextGraph/index.html), a fund established by [NLnet](https://nlnet.nl/) with financial support from the European Commission's [Next Generation Internet](https://ngi.eu/) programme, under the aegis of DG Communications Networks, Content and Technology under grant agreement No 957073.

ng-app-js/app-react/package-lock.json: file diff suppressed because it is too large

ng-app-js/app-react/package.json
@ -0,0 +1,31 @@
{
"name": "ng-app-react",
"version": "0.1.0",
"description": "React based web application of NextGraph",
"main": "src/index.jsx",
"scripts": {
"dev": "webpack server"
},
"keywords": [],
"author": "Niko PLP <niko@nextgraph.org>",
"license": "MIT/Apache-2.0",
"repository": {
"type": "git",
"url": "https://git.nextgraph.org/NextGraph/nextgraph-rs"
},
"dependencies": {
"react": "^18.2.0",
"react-dom": "^18.2.0",
"ng-app-js-sdk": "^0.1.0"
},
"devDependencies": {
"@babel/preset-env": "^7.20.2",
"@babel/preset-react": "^7.18.6",
"babel-core": "^6.26.3",
"babel-loader": "^9.1.2",
"html-webpack-plugin": "^5.5.0",
"webpack": "^5.75.0",
"webpack-cli": "^5.0.1",
"webpack-dev-server": "^4.11.1"
}
}

ng-app-js/app-react/public/index.html
@ -0,0 +1,23 @@
<!--
// Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// All rights reserved.
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
-->
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>NextGraph</title>
</head>
<body>
<div id="root"></div>
</body>
</html>

ng-app-js/app-react/src/index.jsx
@ -0,0 +1,38 @@
// Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// All rights reserved.
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
import React, { useState } from "react";
import ReactDOM from "react-dom";
const ng_sdk = import("ng-app-js-sdk");
ng_sdk.then((ng) => {
const App = () => {
const [name, setName] = useState("");
const handleChange = (e) => {
setName(ng.change(e.target.value));
};
const handleClick = () => {
console.log(name);
ng.greet(name);
};
return (
<>
<div>
I say: {name}<br/>
<input type="text" onChange={handleChange} />
<button onClick={handleClick}>Say hello!</button>
</div>
</>
);
};
ReactDOM.render(<App />, document.getElementById("root"));
});

ng-app-js/app-react/webpack.config.js
@ -0,0 +1,40 @@
const HtmlWebpackPlugin = require("html-webpack-plugin");
const path = require("path");
module.exports = {
entry: "./src/index.jsx",
output: {
path: path.resolve(__dirname, "dist"),
filename: "bundle.[hash].js",
},
devServer: {
compress: true,
port: 8080,
hot: true,
static: "./dist",
historyApiFallback: true,
open: true,
},
module: {
rules: [
{
test: /\.(js|jsx)$/,
exclude: /node_modules/,
use: {
loader: "babel-loader",
},
},
],
},
plugins: [
new HtmlWebpackPlugin({
template: __dirname + "/public/index.html",
filename: "index.html",
}),
],
experiments: {
asyncWebAssembly: true
},
mode: "development",
devtool: "inline-source-map",
};

ng-app-js/app-web/.gitignore
@ -0,0 +1,2 @@
node_modules
dist

ng-app-js/app-web/LICENSE-APACHE
@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

ng-app-js/app-web/LICENSE-MIT
@ -0,0 +1,25 @@
Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

ng-app-js/app-web/README.md
@ -1,6 +1,6 @@
# ng-app-web
Web JS/WASM module of NextGraph
Web app client of NextGraph
## NextGraph
@ -10,24 +10,23 @@ Web JS/WASM module of NextGraph
>
> More info here [https://nextgraph.org](https://nextgraph.org)
## Web JS/WASM module
## For contributors
This module is part of the SDK of NextGraph.
Build the JS SDK
## Support
Documentation can be found here [https://docs.nextgraph.org](https://docs.nextgraph.org)
And our community forum where you can ask questions is here [https://forum.nextgraph.org](https://forum.nextgraph.org)
## For developers
Read our [getting started guide](https://docs.nextgraph.org/en/getting-started/).
```
cd ..
wasm-pack build --target bundler
```
```
npm i np-app-web
cd app-web
npm install --no-save ../pkg
npm start
```
Open this URL in the browser: [http://localhost:8080](http://localhost:8080)
## License
Licensed under either of

ng-app-js/app-web/bootstrap.js
@ -0,0 +1,15 @@
// Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// All rights reserved.
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
// A dependency graph that contains any wasm must all be imported
// asynchronously. This `bootstrap.js` file does the single async import, so
// that no one else needs to worry about it again.
import("./index.js")
.catch(e => console.error("Error importing `index.js`:", e));

ng-app-js/app-web/index.html
@ -0,0 +1,22 @@
<!--
// Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// All rights reserved.
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
-->
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>NextGraph</title>
</head>
<body>
<noscript>This page contains webassembly and javascript content, please enable javascript in your browser.</noscript>
<script src="./bootstrap.js"></script>
</body>
</html>

ng-app-js/app-web/index.js
@ -0,0 +1,12 @@
// Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// All rights reserved.
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
import * as ng from "ng-app-js-sdk";
console.log(ng.change("you"));

ng-app-js/app-web/package-lock.json: file diff suppressed because it is too large

ng-app-js/app-web/package.json
@ -0,0 +1,35 @@
{
"name": "ng-app-web",
"version": "0.1.0",
"description": "Web app client of NextGraph",
"main": "index.js",
"scripts": {
"build": "webpack --config webpack.config.js",
"start": "webpack-dev-server"
},
"repository": {
"type": "git",
"url": "git+https://git.nextgraph.org/NextGraph/nextgraph-rs.git"
},
"keywords": [
"webassembly",
"wasm",
"rust",
"webpack"
],
"author": "Niko PLP <niko@nextgraph.org>",
"license": "(MIT OR Apache-2.0)",
"bugs": {
"url": "https://git.nextgraph.org/NextGraph/nextgraph-rs/issues"
},
"homepage": "https://docs.nextgraph.org",
"dependencies": {
"ng-app-js-sdk": "^0.1.0"
},
"devDependencies": {
"webpack": "^4.29.3",
"webpack-cli": "^3.1.0",
"webpack-dev-server": "^3.1.5",
"copy-webpack-plugin": "^5.0.0"
}
}

ng-app-js/app-web/webpack.config.js
@ -0,0 +1,14 @@
const CopyWebpackPlugin = require("copy-webpack-plugin");
const path = require('path');
module.exports = {
entry: "./bootstrap.js",
output: {
path: path.resolve(__dirname, "dist"),
filename: "bootstrap.js",
},
mode: "development",
plugins: [
new CopyWebpackPlugin(['index.html'])
],
};

ng-app-js/index.html
@ -0,0 +1,27 @@
<!--
// Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// All rights reserved.
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
-->
<!DOCTYPE html>
<html lang="en-US">
<head>
<meta charset="utf-8" />
<title>NextGraph web sdk test</title>
</head>
<body>
<p>run <code>python3 -m http.server</code> to use it</p>
<script type="module">
import init, { greet } from "./pkg/ng_app_js_sdk.js";
init().then(() => {
greet("WebAssembly");
});
</script>
</body>
</html>

ng-app-js/prepare-node.js
@ -0,0 +1,9 @@
const fs = require('fs');
const PATH = './pkg-node/package.json';
const pkg_json = fs.readFileSync(PATH);
let pkg = JSON.parse(pkg_json)
pkg.name = "ng-app-node-sdk";
pkg.description = "nodejs app sdk of NextGraph";
fs.writeFileSync(PATH, JSON.stringify(pkg, null, 2), 'utf8');

ng-app-js/src/lib.rs
@ -0,0 +1,16 @@
use wasm_bindgen::prelude::*;
#[wasm_bindgen]
extern {
pub fn alert(s: &str);
}
#[wasm_bindgen]
pub fn greet(name: &str) {
alert(&format!("I say: {}", name));
}
#[wasm_bindgen]
pub fn change(name: &str) -> JsValue {
JsValue::from_str(&format!("Hellooo, {}!", name))
}

ng-app-web/index.html (deleted)
@ -1,15 +0,0 @@
<!DOCTYPE html>
<html lang="en-US">
<head>
<meta charset="utf-8" />
<title>hello-wasm example</title>
</head>
<body>
<script type="module">
import init, { greet } from "./pkg/ng_app_web.js";
init().then(() => {
greet("WebAssembly");
});
</script>
</body>
</html>

ng-app-web/src/lib.rs (deleted)
@ -1,11 +0,0 @@
use wasm_bindgen::prelude::*;
#[wasm_bindgen]
extern {
pub fn alert(s: &str);
}
#[wasm_bindgen]
pub fn greet(name: &str) {
alert(&format!("Hello, {}!", name));
}

ngcli/Cargo.toml
@ -0,0 +1,24 @@
[package]
name = "ngcli"
version = "0.1.0"
edition = "2021"
license = "MIT/Apache-2.0"
authors = ["Niko PLP <niko@nextgraph.org>"]
description = "CLI command-line interpreter of NextGraph"
repository = "https://git.nextgraph.org/NextGraph/nextgraph-rs"
[dependencies]
debug_print = "1.0.0"
p2p-repo = { path = "../p2p-repo" }
p2p-net = { path = "../p2p-net" }
p2p-client = { path = "../p2p-client" }
p2p-broker = { path = "../p2p-broker" }
p2p-stores-lmdb = { path = "../p2p-stores-lmdb" }
async-std = { version = "1.7.0", features = ["attributes"] }
futures = "0.3.24"
xactor = "0.7.11"
tempfile = "3"
fastbloom-rs = "0.3.1"
rand = "0.7"
ed25519-dalek = "1.0.1"
assert_cmd = "2.0.5"

ngcli/src/main.rs
@ -0,0 +1,636 @@
// Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// All rights reserved.
// This code is partly derived from work written by TG x Thoth from P2Pcollab.
// Copyright 2022 TG x Thoth
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use debug_print::*;
use ed25519_dalek::*;
use fastbloom_rs::{BloomFilter as Filter, FilterBuilder, Membership};
use futures::{future, pin_mut, stream, SinkExt, StreamExt};
use p2p_repo::object::Object;
use p2p_repo::store::{store_max_value_size, store_valid_value_size, HashMapRepoStore, RepoStore};
use p2p_broker::broker_store_config::ConfigMode;
use p2p_stores_lmdb::broker_store::LmdbBrokerStore;
use p2p_stores_lmdb::repo_store::LmdbRepoStore;
use rand::rngs::OsRng;
use std::collections::HashMap;
use p2p_repo::types::*;
use p2p_repo::utils::{generate_keypair, now_timestamp};
use p2p_broker::server_ws::*;
use p2p_broker::server::*;
use p2p_net::errors::*;
use p2p_net::types::*;
use p2p_net::broker_connection::*;
use p2p_client::connection_remote::*;
use p2p_client::connection_ws::*;
use p2p_broker::connection_local::*;
fn block_size() -> usize {
store_max_value_size()
//store_valid_value_size(0)
}
async fn test_sync(cnx: &mut impl BrokerConnection, user_pub_key: PubKey, userpriv_key: PrivKey) {
fn add_obj(
content: ObjectContent,
deps: Vec<ObjectId>,
expiry: Option<Timestamp>,
repo_pubkey: PubKey,
repo_secret: SymKey,
store: &mut impl RepoStore,
) -> ObjectRef {
let max_object_size = 4000;
let obj = Object::new(
content,
deps,
expiry,
max_object_size,
repo_pubkey,
repo_secret,
);
//println!(">>> add_obj");
println!(" id: {}", obj.id());
//println!(" deps: {:?}", obj.deps());
obj.save(store).unwrap();
obj.reference().unwrap()
}
fn add_commit(
branch: ObjectRef,
author_privkey: PrivKey,
author_pubkey: PubKey,
seq: u32,
deps: Vec<ObjectRef>,
acks: Vec<ObjectRef>,
body_ref: ObjectRef,
repo_pubkey: PubKey,
repo_secret: SymKey,
store: &mut impl RepoStore,
) -> ObjectRef {
let mut obj_deps: Vec<ObjectId> = vec![];
obj_deps.extend(deps.iter().map(|r| r.id));
obj_deps.extend(acks.iter().map(|r| r.id));
let obj_ref = ObjectRef {
id: ObjectId::Blake3Digest32([1; 32]),
key: SymKey::ChaCha20Key([2; 32]),
};
let refs = vec![obj_ref];
let metadata = vec![5u8; 55];
let expiry = None;
let commit = Commit::new(
author_privkey,
author_pubkey,
seq,
branch,
deps,
acks,
refs,
metadata,
body_ref,
expiry,
)
.unwrap();
//println!("commit: {}", commit.id().unwrap());
add_obj(
ObjectContent::Commit(commit),
obj_deps,
expiry,
repo_pubkey,
repo_secret,
store,
)
}
fn add_body_branch(
branch: Branch,
repo_pubkey: PubKey,
repo_secret: SymKey,
store: &mut impl RepoStore,
) -> ObjectRef {
let deps = vec![];
let expiry = None;
let body = CommitBody::Branch(branch);
//println!("body: {:?}", body);
add_obj(
ObjectContent::CommitBody(body),
deps,
expiry,
repo_pubkey,
repo_secret,
store,
)
}
fn add_body_trans(
deps: Vec<ObjectId>,
repo_pubkey: PubKey,
repo_secret: SymKey,
store: &mut impl RepoStore,
) -> ObjectRef {
let expiry = None;
let content = [7u8; 777].to_vec();
let body = CommitBody::Transaction(Transaction::V0(content));
//println!("body: {:?}", body);
add_obj(
ObjectContent::CommitBody(body),
deps,
expiry,
repo_pubkey,
repo_secret,
store,
)
}
fn add_body_ack(
deps: Vec<ObjectId>,
repo_pubkey: PubKey,
repo_secret: SymKey,
store: &mut impl RepoStore,
) -> ObjectRef {
let expiry = None;
let body = CommitBody::Ack(Ack::V0());
//println!("body: {:?}", body);
add_obj(
ObjectContent::CommitBody(body),
deps,
expiry,
repo_pubkey,
repo_secret,
store,
)
}
let mut store = HashMapRepoStore::new();
let mut rng = OsRng {};
// repo
let repo_keypair: Keypair = Keypair::generate(&mut rng);
// println!(
// "repo private key: ({}) {:?}",
// repo_keypair.secret.as_bytes().len(),
// repo_keypair.secret.as_bytes()
// );
// println!(
// "repo public key: ({}) {:?}",
// repo_keypair.public.as_bytes().len(),
// repo_keypair.public.as_bytes()
// );
let _repo_privkey = PrivKey::Ed25519PrivKey(repo_keypair.secret.to_bytes());
let repo_pubkey = PubKey::Ed25519PubKey(repo_keypair.public.to_bytes());
let repo_secret = SymKey::ChaCha20Key([9; 32]);
let repolink = RepoLink::V0(RepoLinkV0 {
id: repo_pubkey,
secret: repo_secret,
peers: vec![],
});
// branch
let branch_keypair: Keypair = Keypair::generate(&mut rng);
//println!("branch public key: {:?}", branch_keypair.public.as_bytes());
let branch_pubkey = PubKey::Ed25519PubKey(branch_keypair.public.to_bytes());
let member_keypair: Keypair = Keypair::generate(&mut rng);
//println!("member public key: {:?}", member_keypair.public.as_bytes());
let member_privkey = PrivKey::Ed25519PrivKey(member_keypair.secret.to_bytes());
let member_pubkey = PubKey::Ed25519PubKey(member_keypair.public.to_bytes());
let metadata = [66u8; 64].to_vec();
let commit_types = vec![CommitType::Ack, CommitType::Transaction];
let secret = SymKey::ChaCha20Key([0; 32]);
let member = MemberV0::new(member_pubkey, commit_types, metadata.clone());
let members = vec![member];
let mut quorum = HashMap::new();
quorum.insert(CommitType::Transaction, 3);
let ack_delay = RelTime::Minutes(3);
let tags = [99u8; 32].to_vec();
let branch = Branch::new(
branch_pubkey,
branch_pubkey,
secret,
members,
quorum,
ack_delay,
tags,
metadata,
);
//println!("branch: {:?}", branch);
println!("branch deps/acks:");
println!("");
println!(" br");
println!(" / \\");
println!(" t1 t2");
println!(" / \\ / \\");
println!(" a3 t4<--t5-->(t1)");
println!(" / \\");
println!(" a6 a7");
println!("");
// commit bodies
let branch_body = add_body_branch(
branch.clone(),
repo_pubkey.clone(),
repo_secret.clone(),
&mut store,
);
let ack_body = add_body_ack(vec![], repo_pubkey, repo_secret, &mut store);
let trans_body = add_body_trans(vec![], repo_pubkey, repo_secret, &mut store);
// create & add commits to store
println!(">> br");
let br = add_commit(
branch_body,
member_privkey,
member_pubkey,
0,
vec![],
vec![],
branch_body,
repo_pubkey,
repo_secret,
&mut store,
);
println!(">> t1");
let t1 = add_commit(
branch_body,
member_privkey,
member_pubkey,
1,
vec![br],
vec![],
trans_body,
repo_pubkey,
repo_secret,
&mut store,
);
println!(">> t2");
let t2 = add_commit(
branch_body,
member_privkey,
member_pubkey,
2,
vec![br],
vec![],
trans_body,
repo_pubkey,
repo_secret,
&mut store,
);
println!(">> a3");
let a3 = add_commit(
branch_body,
member_privkey,
member_pubkey,
3,
vec![t1],
vec![],
ack_body,
repo_pubkey,
repo_secret,
&mut store,
);
println!(">> t4");
let t4 = add_commit(
branch_body,
member_privkey,
member_pubkey,
4,
vec![t2],
vec![t1],
trans_body,
repo_pubkey,
repo_secret,
&mut store,
);
println!(">> t5");
let t5 = add_commit(
branch_body,
member_privkey,
member_pubkey,
5,
vec![t1, t2],
vec![t4],
trans_body,
repo_pubkey,
repo_secret,
&mut store,
);
println!(">> a6");
let a6 = add_commit(
branch_body,
member_privkey,
member_pubkey,
6,
vec![t4],
vec![],
ack_body,
repo_pubkey,
repo_secret,
&mut store,
);
println!(">> a7");
let a7 = add_commit(
branch_body,
member_privkey,
member_pubkey,
7,
vec![t4],
vec![],
ack_body,
repo_pubkey,
repo_secret,
&mut store,
);
let mut public_overlay_cnx = cnx
.overlay_connect(&repolink, true)
.await
.expect("overlay_connect failed");
// Sending everything to the broker
for (v) in store.get_all() {
//debug_println!("SENDING {}", k);
let _ = public_overlay_cnx
.put_block(&v)
.await
.expect("put_block failed");
}
// Now emptying the local store of the client, and adding only 1 commit into it (br)
// we also have received a commit (t5) but we don't know what to do with it...
let mut store = HashMapRepoStore::new();
let br = add_commit(
branch_body,
member_privkey,
member_pubkey,
0,
vec![],
vec![],
branch_body,
repo_pubkey,
repo_secret,
&mut store,
);
let t5 = add_commit(
branch_body,
member_privkey,
member_pubkey,
5,
vec![t1, t2],
vec![t4],
trans_body,
repo_pubkey,
repo_secret,
&mut store,
);
debug_println!("LOCAL STORE HAS {} BLOCKS", store.get_len());
// Let's pretend that we know that the head of the branch in the broker is at commits a6 and a7.
// normally it would be the pub/sub that notifies us of those heads.
// now we want to synchronize with the broker.
let mut filter = Filter::new(FilterBuilder::new(10, 0.01));
for commit_ref in [br, t5] {
match commit_ref.id {
ObjectId::Blake3Digest32(d) => filter.add(&d),
}
}
let cfg = filter.config();
let known_commits = BloomFilter {
k: cfg.hashes,
f: filter.get_u8_array().to_vec(),
};
let known_heads = [br.id];
let remote_heads = [a6.id, a7.id];
let mut synced_blocks_stream = public_overlay_cnx
.sync_branch(remote_heads.to_vec(), known_heads.to_vec(), known_commits)
.await
.expect("sync_branch failed");
let mut i = 0;
while let Some(b) = synced_blocks_stream.next().await {
debug_println!("GOT BLOCK {}", b.id());
store.put(&b);
i += 1;
}
debug_println!("SYNCED {} BLOCKS", i);
debug_println!("LOCAL STORE HAS {} BLOCKS", store.get_len());
// now the client can verify the DAG and each commit. Then update its list of heads.
}
async fn test(cnx: &mut impl BrokerConnection, pub_key: PubKey, priv_key: PrivKey) -> Result<(), ProtocolError>{
cnx.add_user(PubKey::Ed25519PubKey([1; 32]), priv_key).await?;
cnx.add_user(pub_key, priv_key).await?;
//.expect("add_user 2 (myself) failed");
assert_eq!(
cnx.add_user(PubKey::Ed25519PubKey([1; 32]), priv_key).await.err().unwrap(),
ProtocolError::UserAlreadyExists
);
let repo = RepoLink::V0(RepoLinkV0 {
id: PubKey::Ed25519PubKey([1; 32]),
secret: SymKey::ChaCha20Key([0; 32]),
peers: vec![],
});
let mut public_overlay_cnx = cnx
.overlay_connect(&repo, true)
.await?;
let my_block_id = public_overlay_cnx
.put_block(&Block::new(
vec![],
ObjectDeps::ObjectIdList(vec![]),
None,
vec![27; 150],
None,
))
.await?;
debug_println!("added block_id to store {}", my_block_id);
let object_id = public_overlay_cnx
.put_object(
ObjectContent::File(File::V0(FileV0 {
content_type: vec![],
metadata: vec![],
content: vec![48; 69000],
})),
vec![],
None,
block_size(),
repo.id(),
repo.secret(),
)
.await?;
debug_println!("added object_id to store {}", object_id);
let mut my_block_stream = public_overlay_cnx
.get_block(my_block_id, true, None)
.await?;
//.expect("get_block failed");
while let Some(b) = my_block_stream.next().await {
debug_println!("GOT BLOCK {}", b.id());
}
let mut my_object_stream = public_overlay_cnx
.get_block(object_id, true, None)
.await?;
//.expect("get_block for object failed");
while let Some(b) = my_object_stream.next().await {
debug_println!("GOT BLOCK {}", b.id());
}
let object = public_overlay_cnx
.get_object(object_id, None)
.await?;
//.expect("get_object failed");
debug_println!("GOT OBJECT with ID {}", object.id());
// let object_id = public_overlay_cnx
// .copy_object(object_id, Some(now_timestamp() + 60))
// .await
// .expect("copy_object failed");
// debug_println!("COPIED OBJECT to OBJECT ID {}", object_id);
public_overlay_cnx
.delete_object(object_id)
.await?;
//.expect("delete_object failed");
let res = public_overlay_cnx
.get_object(object_id, None)
.await
.unwrap_err();
debug_println!("result from get object after delete: {}", res);
assert_eq!(res, ProtocolError::NotFound);
//TODO test pin/unpin
// TEST BRANCH SYNC
test_sync(cnx, pub_key, priv_key).await;
Ok(())
}
async fn test_local_connection() {
debug_println!("===== TESTING LOCAL API =====");
let root = tempfile::Builder::new()
.prefix("node-daemon")
.tempdir()
.unwrap();
let master_key: [u8; 32] = [0; 32];
std::fs::create_dir_all(root.path()).unwrap();
println!("{}", root.path().to_str().unwrap());
let store = LmdbBrokerStore::open(root.path(), master_key);
let mut server = BrokerServer::new(store, ConfigMode::Local).expect("starting broker");
let (priv_key, pub_key) = generate_keypair();
let mut cnx = server.local_connection(pub_key);
test(&mut cnx, pub_key, priv_key).await;
}
async fn test_remote_connection(url: &str) {
debug_println!("===== TESTING REMOTE API =====");
let (priv_key, pub_key) = generate_keypair();
let cnx_res = BrokerConnectionWebSocket::open(url, priv_key, pub_key).await;
match cnx_res {
Ok(mut cnx) => {
if let Err(e) = test(&mut cnx, pub_key, priv_key).await {
debug_println!("error: {:?}", e)
}
else {
cnx.close().await;
}
}
Err(e) => {
}
}
}
#[xactor::main]
async fn main() -> std::io::Result<()> {
debug_println!("Starting nextgraph CLI...");
test_local_connection().await;
test_remote_connection("ws://127.0.0.1:3012").await;
Ok(())
}
#[cfg(test)]
mod test {
use crate::{test_local_connection, test_remote_connection};
#[async_std::test]
pub async fn test_local_cnx() {
xactor::block_on(test_local_connection());
}
use async_std::task;
use p2p_broker::server_ws::*;
#[async_std::test]
pub async fn test_remote_cnx() -> Result<(), Box<dyn std::error::Error>> {
let thr = task::spawn(run_server_accept_one("127.0.0.1:3012"));
std::thread::sleep(std::time::Duration::from_secs(2));
xactor::block_on(test_remote_connection("ws://127.0.0.1:3012"));
xactor::block_on(thr);
Ok(())
}
}

ngd/Cargo.toml
@ -0,0 +1,12 @@
[package]
name = "ngd"
version = "0.1.0"
edition = "2021"
license = "MIT/Apache-2.0"
authors = ["Niko PLP <niko@nextgraph.org>"]
description = "Daemon of NextGraph"
repository = "https://git.nextgraph.org/NextGraph/nextgraph-rs"
[dependencies]
p2p-broker = { path = "../p2p-broker" }
async-std = { version = "1.7.0", features = ["attributes"] }

ngd/src/main.rs
@ -0,0 +1,18 @@
// Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// All rights reserved.
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use p2p_broker::server_ws::run_server;
#[async_std::main]
async fn main() -> std::io::Result<()> {
println!("Starting NextGraph daemon...");
run_server("127.0.0.1:3012").await
}

p2p-broker/Cargo.toml
@ -0,0 +1,27 @@
[package]
name = "p2p-broker"
version = "0.1.0"
edition = "2021"
license = "MIT/Apache-2.0"
authors = ["Niko PLP <niko@nextgraph.org>"]
description = "P2P Broker module of NextGraph"
repository = "https://git.nextgraph.org/NextGraph/nextgraph-rs"
[dependencies]
debug_print = "1.0.0"
p2p-repo = { path = "../p2p-repo" }
p2p-net = { path = "../p2p-net" }
p2p-stores-lmdb = { path = "../p2p-stores-lmdb" }
chacha20 = "0.9.0"
serde = { version = "1.0", features = ["derive"] }
serde_bare = "0.5.0"
serde_bytes = "0.11.7"
async-std = { version = "1.7.0", features = ["attributes"] }
futures = "0.3.24"
rust-fsm = "0.6.0"
getrandom = "0.2.7"
async-channel = "1.7.1"
tempfile = "3"
hex = "0.4.3"
async-trait = "0.1.57"
async-tungstenite = { version = "0.17.2", features = ["async-std-runtime","async-native-tls"] }

p2p-broker/src/auth.rs
@ -0,0 +1,178 @@
// Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// All rights reserved.
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use std::pin::Pin;
use debug_print::*;
use futures::future::BoxFuture;
use futures::future::OptionFuture;
use futures::FutureExt;
use p2p_repo::types::*;
use p2p_repo::utils::*;
use p2p_net::errors::*;
use p2p_net::types::*;
use rust_fsm::*;
state_machine! {
derive(Debug)
AuthProtocolClient(Ready)
Ready(ClientHelloSent) => ClientHelloSent,
ClientHelloSent(ServerHelloReceived) => ServerHelloReceived,
ServerHelloReceived(ClientAuthSent) => ClientAuthSent,
ClientAuthSent(AuthResultReceived) => AuthResult,
AuthResult => {
Ok => BrokerProtocol,
Error => Closed,
},
}
state_machine! {
derive(Debug)
AuthProtocolServer(Ready)
Ready(ClientHelloReceived) => ClientHelloReceived,
ClientHelloReceived(ServerHelloSent) => ServerHelloSent,
ServerHelloSent(ClientAuthReceived) => ClientAuthReceived,
ClientAuthReceived => {
Ok => AuthResultOk,
Error => AuthResultError,
},
AuthResultOk(AuthResultSent) => BrokerProtocol,
AuthResultError(AuthResultSent) => Closed,
}
pub struct AuthProtocolHandler {
machine: StateMachine<AuthProtocolServer>,
nonce: Option<Vec<u8>>,
user: Option<PubKey>,
}
impl AuthProtocolHandler {
pub fn new() -> AuthProtocolHandler {
AuthProtocolHandler {
machine: StateMachine::new(),
nonce: None,
user: None,
}
}
pub fn get_user(&self) -> Option<PubKey> {
self.user
}
pub fn handle_init(&mut self, client_hello: ClientHello) -> Result<Vec<u8>, ProtocolError> {
let _ = self
.machine
.consume(&AuthProtocolServerInput::ClientHelloReceived)
.map_err(|_e| ProtocolError::InvalidState)?;
let mut random_buf = [0u8; 32];
getrandom::getrandom(&mut random_buf).unwrap();
let nonce = random_buf.to_vec();
let reply = ServerHello::V0(ServerHelloV0 {
nonce: nonce.clone(),
});
self.nonce = Some(nonce);
let _ = self
.machine
.consume(&AuthProtocolServerInput::ServerHelloSent)
.map_err(|_e| ProtocolError::InvalidState)?;
//debug_println!("sending nonce to client: {:?}", self.nonce);
Ok(serde_bare::to_vec(&reply).unwrap())
}
pub fn handle_incoming(
&mut self,
frame: Vec<u8>,
) -> (
Result<Vec<u8>, ProtocolError>,
Pin<Box<OptionFuture<BoxFuture<'static, u16>>>>,
) {
fn prepare_reply(res: Result<Vec<u8>, ProtocolError>) -> AuthResult {
let (result, metadata) = match res {
Ok(m) => (0, m),
Err(e) => (e.into(), vec![]),
};
AuthResult::V0(AuthResultV0 { result, metadata })
}
fn process_state(
handler: &mut AuthProtocolHandler,
frame: Vec<u8>,
) -> Result<Vec<u8>, ProtocolError> {
match handler.machine.state() {
&AuthProtocolServerState::ServerHelloSent => {
let message = serde_bare::from_slice::<ClientAuth>(&frame)?;
let _ = handler
.machine
.consume(&AuthProtocolServerInput::ClientAuthReceived)
.map_err(|_e| ProtocolError::InvalidState)?;
// verifying client auth
debug_println!("verifying client auth");
let _ = verify(
&serde_bare::to_vec(&message.content_v0()).unwrap(),
message.sig(),
message.user(),
)
.map_err(|_e| ProtocolError::AccessDenied)?;
// debug_println!(
// "matching nonce : {:?} {:?}",
// message.nonce(),
// handler.nonce.as_ref().unwrap()
// );
if message.nonce() != handler.nonce.as_ref().unwrap() {
let _ = handler
.machine
.consume(&AuthProtocolServerInput::Error)
.map_err(|_e| ProtocolError::InvalidState);
return Err(ProtocolError::AccessDenied);
}
// TODO check that the device has been registered for this user. if not, return AccessDenied
// all is good, we advance the FSM and send back response
let _ = handler
.machine
.consume(&AuthProtocolServerInput::Ok)
.map_err(|_e| ProtocolError::InvalidState)?;
handler.user = Some(message.user());
Ok(vec![]) // without any metadata
}
_ => Err(ProtocolError::InvalidState),
}
}
let res = process_state(self, frame);
let is_err = res.as_ref().err().cloned();
let reply = prepare_reply(res);
let reply_ser: Result<Vec<u8>, ProtocolError> = Ok(serde_bare::to_vec(&reply).unwrap());
if is_err.is_some() {
(
reply_ser,
Box::pin(OptionFuture::from(Some(
async move { reply.result() }.boxed(),
))),
)
} else {
(reply_ser, Box::pin(OptionFuture::from(None)))
}
}
}
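For orientation, here is a minimal sketch (assuming the rust-fsm API and the type names generated by the `state_machine!` blocks above) of the happy path through the server-side FSM:

```rust
// Ready -> ClientHelloReceived -> ServerHelloSent -> ClientAuthReceived -> AuthResultOk
fn auth_happy_path_sketch() {
    let mut machine: StateMachine<AuthProtocolServer> = StateMachine::new();
    machine.consume(&AuthProtocolServerInput::ClientHelloReceived).unwrap();
    machine.consume(&AuthProtocolServerInput::ServerHelloSent).unwrap();
    machine.consume(&AuthProtocolServerInput::ClientAuthReceived).unwrap();
    // handle_incoming() feeds Ok or Error here, based on the signature and nonce checks
    machine.consume(&AuthProtocolServerInput::Ok).unwrap();
    assert!(matches!(machine.state(), &AuthProtocolServerState::AuthResultOk));
    // AuthResultSent then moves the machine to BrokerProtocol (Closed on the error path).
}
```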

p2p-broker/src/broker_store_account.rs
@ -0,0 +1,197 @@
// Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// All rights reserved.
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
//! User account
use p2p_repo::broker_store::BrokerStore;
use p2p_repo::store::*;
use p2p_repo::types::*;
use p2p_net::types::*;
use serde_bare::to_vec;
pub struct Account<'a> {
/// User ID
id: UserId,
store: &'a dyn BrokerStore,
}
impl<'a> Account<'a> {
const PREFIX: u8 = b"u"[0];
// property suffixes
const CLIENT: u8 = b"c"[0];
const ADMIN: u8 = b"a"[0];
const OVERLAY: u8 = b"o"[0];
const ALL_PROPERTIES: [u8; 3] = [Self::CLIENT, Self::ADMIN, Self::OVERLAY];
const SUFFIX_FOR_EXIST_CHECK: u8 = Self::ADMIN;
pub fn open(id: &UserId, store: &'a dyn BrokerStore) -> Result<Account<'a>, StorageError> {
let opening = Account {
id: id.clone(),
store,
};
if !opening.exists() {
return Err(StorageError::NotFound);
}
Ok(opening)
}
pub fn create(
id: &UserId,
admin: bool,
store: &'a dyn BrokerStore,
) -> Result<Account<'a>, StorageError> {
let acc = Account {
id: id.clone(),
store,
};
if acc.exists() {
return Err(StorageError::BackendError);
}
store.put(
Self::PREFIX,
&to_vec(&id)?,
Some(Self::ADMIN),
to_vec(&admin)?,
)?;
Ok(acc)
}
pub fn exists(&self) -> bool {
self.store
.get(
Self::PREFIX,
&to_vec(&self.id).unwrap(),
Some(Self::SUFFIX_FOR_EXIST_CHECK),
)
.is_ok()
}
pub fn id(&self) -> UserId {
self.id
}
pub fn add_client(&self, client: &ClientId) -> Result<(), StorageError> {
if !self.exists() {
return Err(StorageError::BackendError);
}
self.store.put(
Self::PREFIX,
&to_vec(&self.id)?,
Some(Self::CLIENT),
to_vec(client)?,
)
}
pub fn remove_client(&self, client: &ClientId) -> Result<(), StorageError> {
self.store.del_property_value(
Self::PREFIX,
&to_vec(&self.id)?,
Some(Self::CLIENT),
to_vec(client)?,
)
}
pub fn has_client(&self, client: &ClientId) -> Result<(), StorageError> {
self.store.has_property_value(
Self::PREFIX,
&to_vec(&self.id)?,
Some(Self::CLIENT),
to_vec(client)?,
)
}
pub fn add_overlay(&self, overlay: &OverlayId) -> Result<(), StorageError> {
if !self.exists() {
return Err(StorageError::BackendError);
}
self.store.put(
Self::PREFIX,
&to_vec(&self.id)?,
Some(Self::OVERLAY),
to_vec(overlay)?,
)
}
pub fn remove_overlay(&self, overlay: &OverlayId) -> Result<(), StorageError> {
self.store.del_property_value(
Self::PREFIX,
&to_vec(&self.id)?,
Some(Self::OVERLAY),
to_vec(overlay)?,
)
}
pub fn has_overlay(&self, overlay: &OverlayId) -> Result<(), StorageError> {
self.store.has_property_value(
Self::PREFIX,
&to_vec(&self.id)?,
Some(Self::OVERLAY),
to_vec(overlay)?,
)
}
pub fn is_admin(&self) -> Result<bool, StorageError> {
if self
.store
.has_property_value(
Self::PREFIX,
&to_vec(&self.id)?,
Some(Self::ADMIN),
to_vec(&true)?,
)
.is_ok()
{
return Ok(true);
}
Ok(false)
}
pub fn del(&self) -> Result<(), StorageError> {
self.store
.del_all(Self::PREFIX, &to_vec(&self.id)?, &Self::ALL_PROPERTIES)
}
}
#[cfg(test)]
mod test {
use p2p_repo::store::*;
use p2p_repo::types::*;
use p2p_repo::utils::*;
use p2p_stores_lmdb::broker_store::LmdbBrokerStore;
use std::fs;
use tempfile::Builder;
use crate::broker_store_account::Account;
#[test]
pub fn test_account() {
let path_str = "test-env";
let root = Builder::new().prefix(path_str).tempdir().unwrap();
let key: [u8; 32] = [0; 32];
fs::create_dir_all(root.path()).unwrap();
println!("{}", root.path().to_str().unwrap());
let mut store = LmdbBrokerStore::open(root.path(), key);
let user_id = PubKey::Ed25519PubKey([1; 32]);
let account = Account::create(&user_id, true, &store).unwrap();
println!("account created {}", account.id());
let account2 = Account::open(&user_id, &store).unwrap();
println!("account opened {}", account2.id());
let client_id = PubKey::Ed25519PubKey([56; 32]);
let client_id_not_added = PubKey::Ed25519PubKey([57; 32]);
account2.add_client(&client_id).unwrap();
assert!(account2.is_admin().unwrap());
account.has_client(&client_id).unwrap();
assert!(account.has_client(&client_id_not_added).is_err());
}
}

@@ -0,0 +1,103 @@ p2p-broker/src/broker_store_config.rs
// Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// All rights reserved.
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
//! Broker Config, persisted to store
use p2p_repo::broker_store::BrokerStore;
use p2p_repo::store::*;
use p2p_repo::types::*;
use p2p_net::types::*;
use serde::{Deserialize, Serialize};
use serde_bare::{from_slice, to_vec};
// TODO: versioning V0
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
pub enum ConfigMode {
Local,
Core,
}
pub struct Config<'a> {
store: &'a dyn BrokerStore,
}
impl<'a> Config<'a> {
const PREFIX: u8 = b"c"[0];
const KEY: [u8; 5] = *b"onfig";
    // properties' suffixes
const MODE: u8 = b"m"[0];
const ALL_PROPERTIES: [u8; 1] = [Self::MODE];
const SUFFIX_FOR_EXIST_CHECK: u8 = Self::MODE;
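    // The config is a singleton row: the prefix byte 'c' followed by the key
    // "onfig" spells out the full storage key "config", with a single MODE property.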
pub fn open(store: &'a dyn BrokerStore) -> Result<Config<'a>, StorageError> {
let opening = Config { store };
if !opening.exists() {
return Err(StorageError::NotFound);
}
Ok(opening)
}
pub fn get_or_create(
mode: &ConfigMode,
store: &'a dyn BrokerStore,
) -> Result<Config<'a>, StorageError> {
match Self::open(store) {
Err(e) => {
if e == StorageError::NotFound {
Self::create(mode, store)
} else {
Err(StorageError::BackendError)
}
}
Ok(p) => {
if &p.mode().unwrap() != mode {
return Err(StorageError::InvalidValue);
}
Ok(p)
}
}
}
pub fn create(
mode: &ConfigMode,
store: &'a dyn BrokerStore,
) -> Result<Config<'a>, StorageError> {
let acc = Config { store };
if acc.exists() {
return Err(StorageError::BackendError);
}
store.put(
Self::PREFIX,
&to_vec(&Self::KEY)?,
Some(Self::MODE),
to_vec(&mode)?,
)?;
Ok(acc)
}
pub fn exists(&self) -> bool {
self.store
.get(
Self::PREFIX,
&to_vec(&Self::KEY).unwrap(),
Some(Self::SUFFIX_FOR_EXIST_CHECK),
)
.is_ok()
}
pub fn mode(&self) -> Result<ConfigMode, StorageError> {
match self
.store
.get(Self::PREFIX, &to_vec(&Self::KEY)?, Some(Self::MODE))
{
Ok(ver) => Ok(from_slice::<ConfigMode>(&ver)?),
Err(e) => Err(e),
}
}
}

@@ -0,0 +1,219 @@ p2p-broker/src/broker_store_overlay.rs
// Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// All rights reserved.
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
//! Overlay
use p2p_repo::broker_store::BrokerStore;
use p2p_repo::store::*;
use p2p_repo::types::*;
use p2p_net::types::*;
use p2p_repo::utils::now_timestamp;
use serde::{Deserialize, Serialize};
use serde_bare::{from_slice, to_vec};
// TODO: versioning V0
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
pub struct OverlayMeta {
pub users: u32,
pub last_used: Timestamp,
}
pub struct Overlay<'a> {
/// Overlay ID
id: OverlayId,
store: &'a dyn BrokerStore,
}
impl<'a> Overlay<'a> {
const PREFIX: u8 = b"o"[0];
    // properties' suffixes
const SECRET: u8 = b"s"[0];
const PEER: u8 = b"p"[0];
const TOPIC: u8 = b"t"[0];
const META: u8 = b"m"[0];
const REPO: u8 = b"r"[0];
const ALL_PROPERTIES: [u8; 5] = [
Self::SECRET,
Self::PEER,
Self::TOPIC,
Self::META,
Self::REPO,
];
const SUFFIX_FOR_EXIST_CHECK: u8 = Self::SECRET;
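    // SECRET, META and REPO are single-valued properties (written with put/replace),
    // while PEER and TOPIC are multi-valued sets managed through put,
    // del_property_value and has_property_value.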
pub fn open(id: &OverlayId, store: &'a dyn BrokerStore) -> Result<Overlay<'a>, StorageError> {
let opening = Overlay {
id: id.clone(),
store,
};
if !opening.exists() {
return Err(StorageError::NotFound);
}
Ok(opening)
}
pub fn create(
id: &OverlayId,
secret: &SymKey,
repo: Option<PubKey>,
store: &'a dyn BrokerStore,
) -> Result<Overlay<'a>, StorageError> {
let acc = Overlay {
id: id.clone(),
store,
};
if acc.exists() {
return Err(StorageError::BackendError);
}
store.put(
Self::PREFIX,
&to_vec(&id)?,
Some(Self::SECRET),
to_vec(&secret)?,
)?;
if repo.is_some() {
store.put(
Self::PREFIX,
&to_vec(&id)?,
Some(Self::REPO),
to_vec(&repo.unwrap())?,
)?;
//TODO if failure, should remove the previously added SECRET property
}
let meta = OverlayMeta {
users: 1,
last_used: now_timestamp(),
};
store.put(
Self::PREFIX,
&to_vec(&id)?,
Some(Self::META),
to_vec(&meta)?,
)?;
//TODO if failure, should remove the previously added SECRET and REPO properties
Ok(acc)
}
pub fn exists(&self) -> bool {
self.store
.get(
Self::PREFIX,
&to_vec(&self.id).unwrap(),
Some(Self::SUFFIX_FOR_EXIST_CHECK),
)
.is_ok()
}
pub fn id(&self) -> OverlayId {
self.id
}
pub fn add_peer(&self, peer: &PeerId) -> Result<(), StorageError> {
if !self.exists() {
return Err(StorageError::BackendError);
}
self.store.put(
Self::PREFIX,
&to_vec(&self.id)?,
Some(Self::PEER),
to_vec(peer)?,
)
}
pub fn remove_peer(&self, peer: &PeerId) -> Result<(), StorageError> {
self.store.del_property_value(
Self::PREFIX,
&to_vec(&self.id)?,
Some(Self::PEER),
to_vec(peer)?,
)
}
pub fn has_peer(&self, peer: &PeerId) -> Result<(), StorageError> {
self.store.has_property_value(
Self::PREFIX,
&to_vec(&self.id)?,
Some(Self::PEER),
to_vec(peer)?,
)
}
pub fn add_topic(&self, topic: &TopicId) -> Result<(), StorageError> {
if !self.exists() {
return Err(StorageError::BackendError);
}
self.store.put(
Self::PREFIX,
&to_vec(&self.id)?,
Some(Self::TOPIC),
to_vec(topic)?,
)
}
pub fn remove_topic(&self, topic: &TopicId) -> Result<(), StorageError> {
self.store.del_property_value(
Self::PREFIX,
&to_vec(&self.id)?,
Some(Self::TOPIC),
to_vec(topic)?,
)
}
pub fn has_topic(&self, topic: &TopicId) -> Result<(), StorageError> {
self.store.has_property_value(
Self::PREFIX,
&to_vec(&self.id)?,
Some(Self::TOPIC),
to_vec(topic)?,
)
}
pub fn secret(&self) -> Result<SymKey, StorageError> {
match self
.store
.get(Self::PREFIX, &to_vec(&self.id)?, Some(Self::SECRET))
{
Ok(secret) => Ok(from_slice::<SymKey>(&secret)?),
Err(e) => Err(e),
}
}
pub fn metadata(&self) -> Result<OverlayMeta, StorageError> {
match self
.store
.get(Self::PREFIX, &to_vec(&self.id)?, Some(Self::META))
{
Ok(meta) => Ok(from_slice::<OverlayMeta>(&meta)?),
Err(e) => Err(e),
}
}
pub fn set_metadata(&self, meta: &OverlayMeta) -> Result<(), StorageError> {
if !self.exists() {
return Err(StorageError::BackendError);
}
self.store.replace(
Self::PREFIX,
&to_vec(&self.id)?,
Some(Self::META),
to_vec(meta)?,
)
}
pub fn repo(&self) -> Result<PubKey, StorageError> {
match self
.store
.get(Self::PREFIX, &to_vec(&self.id)?, Some(Self::REPO))
{
Ok(repo) => Ok(from_slice::<PubKey>(&repo)?),
Err(e) => Err(e),
}
}
pub fn del(&self) -> Result<(), StorageError> {
self.store
.del_all(Self::PREFIX, &to_vec(&self.id)?, &Self::ALL_PROPERTIES)
}
}

@@ -0,0 +1,163 @@ p2p-broker/src/broker_store_peer.rs
// Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// All rights reserved.
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
//! Peer
use p2p_repo::broker_store::BrokerStore;
use p2p_repo::store::*;
use p2p_repo::types::*;
use p2p_net::types::*;
use serde::{Deserialize, Serialize};
use serde_bare::{from_slice, to_vec};
pub struct Peer<'a> {
    /// Peer ID
id: PeerId,
store: &'a dyn BrokerStore,
}
impl<'a> Peer<'a> {
const PREFIX: u8 = b"p"[0];
    // properties' suffixes
const VERSION: u8 = b"v"[0];
const ADVERT: u8 = b"a"[0];
const ALL_PROPERTIES: [u8; 2] = [Self::VERSION, Self::ADVERT];
const SUFFIX_FOR_EXIST_CHECK: u8 = Self::VERSION;
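    // A peer row stores the advert version and the full PeerAdvert.
    // update_advert() only overwrites the stored advert when the incoming
    // version is strictly newer, so the stored advert is monotonic.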
pub fn open(id: &PeerId, store: &'a dyn BrokerStore) -> Result<Peer<'a>, StorageError> {
let opening = Peer {
id: id.clone(),
store,
};
if !opening.exists() {
return Err(StorageError::NotFound);
}
Ok(opening)
}
pub fn update_or_create(
advert: &PeerAdvert,
store: &'a dyn BrokerStore,
) -> Result<Peer<'a>, StorageError> {
let id = advert.peer();
match Self::open(id, store) {
Err(e) => {
if e == StorageError::NotFound {
Self::create(advert, store)
} else {
Err(StorageError::BackendError)
}
}
Ok(p) => {
p.update_advert(advert)?;
Ok(p)
}
}
}
pub fn create(
advert: &PeerAdvert,
store: &'a dyn BrokerStore,
) -> Result<Peer<'a>, StorageError> {
let id = advert.peer();
let acc = Peer {
id: id.clone(),
store,
};
if acc.exists() {
return Err(StorageError::BackendError);
}
store.put(
Self::PREFIX,
&to_vec(&id)?,
Some(Self::VERSION),
to_vec(&advert.version())?,
)?;
store.put(
Self::PREFIX,
&to_vec(&id)?,
Some(Self::ADVERT),
to_vec(&advert)?,
)?;
Ok(acc)
}
pub fn exists(&self) -> bool {
self.store
.get(
Self::PREFIX,
&to_vec(&self.id).unwrap(),
Some(Self::SUFFIX_FOR_EXIST_CHECK),
)
.is_ok()
}
pub fn id(&self) -> PeerId {
self.id
}
pub fn version(&self) -> Result<u32, StorageError> {
match self
.store
.get(Self::PREFIX, &to_vec(&self.id)?, Some(Self::VERSION))
{
Ok(ver) => Ok(from_slice::<u32>(&ver)?),
Err(e) => Err(e),
}
}
pub fn set_version(&self, version: u32) -> Result<(), StorageError> {
if !self.exists() {
return Err(StorageError::BackendError);
}
self.store.replace(
Self::PREFIX,
&to_vec(&self.id)?,
Some(Self::VERSION),
to_vec(&version)?,
)
}
pub fn update_advert(&self, advert: &PeerAdvert) -> Result<(), StorageError> {
if advert.peer() != &self.id {
return Err(StorageError::InvalidValue);
}
        let current_advert = self.advert().map_err(|_e| StorageError::BackendError)?;
if current_advert.version() >= advert.version() {
return Ok(());
}
self.store.replace(
Self::PREFIX,
&to_vec(&self.id)?,
Some(Self::ADVERT),
to_vec(advert)?,
)
}
pub fn advert(&self) -> Result<PeerAdvert, StorageError> {
match self
.store
.get(Self::PREFIX, &to_vec(&self.id)?, Some(Self::ADVERT))
{
Ok(advert) => Ok(from_slice::<PeerAdvert>(&advert)?),
Err(e) => Err(e),
}
}
pub fn set_advert(&self, advert: &PeerAdvert) -> Result<(), StorageError> {
if !self.exists() {
return Err(StorageError::BackendError);
}
self.store.replace(
Self::PREFIX,
&to_vec(&self.id)?,
Some(Self::ADVERT),
to_vec(advert)?,
)
}
pub fn del(&self) -> Result<(), StorageError> {
self.store
.del_all(Self::PREFIX, &to_vec(&self.id)?, &Self::ALL_PROPERTIES)
}
}

@@ -0,0 +1,103 @@ p2p-broker/src/broker_store_repostoreinfo.rs
// Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// All rights reserved.
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
//! Information about each RepoStore
//! It contains the SymKeys needed to open the RepoStores.
//! A RepoStore is identified by its repo pubkey in local mode,
//! and by the OverlayId in core mode.
use p2p_repo::broker_store::BrokerStore;
use p2p_repo::store::*;
use p2p_repo::types::*;
use p2p_net::types::*;
use serde::{Deserialize, Serialize};
use serde_bare::{from_slice, to_vec};
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)]
pub enum RepoStoreId {
Overlay(OverlayId),
Repo(PubKey),
}
impl From<RepoStoreId> for String {
fn from(id: RepoStoreId) -> Self {
hex::encode(to_vec(&id).unwrap())
}
}
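// The hex string form is used as the on-disk directory name of the
// corresponding RepoStore (see open_or_create_repostore in server.rs).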
pub struct RepoStoreInfo<'a> {
/// RepoStore ID
id: RepoStoreId,
store: &'a dyn BrokerStore,
}
impl<'a> RepoStoreInfo<'a> {
const PREFIX: u8 = b"r"[0];
    // properties' suffixes
const KEY: u8 = b"k"[0];
const ALL_PROPERTIES: [u8; 1] = [Self::KEY];
const SUFFIX_FOR_EXIST_CHECK: u8 = Self::KEY;
pub fn open(
id: &RepoStoreId,
store: &'a dyn BrokerStore,
) -> Result<RepoStoreInfo<'a>, StorageError> {
let opening = RepoStoreInfo {
id: id.clone(),
store,
};
if !opening.exists() {
return Err(StorageError::NotFound);
}
Ok(opening)
}
pub fn create(
id: &RepoStoreId,
key: &SymKey,
store: &'a dyn BrokerStore,
) -> Result<RepoStoreInfo<'a>, StorageError> {
let acc = RepoStoreInfo {
id: id.clone(),
store,
};
if acc.exists() {
return Err(StorageError::BackendError);
}
store.put(Self::PREFIX, &to_vec(&id)?, Some(Self::KEY), to_vec(key)?)?;
Ok(acc)
}
pub fn exists(&self) -> bool {
self.store
.get(
Self::PREFIX,
&to_vec(&self.id).unwrap(),
Some(Self::SUFFIX_FOR_EXIST_CHECK),
)
.is_ok()
}
pub fn id(&self) -> &RepoStoreId {
&self.id
}
pub fn key(&self) -> Result<SymKey, StorageError> {
match self
.store
.get(Self::PREFIX, &to_vec(&self.id)?, Some(Self::KEY))
{
Ok(k) => Ok(from_slice::<SymKey>(&k)?),
Err(e) => Err(e),
}
}
pub fn del(&self) -> Result<(), StorageError> {
self.store
.del_all(Self::PREFIX, &to_vec(&self.id)?, &Self::ALL_PROPERTIES)
}
}

@@ -0,0 +1,136 @@ p2p-broker/src/broker_store_topic.rs
// Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// All rights reserved.
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
//! Topic
use p2p_repo::broker_store::BrokerStore;
use p2p_repo::store::*;
use p2p_repo::types::*;
use p2p_net::types::*;
use serde::{Deserialize, Serialize};
use serde_bare::{from_slice, to_vec};
// TODO: versioning V0
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
pub struct TopicMeta {
pub users: u32,
}
pub struct Topic<'a> {
/// Topic ID
id: TopicId,
store: &'a dyn BrokerStore,
}
impl<'a> Topic<'a> {
const PREFIX: u8 = b"t"[0];
    // properties' suffixes
const ADVERT: u8 = b"a"[0];
const HEAD: u8 = b"h"[0];
const META: u8 = b"m"[0];
const ALL_PROPERTIES: [u8; 3] = [Self::ADVERT, Self::HEAD, Self::META];
const SUFFIX_FOR_EXIST_CHECK: u8 = Self::META;
pub fn open(id: &TopicId, store: &'a dyn BrokerStore) -> Result<Topic<'a>, StorageError> {
let opening = Topic {
id: id.clone(),
store,
};
if !opening.exists() {
return Err(StorageError::NotFound);
}
Ok(opening)
}
pub fn create(id: &TopicId, store: &'a dyn BrokerStore) -> Result<Topic<'a>, StorageError> {
let acc = Topic {
id: id.clone(),
store,
};
if acc.exists() {
return Err(StorageError::BackendError);
}
let meta = TopicMeta { users: 0 };
store.put(
Self::PREFIX,
&to_vec(&id)?,
Some(Self::META),
to_vec(&meta)?,
)?;
Ok(acc)
}
pub fn exists(&self) -> bool {
self.store
.get(
Self::PREFIX,
&to_vec(&self.id).unwrap(),
Some(Self::SUFFIX_FOR_EXIST_CHECK),
)
.is_ok()
}
pub fn id(&self) -> TopicId {
self.id
}
pub fn add_head(&self, head: &ObjectId) -> Result<(), StorageError> {
if !self.exists() {
return Err(StorageError::BackendError);
}
self.store.put(
Self::PREFIX,
&to_vec(&self.id)?,
Some(Self::HEAD),
to_vec(head)?,
)
}
pub fn remove_head(&self, head: &ObjectId) -> Result<(), StorageError> {
self.store.del_property_value(
Self::PREFIX,
&to_vec(&self.id)?,
Some(Self::HEAD),
to_vec(head)?,
)
}
pub fn has_head(&self, head: &ObjectId) -> Result<(), StorageError> {
self.store.has_property_value(
Self::PREFIX,
&to_vec(&self.id)?,
Some(Self::HEAD),
to_vec(head)?,
)
}
pub fn metadata(&self) -> Result<TopicMeta, StorageError> {
match self
.store
.get(Self::PREFIX, &to_vec(&self.id)?, Some(Self::META))
{
Ok(meta) => Ok(from_slice::<TopicMeta>(&meta)?),
Err(e) => Err(e),
}
}
pub fn set_metadata(&self, meta: &TopicMeta) -> Result<(), StorageError> {
if !self.exists() {
return Err(StorageError::BackendError);
}
self.store.replace(
Self::PREFIX,
&to_vec(&self.id)?,
Some(Self::META),
to_vec(meta)?,
)
}
pub fn del(&self) -> Result<(), StorageError> {
self.store
.del_all(Self::PREFIX, &to_vec(&self.id)?, &Self::ALL_PROPERTIES)
}
}

@@ -0,0 +1,148 @@ p2p-broker/src/connection_local.rs
// Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// All rights reserved.
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
//! Connection to a Broker; can be local or remote.
//! If remote, it uses a Stream and Sink of framed messages.
use futures::{
ready,
stream::Stream,
task::{Context, Poll},
Future,
select, FutureExt,
};
use futures::channel::mpsc;
use std::pin::Pin;
use std::{collections::HashSet, fmt::Debug};
use crate::server::BrokerServer;
use debug_print::*;
use futures::{pin_mut, stream, Sink, SinkExt, StreamExt};
use p2p_repo::object::*;
use p2p_repo::store::*;
use p2p_repo::types::*;
use p2p_repo::utils::*;
use p2p_net::errors::*;
use p2p_net::types::*;
use p2p_net::broker_connection::*;
use std::collections::HashMap;
pub struct BrokerConnectionLocal<'a> {
broker: &'a mut BrokerServer,
user: PubKey,
}
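// A local connection dispatches requests to the BrokerServer in-process,
// without the framing and serialization round-trips of a remote connection.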
#[async_trait::async_trait]
impl<'a> BrokerConnection for BrokerConnectionLocal<'a> {
type OC = BrokerConnectionLocal<'a>;
type BlockStream = async_channel::Receiver<Block>;
async fn close(&mut self) {}
async fn add_user(
&mut self,
user_id: PubKey,
admin_user_pk: PrivKey,
) -> Result<(), ProtocolError> {
let op_content = AddUserContentV0 { user: user_id };
let sig = sign(admin_user_pk, self.user, &serde_bare::to_vec(&op_content)?)?;
self.broker.add_user(self.user, user_id, sig)
}
async fn process_overlay_request(
&mut self,
overlay: OverlayId,
request: BrokerOverlayRequestContentV0,
) -> Result<(), ProtocolError> {
match request {
BrokerOverlayRequestContentV0::OverlayConnect(_) => {
self.broker.connect_overlay(self.user, overlay)
}
BrokerOverlayRequestContentV0::OverlayJoin(j) => {
self.broker
.join_overlay(self.user, overlay, j.repo_pubkey(), j.secret(), j.peers())
}
BrokerOverlayRequestContentV0::ObjectPin(op) => {
self.broker.pin_object(self.user, overlay, op.id())
}
BrokerOverlayRequestContentV0::ObjectUnpin(op) => {
self.broker.unpin_object(self.user, overlay, op.id())
}
BrokerOverlayRequestContentV0::ObjectDel(op) => {
self.broker.del_object(self.user, overlay, op.id())
}
BrokerOverlayRequestContentV0::BlockPut(b) => {
self.broker.put_block(self.user, overlay, b.block())
}
_ => Err(ProtocolError::InvalidState),
}
}
async fn process_overlay_request_objectid_response(
&mut self,
overlay: OverlayId,
request: BrokerOverlayRequestContentV0,
) -> Result<ObjectId, ProtocolError> {
match request {
BrokerOverlayRequestContentV0::ObjectCopy(oc) => {
self.broker
.copy_object(self.user, overlay, oc.id(), oc.expiry())
}
_ => Err(ProtocolError::InvalidState),
}
}
async fn process_overlay_request_stream_response(
&mut self,
overlay: OverlayId,
request: BrokerOverlayRequestContentV0,
) -> Result<Pin<Box<Self::BlockStream>>, ProtocolError> {
match request {
BrokerOverlayRequestContentV0::BlockGet(b) => self
.broker
.get_block(self.user, overlay, b.id(), b.include_children(), b.topic())
.map(|r| Box::pin(r)),
BrokerOverlayRequestContentV0::BranchSyncReq(b) => self
.broker
.sync_branch(
self.user,
&overlay,
b.heads(),
b.known_heads(),
b.known_commits(),
)
.map(|r| Box::pin(r)),
_ => Err(ProtocolError::InvalidState),
}
}
async fn del_user(&mut self, user_id: PubKey, admin_user_pk: PrivKey) {}
async fn add_client(&mut self, user_id: PubKey, admin_user_pk: PrivKey) {}
async fn del_client(&mut self, user_id: PubKey, admin_user_pk: PrivKey) {}
async fn overlay_connect(
&mut self,
repo_link: &RepoLink,
public: bool,
) -> Result<OverlayConnectionClient<BrokerConnectionLocal<'a>>, ProtocolError> {
let overlay = self.process_overlay_connect(repo_link, public).await?;
Ok(OverlayConnectionClient::create(self, overlay, repo_link.clone()))
}
}
impl<'a> BrokerConnectionLocal<'a> {
pub fn new(broker: &'a mut BrokerServer, user: PubKey) -> BrokerConnectionLocal<'a> {
BrokerConnectionLocal { broker, user }
}
}

@@ -0,0 +1,20 @@ p2p-broker/src/lib.rs
// Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// All rights reserved.
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
pub mod broker_store_account;
pub mod broker_store_config;
pub mod broker_store_overlay;
pub mod broker_store_peer;
pub mod broker_store_repostoreinfo;
pub mod broker_store_topic;
pub mod connection_local;
pub mod server;
pub mod server_ws;
pub mod auth;

@@ -0,0 +1,891 @@ p2p-broker/src/server.rs
// Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// All rights reserved.
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
//! A Broker server
use std::collections::HashMap;
use std::collections::HashSet;
use std::pin::Pin;
use std::sync::Arc;
use std::sync::RwLock;
use crate::broker_store_account::Account;
use crate::auth::*;
use crate::broker_store_config::Config;
use crate::broker_store_config::ConfigMode;
use crate::connection_local::BrokerConnectionLocal;
use crate::broker_store_overlay::Overlay;
use crate::broker_store_peer::Peer;
use crate::broker_store_repostoreinfo::RepoStoreId;
use crate::broker_store_repostoreinfo::RepoStoreInfo;
use async_std::task;
use debug_print::*;
use futures::future::BoxFuture;
use futures::future::OptionFuture;
use futures::FutureExt;
use futures::Stream;
use p2p_repo::object::Object;
use p2p_repo::store::RepoStore;
use p2p_repo::store::StorageError;
use p2p_repo::types::*;
use p2p_repo::utils::*;
use p2p_net::errors::*;
use p2p_net::types::*;
use p2p_stores_lmdb::broker_store::LmdbBrokerStore;
use p2p_stores_lmdb::repo_store::LmdbRepoStore;
#[derive(Debug, Eq, PartialEq, Clone)]
pub enum BrokerError {
CannotStart,
MismatchedMode,
OverlayNotFound,
}
impl From<BrokerError> for ProtocolError {
fn from(e: BrokerError) -> Self {
match e {
BrokerError::CannotStart => ProtocolError::OverlayNotFound,
BrokerError::OverlayNotFound => ProtocolError::OverlayNotFound,
_ => ProtocolError::BrokerError,
}
}
}
impl From<p2p_repo::store::StorageError> for BrokerError {
fn from(e: p2p_repo::store::StorageError) -> Self {
match e {
p2p_repo::store::StorageError::InvalidValue => BrokerError::MismatchedMode,
_ => BrokerError::CannotStart,
}
}
}
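// Protocol state machine of a server-side connection:
// Start -> Auth -> Broker (after a successful ClientAuth), or
// Start -> Ext (a single request/response, then the connection is closed).
// P2P is not implemented yet.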
#[derive(Debug)]
enum ProtocolType {
Start,
Auth,
Broker,
Ext,
P2P,
}
pub struct ProtocolHandler {
broker: Arc<BrokerServer>,
protocol: ProtocolType,
auth_protocol: Option<AuthProtocolHandler>,
broker_protocol: Option<BrokerProtocolHandler>,
ext_protocol: Option<ExtProtocolHandler>,
r: Option<async_channel::Receiver<Vec<u8>>>,
s: async_channel::Sender<Vec<u8>>,
}
impl ProtocolHandler {
pub fn async_frames_receiver(&mut self) -> async_channel::Receiver<Vec<u8>> {
self.r.take().unwrap()
}
/// Handle incoming message
pub async fn handle_incoming(
&mut self,
frame: Vec<u8>,
) -> (
Result<Vec<u8>, ProtocolError>,
OptionFuture<BoxFuture<'static, u16>>,
) {
//debug_println!("SERVER PROTOCOL {:?}", &self.protocol);
match &self.protocol {
ProtocolType::Start => {
let message = serde_bare::from_slice::<StartProtocol>(&frame);
match message {
Ok(StartProtocol::Auth(b)) => {
self.protocol = ProtocolType::Auth;
self.auth_protocol = Some(AuthProtocolHandler::new());
return (
self.auth_protocol.as_mut().unwrap().handle_init(b),
OptionFuture::from(None),
);
}
Ok(StartProtocol::Ext(ext)) => {
self.protocol = ProtocolType::Ext;
self.ext_protocol = Some(ExtProtocolHandler {});
let reply = self.ext_protocol.as_ref().unwrap().handle_incoming(ext);
return (
Ok(serde_bare::to_vec(&reply).unwrap()),
OptionFuture::from(None),
);
}
                    Err(_e) => {
                        return (
                            Err(ProtocolError::SerializationError),
                            OptionFuture::from(None),
                        );
                    }
}
}
ProtocolType::Auth => {
let res = self.auth_protocol.as_mut().unwrap().handle_incoming(frame);
match res.1.await {
None => {
// we switch to Broker protocol
self.protocol = ProtocolType::Broker;
self.broker_protocol = Some(BrokerProtocolHandler {
user: self.auth_protocol.as_ref().unwrap().get_user().unwrap(),
broker: Arc::clone(&self.broker),
async_frames_sender: self.s.clone(),
});
self.auth_protocol = None;
(res.0, OptionFuture::from(None))
}
Some(e) => (res.0, OptionFuture::from(Some(async move { e }.boxed()))),
}
}
ProtocolType::Broker => {
let message = serde_bare::from_slice::<BrokerMessage>(&frame);
                match message {
Ok(message) => {
let reply = self
.broker_protocol
.as_ref()
.unwrap()
.handle_incoming(message)
.await;
(Ok(serde_bare::to_vec(&reply.0).unwrap()), reply.1)
}
                    Err(_e) => {
                        (Err(ProtocolError::SerializationError), OptionFuture::from(None))
                    }
}
}
ProtocolType::Ext => {
                // The Ext protocol does not accept a second ExtRequest on the
                // same connection: closing the connection.
(Err(ProtocolError::InvalidState), OptionFuture::from(None))
}
ProtocolType::P2P => {
unimplemented!()
}
}
}
}
pub struct ExtProtocolHandler {}
impl ExtProtocolHandler {
pub fn handle_incoming(&self, msg: ExtRequest) -> ExtResponse {
unimplemented!()
}
}
pub struct BrokerProtocolHandler {
broker: Arc<BrokerServer>,
user: PubKey,
async_frames_sender: async_channel::Sender<Vec<u8>>,
}
use std::{thread, time};
impl BrokerProtocolHandler {
fn prepare_reply_broker_message(
res: Result<(), ProtocolError>,
id: u64,
padding_size: usize,
) -> BrokerMessage {
let result = match res {
Ok(_) => 0,
Err(e) => e.into(),
};
let msg = BrokerMessage::V0(BrokerMessageV0 {
padding: vec![0; padding_size],
content: BrokerMessageContentV0::BrokerResponse(BrokerResponse::V0(BrokerResponseV0 {
id,
result,
})),
});
msg
}
fn prepare_reply_broker_overlay_message(
res: Result<(), ProtocolError>,
id: u64,
overlay: OverlayId,
block: Option<Block>,
padding_size: usize,
) -> BrokerMessage {
let result = match res {
Ok(_) => 0,
Err(e) => e.into(),
};
let content = match block {
Some(b) => Some(BrokerOverlayResponseContentV0::Block(b)),
None => None,
};
let msg = BrokerMessage::V0(BrokerMessageV0 {
padding: vec![0; padding_size],
content: BrokerMessageContentV0::BrokerOverlayMessage(BrokerOverlayMessage::V0(
BrokerOverlayMessageV0 {
overlay,
content: BrokerOverlayMessageContentV0::BrokerOverlayResponse(
BrokerOverlayResponse::V0(BrokerOverlayResponseV0 {
id,
result,
content,
}),
),
},
)),
});
msg
}
fn prepare_reply_broker_overlay_message_stream(
res: Result<Block, ProtocolError>,
id: u64,
overlay: OverlayId,
padding_size: usize,
) -> BrokerMessage {
let result: u16 = match &res {
            Ok(_r) => ProtocolError::PartialContent.into(),
Err(e) => (*e).clone().into(),
};
let content = match res {
Ok(r) => Some(BrokerOverlayResponseContentV0::Block(r)),
Err(_) => None,
};
let msg = BrokerMessage::V0(BrokerMessageV0 {
padding: vec![0; padding_size],
content: BrokerMessageContentV0::BrokerOverlayMessage(BrokerOverlayMessage::V0(
BrokerOverlayMessageV0 {
overlay,
content: BrokerOverlayMessageContentV0::BrokerOverlayResponse(
BrokerOverlayResponse::V0(BrokerOverlayResponseV0 {
id,
result,
content,
}),
),
},
)),
});
msg
}
async fn send_block_stream_response_to_client(
&self,
res: Result<async_channel::Receiver<Block>, ProtocolError>,
id: u64,
overlay: OverlayId,
padding_size: usize,
) -> (BrokerMessage, OptionFuture<BoxFuture<'static, u16>>) {
        // Return an error or the first block, and set up a spawned task that streams the remaining blocks.
let one_reply: (
Result<Block, ProtocolError>,
OptionFuture<BoxFuture<'static, u16>>,
) = match res {
Err(e) => (Err(e), OptionFuture::from(None)),
Ok(stream) => {
let one = stream
.recv_blocking()
                    .map_err(|_e| ProtocolError::EndOfStream);
if one.is_ok() {
let sender = self.async_frames_sender.clone();
let a = OptionFuture::from(Some(
async move {
while let Ok(next) = stream.recv().await {
let msg = Self::prepare_reply_broker_overlay_message_stream(
Ok(next),
id,
overlay,
padding_size,
);
let res = sender.send(serde_bare::to_vec(&msg).unwrap()).await;
if res.is_err() {
break;
}
}
// sending end of stream
let msg = Self::prepare_reply_broker_overlay_message_stream(
Err(ProtocolError::EndOfStream),
id,
overlay,
padding_size,
);
let _ = sender.send(serde_bare::to_vec(&msg).unwrap()).await;
0
}
.boxed(),
));
(one, a)
} else {
(one, OptionFuture::from(None))
}
}
};
return (
Self::prepare_reply_broker_overlay_message_stream(
one_reply.0,
id,
overlay,
padding_size,
),
one_reply.1,
);
}
pub async fn handle_incoming(
&self,
msg: BrokerMessage,
) -> (BrokerMessage, OptionFuture<BoxFuture<'static, u16>>) {
        let padding_size = 20; // TODO: randomize the padding size if the server config specifies a padding_max
let id = msg.id();
let content = msg.content();
match content {
BrokerMessageContentV0::BrokerRequest(req) => (
Self::prepare_reply_broker_message(
match req.content_v0() {
BrokerRequestContentV0::AddUser(cmd) => {
self.broker.add_user(self.user, cmd.user(), cmd.sig())
}
BrokerRequestContentV0::DelUser(cmd) => {
self.broker.del_user(self.user, cmd.user(), cmd.sig())
}
BrokerRequestContentV0::AddClient(cmd) => {
self.broker.add_client(self.user, cmd.client(), cmd.sig())
}
BrokerRequestContentV0::DelClient(cmd) => {
self.broker.del_client(self.user, cmd.client(), cmd.sig())
}
},
id,
padding_size,
),
OptionFuture::from(None),
),
BrokerMessageContentV0::BrokerResponse(res) => (
Self::prepare_reply_broker_message(
Err(ProtocolError::InvalidState),
id,
padding_size,
),
OptionFuture::from(None),
),
BrokerMessageContentV0::BrokerOverlayMessage(omsg) => {
let overlay = omsg.overlay_id();
let block = None;
let mut res = Err(ProtocolError::InvalidState);
if omsg.is_request() {
match omsg.overlay_request().content_v0() {
BrokerOverlayRequestContentV0::OverlayConnect(_) => {
res = self.broker.connect_overlay(self.user, overlay)
}
BrokerOverlayRequestContentV0::OverlayJoin(j) => {
res = self.broker.join_overlay(
self.user,
overlay,
j.repo_pubkey(),
j.secret(),
j.peers(),
)
}
BrokerOverlayRequestContentV0::ObjectDel(op) => {
res = self.broker.del_object(self.user, overlay, op.id())
}
BrokerOverlayRequestContentV0::ObjectPin(op) => {
res = self.broker.pin_object(self.user, overlay, op.id())
}
BrokerOverlayRequestContentV0::ObjectUnpin(op) => {
res = self.broker.unpin_object(self.user, overlay, op.id())
}
BrokerOverlayRequestContentV0::BlockPut(b) => {
res = self.broker.put_block(self.user, overlay, b.block())
}
BrokerOverlayRequestContentV0::BranchSyncReq(b) => {
let res = self.broker.sync_branch(
self.user,
&overlay,
b.heads(),
b.known_heads(),
b.known_commits(),
);
return self
.send_block_stream_response_to_client(
res,
id,
overlay,
padding_size,
)
.await;
}
BrokerOverlayRequestContentV0::BlockGet(b) => {
let res = self.broker.get_block(
self.user,
overlay,
b.id(),
b.include_children(),
b.topic(),
);
return self
.send_block_stream_response_to_client(
res,
id,
overlay,
padding_size,
)
.await;
}
_ => {}
}
}
(
Self::prepare_reply_broker_overlay_message(
res,
id,
overlay,
block,
padding_size,
),
OptionFuture::from(None),
)
}
}
}
}
const REPO_STORES_SUBDIR: &str = "repos";
pub struct BrokerServer {
store: LmdbBrokerStore,
mode: ConfigMode,
repo_stores: Arc<RwLock<HashMap<RepoStoreId, LmdbRepoStore>>>,
// only used in ConfigMode::Local
    // TODO: change it to the version below in order to avoid a double hashmap lookup in local mode (hard to do):
    //overlayid_to_repostore: HashMap<RepoStoreId, &'a LmdbRepoStore>,
overlayid_to_repostore: Arc<RwLock<HashMap<OverlayId, RepoStoreId>>>,
}
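// RepoStores are opened lazily: the first request touching an overlay opens
// (or creates) the corresponding LMDB store and caches it in `repo_stores`.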
impl BrokerServer {
pub fn new(store: LmdbBrokerStore, mode: ConfigMode) -> Result<BrokerServer, BrokerError> {
        let configmode: ConfigMode;
{
let config = Config::get_or_create(&mode, &store)?;
configmode = config.mode()?;
}
Ok(BrokerServer {
store,
mode: configmode,
repo_stores: Arc::new(RwLock::new(HashMap::new())),
overlayid_to_repostore: Arc::new(RwLock::new(HashMap::new())),
})
}
fn open_or_create_repostore<F, R>(
&self,
repostore_id: RepoStoreId,
f: F,
) -> Result<R, ProtocolError>
where
F: FnOnce(&LmdbRepoStore) -> Result<R, ProtocolError>,
{
// first let's find it in the BrokerStore.repostoreinfo table in order to get the encryption key
let info = RepoStoreInfo::open(&repostore_id, &self.store)
            .map_err(|_e| BrokerError::OverlayNotFound)?;
let key = info.key()?;
let mut path = self.store.path();
path.push(REPO_STORES_SUBDIR);
path.push::<String>(repostore_id.clone().into());
        std::fs::create_dir_all(&path).map_err(|_e| ProtocolError::WriteError)?;
println!("path for repo store: {}", path.to_str().unwrap());
let repo = LmdbRepoStore::open(&path, *key.slice());
let mut writer = self.repo_stores.write().expect("write repo_store hashmap");
writer.insert(repostore_id.clone(), repo);
f(writer.get(&repostore_id).unwrap())
}
fn get_repostore_from_overlay_id<F, R>(
&self,
overlay_id: &OverlayId,
f: F,
) -> Result<R, ProtocolError>
where
F: FnOnce(&LmdbRepoStore) -> Result<R, ProtocolError>,
{
if self.mode == ConfigMode::Core {
let repostore_id = RepoStoreId::Overlay(*overlay_id);
let reader = self.repo_stores.read().expect("read repo_store hashmap");
let rep = reader.get(&repostore_id);
match rep {
Some(repo) => return f(repo),
None => {
// we need to open/create it
// TODO: last_access
return self.open_or_create_repostore(repostore_id, |repo| f(repo));
}
}
} else {
// it is ConfigMode::Local
{
let reader = self
.overlayid_to_repostore
.read()
.expect("read overlayid_to_repostore hashmap");
match reader.get(&overlay_id) {
Some(repostoreid) => {
let reader = self.repo_stores.read().expect("read repo_store hashmap");
match reader.get(repostoreid) {
Some(repo) => return f(repo),
None => return Err(ProtocolError::BrokerError),
}
}
None => {}
};
}
// we need to open/create it
// first let's find it in the BrokerStore.overlay table to retrieve its repo_pubkey
debug_println!("searching for overlayId {}", overlay_id);
let overlay = Overlay::open(overlay_id, &self.store)?;
debug_println!("found overlayId {}", overlay_id);
let repo_id = overlay.repo()?;
let repostore_id = RepoStoreId::Repo(repo_id);
let mut writer = self
.overlayid_to_repostore
.write()
.expect("write overlayid_to_repostore hashmap");
writer.insert(*overlay_id, repostore_id.clone());
// now opening/creating the RepoStore
// TODO: last_access
return self.open_or_create_repostore(repostore_id, |repo| f(repo));
}
}
pub fn local_connection(&mut self, user: PubKey) -> BrokerConnectionLocal {
BrokerConnectionLocal::new(self, user)
}
pub fn protocol_handler(self: Arc<Self>) -> ProtocolHandler {
let (s, r) = async_channel::unbounded::<Vec<u8>>();
return ProtocolHandler {
broker: Arc::clone(&self),
protocol: ProtocolType::Start,
auth_protocol: None,
broker_protocol: None,
ext_protocol: None,
r: Some(r),
s,
};
}
pub fn add_user(
&self,
admin_user: PubKey,
user_id: PubKey,
sig: Sig,
) -> Result<(), ProtocolError> {
debug_println!("ADDING USER {}", user_id);
// TODO add is_admin boolean
// TODO check that admin_user is indeed an admin
// verify signature
let op_content = AddUserContentV0 { user: user_id };
let _ = verify(&serde_bare::to_vec(&op_content).unwrap(), sig, admin_user)?;
// check user_id is not already present
let account = Account::open(&user_id, &self.store);
if account.is_ok() {
Err(ProtocolError::UserAlreadyExists)
}
// if not, add to store
else {
let _ = Account::create(&user_id, false, &self.store)?;
Ok(())
}
}
pub fn del_user(
&self,
admin_user: PubKey,
user_id: PubKey,
sig: Sig,
) -> Result<(), ProtocolError> {
// TODO implement del_user
Ok(())
}
pub fn add_client(
&self,
user: PubKey,
client_id: PubKey,
sig: Sig,
) -> Result<(), ProtocolError> {
// TODO implement add_client
Ok(())
}
pub fn del_client(
&self,
user: PubKey,
client_id: PubKey,
sig: Sig,
) -> Result<(), ProtocolError> {
// TODO implement del_client
Ok(())
}
pub fn connect_overlay(&self, user: PubKey, overlay: OverlayId) -> Result<(), ProtocolError> {
// TODO check that the broker has already joined this overlay. if not, send OverlayNotJoined
Err(ProtocolError::OverlayNotJoined)
}
pub fn del_object(
&self,
user: PubKey,
overlay: Digest,
id: ObjectId,
) -> Result<(), ProtocolError> {
self.get_repostore_from_overlay_id(&overlay, |store| {
// TODO, only admin users can delete on a store on this broker
let obj = Object::load(id, None, store);
if obj.is_err() {
return Err(ProtocolError::NotFound);
}
let o = obj.ok().unwrap();
let mut deduplicated: HashSet<ObjectId> = HashSet::new();
for block in o.blocks() {
let id = block.id();
if deduplicated.get(&id).is_none() {
store.del(&id)?;
deduplicated.insert(id);
}
}
Ok(())
})
}
pub fn pin_object(
&self,
user: PubKey,
overlay: OverlayId,
id: ObjectId,
) -> Result<(), ProtocolError> {
self.get_repostore_from_overlay_id(&overlay, |store| {
// TODO, store the user who pins, and manage reference counting on how many users pin/unpin
let obj = Object::load(id, None, store);
if obj.is_err() {
return Err(ProtocolError::NotFound);
}
let o = obj.ok().unwrap();
let mut deduplicated: HashSet<ObjectId> = HashSet::new();
for block in o.blocks() {
let id = block.id();
if deduplicated.get(&id).is_none() {
store.pin(&id)?;
deduplicated.insert(id);
}
}
Ok(())
})
}
pub fn unpin_object(
&self,
user: PubKey,
overlay: OverlayId,
id: ObjectId,
) -> Result<(), ProtocolError> {
self.get_repostore_from_overlay_id(&overlay, |store| {
// TODO, store the user who pins, and manage reference counting on how many users pin/unpin
let obj = Object::load(id, None, store);
if obj.is_err() {
return Err(ProtocolError::NotFound);
}
let o = obj.ok().unwrap();
let mut deduplicated: HashSet<ObjectId> = HashSet::new();
for block in o.blocks() {
let id = block.id();
if deduplicated.get(&id).is_none() {
store.unpin(&id)?;
deduplicated.insert(id);
}
}
Ok(())
})
}
pub fn copy_object(
&self,
user: PubKey,
overlay: OverlayId,
id: ObjectId,
expiry: Option<Timestamp>,
) -> Result<ObjectId, ProtocolError> {
// self.get_repostore_from_overlay_id(&overlay, |store| {
// //let obj = Object::from_store(id, None, store);
// //Ok(Object::copy(id, expiry, store)?)
// });
todo!();
}
pub fn put_block(
&self,
user: PubKey,
overlay: OverlayId,
block: &Block,
) -> Result<(), ProtocolError> {
self.get_repostore_from_overlay_id(&overlay, |store| {
let _ = store.put(block)?;
Ok(())
})
}
pub fn get_block(
&self,
user: PubKey,
overlay: OverlayId,
id: BlockId,
include_children: bool,
topic: Option<PubKey>,
) -> Result<async_channel::Receiver<Block>, ProtocolError> {
self.get_repostore_from_overlay_id(&overlay, |store| {
let (s, r) = async_channel::unbounded::<Block>();
if !include_children {
let block = store.get(&id)?;
s.send_blocking(block)
.map_err(|_e| ProtocolError::WriteError)?;
Ok(r)
} else {
let obj = Object::load(id, None, store);
// TODO return partial blocks when some are missing ?
if obj.is_err() {
//&& obj.err().unwrap().len() == 1 && obj.err().unwrap()[0] == id {
return Err(ProtocolError::NotFound);
}
// TODO use a task to send non blocking (streaming)
let o = obj.ok().unwrap();
//debug_println!("{} BLOCKS ", o.blocks().len());
let mut deduplicated: HashSet<BlockId> = HashSet::new();
for block in o.blocks() {
let id = block.id();
if deduplicated.get(&id).is_none() {
s.send_blocking(block.clone())
.map_err(|_e| ProtocolError::WriteError)?;
deduplicated.insert(id);
}
}
Ok(r)
}
})
}
pub fn sync_branch(
&self,
user: PubKey,
overlay: &OverlayId,
heads: &Vec<ObjectId>,
known_heads: &Vec<ObjectId>,
known_commits: &BloomFilter,
) -> Result<async_channel::Receiver<Block>, ProtocolError> {
//debug_println!("heads {:?}", heads);
//debug_println!("known_heads {:?}", known_heads);
//debug_println!("known_commits {:?}", known_commits);
self.get_repostore_from_overlay_id(&overlay, |store| {
let (s, r) = async_channel::unbounded::<Block>();
let res = Branch::sync_req(heads, known_heads, known_commits, store)
                .map_err(|_e| ProtocolError::ObjectParseError)?;
// todo, use a task to send non blocking (streaming)
debug_println!("SYNCING {} COMMITS", res.len());
let mut deduplicated: HashSet<BlockId> = HashSet::new();
for objectid in res {
let object = Object::load(objectid, None, store)?;
for block in object.blocks() {
let id = block.id();
if deduplicated.get(&id).is_none() {
s.send_blocking(block.clone())
.map_err(|_e| ProtocolError::WriteError)?;
deduplicated.insert(id);
}
}
}
Ok(r)
})
}
fn compute_repostore_id(&self, overlay: OverlayId, repo_id: Option<PubKey>) -> RepoStoreId {
match self.mode {
ConfigMode::Core => RepoStoreId::Overlay(overlay),
ConfigMode::Local => RepoStoreId::Repo(repo_id.unwrap()),
}
}
pub fn join_overlay(
&self,
user: PubKey,
overlay_id: OverlayId,
repo_id: Option<PubKey>,
secret: SymKey,
peers: &Vec<PeerAdvert>,
) -> Result<(), ProtocolError> {
// check if this overlay already exists
//debug_println!("SEARCHING OVERLAY");
let overlay_res = Overlay::open(&overlay_id, &self.store);
let overlay = match overlay_res {
Err(StorageError::NotFound) => {
// we have to add it
if self.mode == ConfigMode::Local && repo_id.is_none() {
return Err(ProtocolError::RepoIdRequired);
}
let over = Overlay::create(
&overlay_id,
&secret,
if self.mode == ConfigMode::Local {
repo_id
} else {
None
},
&self.store,
)?;
// we need to add an encryption key for the repostore.
let mut random_buf = [0u8; 32];
getrandom::getrandom(&mut random_buf).unwrap();
let key = SymKey::ChaCha20Key(random_buf);
let _ = RepoStoreInfo::create(
&self.compute_repostore_id(overlay_id, repo_id),
&key,
&self.store,
)?; // TODO in case of error, delete the previously created Overlay
//debug_println!("KEY ADDED");
over
}
Err(e) => return Err(e.into()),
Ok(overlay) => overlay,
};
//debug_println!("OVERLAY FOUND");
// add the peers to the overlay
for advert in peers {
Peer::update_or_create(advert, &self.store)?;
overlay.add_peer(&advert.peer())?;
}
//debug_println!("PEERS ADDED");
// now adding the overlay_id to the account
let account = Account::open(&user, &self.store)?; // TODO in case of error, delete the previously created Overlay
account.add_overlay(&overlay_id)?;
//debug_println!("USER <-> OVERLAY");
//TODO: connect to peers
Ok(())
}
}

@@ -0,0 +1,160 @@ p2p-broker/src/server_ws.rs
/*
* Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
* All rights reserved.
* Licensed under the Apache License, Version 2.0
* <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
* or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
* at your option. All files in the project carrying such
* notice may not be copied, modified, or distributed except
* according to those terms.
*/
use async_std::net::{TcpListener, TcpStream};
use async_std::sync::Mutex;
use async_std::task;
use async_tungstenite::accept_async;
use async_tungstenite::tungstenite::protocol::Message;
use debug_print::*;
use futures::{SinkExt, StreamExt};
use crate::broker_store_config::ConfigMode;
use crate::server::*;
use p2p_stores_lmdb::broker_store::LmdbBrokerStore;
use p2p_stores_lmdb::repo_store::LmdbRepoStore;
use std::fs;
use std::sync::Arc;
use tempfile::Builder;
use std::{thread, time};
pub async fn connection_loop(tcp: TcpStream, mut handler: ProtocolHandler) -> std::io::Result<()> {
    let ws = accept_async(tcp).await.unwrap();
    let (tx, mut rx) = ws.split();
    let tx_mutex = Arc::new(Mutex::new(tx));
    // set up the task that forwards server-initiated (streamed) frames to the websocket
let receiver = handler.async_frames_receiver();
let ws_in_task = Arc::clone(&tx_mutex);
task::spawn(async move {
while let Ok(frame) = receiver.recv().await {
let mut sink = ws_in_task
.lock()
.await;
if sink.send(Message::binary(frame))
.await
.is_err()
{
break;
}
}
debug_println!("end of async frames loop");
let mut sink = ws_in_task.lock().await;
let _ = sink.send(Message::Close(None)).await;
let _ = sink.close().await;
});
while let Some(msg) = rx.next().await {
//debug_println!("RCV: {:?}", msg);
let msg = match msg {
Err(e) => {
debug_println!("Error on server stream: {:?}", e);
                // Errors returned directly through the AsyncRead/Write API are fatal,
                // generally an error on the underlying transport. Closing the connection.
break;
}
Ok(m) => m,
};
//TODO implement PING messages
if msg.is_close() {
debug_println!("CLOSE from CLIENT");
break;
} else if msg.is_binary() {
//debug_println!("server received binary: {:?}", msg);
let replies = handler.handle_incoming(msg.into_data()).await;
match replies.0 {
Err(e) => {
debug_println!("Protocol Error: {:?}", e);
// dealing with ProtocolErrors (closing the connection)
break;
}
Ok(r) => {
if tx_mutex
.lock()
.await
.send(Message::binary(r))
.await
.is_err()
{
//dealing with sending errors (closing the connection)
break;
}
}
}
match replies.1.await {
Some(errcode) => {
if errcode > 0 {
debug_println!("Close due to error code : {:?}", errcode);
//closing connection
break;
}
}
None => {}
}
}
}
let mut sink = tx_mutex.lock().await;
let _ = sink.send(Message::Close(None)).await;
let _ = sink.close().await;
debug_println!("end of sync read+write loop");
Ok(())
}
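// NOTE: both server entry points below currently store their data in a fresh
// tempdir with an all-zero master key, i.e. an ephemeral development setup.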
pub async fn run_server_accept_one(addrs: &str) -> std::io::Result<()> {
let root = tempfile::Builder::new()
.prefix("node-daemon")
.tempdir()
.unwrap();
let master_key: [u8; 32] = [0; 32];
std::fs::create_dir_all(root.path()).unwrap();
println!("{}", root.path().to_str().unwrap());
let store = LmdbBrokerStore::open(root.path(), master_key);
let server: BrokerServer =
BrokerServer::new(store, ConfigMode::Local).expect("starting broker");
let socket = TcpListener::bind(addrs).await?;
debug_println!("Listening on 127.0.0.1:3012");
let mut connections = socket.incoming();
let server_arc = Arc::new(server);
let tcp = connections.next().await.unwrap()?;
let proto_handler = Arc::clone(&server_arc).protocol_handler();
let _handle = task::spawn(connection_loop(tcp, proto_handler));
Ok(())
}
pub async fn run_server(addrs: &str) -> std::io::Result<()> {
let root = tempfile::Builder::new()
.prefix("node-daemon")
.tempdir()
.unwrap();
let master_key: [u8; 32] = [0; 32];
std::fs::create_dir_all(root.path()).unwrap();
println!("{}", root.path().to_str().unwrap());
let store = LmdbBrokerStore::open(root.path(), master_key);
let server: BrokerServer =
BrokerServer::new(store, ConfigMode::Local).expect("starting broker");
let socket = TcpListener::bind(addrs).await?;
let mut connections = socket.incoming();
let server_arc = Arc::new(server);
while let Some(tcp) = connections.next().await {
let proto_handler = Arc::clone(&server_arc).protocol_handler();
let _handle = task::spawn(connection_loop(tcp.unwrap(), proto_handler));
}
Ok(())
}

@@ -0,0 +1,24 @@ p2p-client/Cargo.toml
[package]
name = "p2p-client"
version = "0.1.0"
edition = "2021"
license = "MIT/Apache-2.0"
authors = ["Niko PLP <niko@nextgraph.org>"]
description = "P2P Client module of NextGraph"
repository = "https://git.nextgraph.org/NextGraph/nextgraph-rs"
[dependencies]
debug_print = "1.0.0"
p2p-repo = { path = "../p2p-repo" }
p2p-net = { path = "../p2p-net" }
chacha20 = "0.9.0"
serde = { version = "1.0", features = ["derive"] }
serde_bare = "0.5.0"
serde_bytes = "0.11.7"
xactor = "0.7.11"
async-trait = "0.1.57"
async-std = { version = "1.7.0", features = ["attributes"] }
futures = "0.3.24"
async-channel = "1.7.1"
async-oneshot = "0.5.0"
async-tungstenite = { version = "0.17.2", features = ["async-std-runtime","async-native-tls"] }

@@ -0,0 +1,596 @@ p2p-client/src/connection_remote.rs
/*
* Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
* All rights reserved.
* Licensed under the Apache License, Version 2.0
* <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
* or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
* at your option. All files in the project carrying such
* notice may not be copied, modified, or distributed except
* according to those terms.
*/
use async_std::task;
use async_std::sync::Mutex;
use futures::{
ready,
stream::Stream,
task::{Context, Poll},
Future,
select, FutureExt,
};
use futures::channel::mpsc;
use std::pin::Pin;
use std::{collections::HashSet, fmt::Debug};
use async_oneshot::oneshot;
use debug_print::*;
use futures::{pin_mut, stream, Sink, SinkExt, StreamExt};
use p2p_repo::object::*;
use p2p_repo::store::*;
use p2p_repo::types::*;
use p2p_repo::utils::*;
use p2p_net::errors::*;
use p2p_net::types::*;
use p2p_net::broker_connection::*;
use std::collections::HashMap;
use std::sync::{Arc, RwLock};
use xactor::{message, spawn, Actor, Addr, Handler, WeakAddr};
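// Request/response correlation: each outgoing request takes the actor id of a
// freshly spawned BrokerMessageActor (or BrokerMessageStreamActor) as its
// request id; the reader loop looks the id up in `actors` / `stream_actors`
// and forwards the matching reply to that actor.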
#[message]
struct BrokerMessageXActor(BrokerMessage);
struct BrokerMessageActor {
r: Option<async_oneshot::Receiver<BrokerMessage>>,
s: async_oneshot::Sender<BrokerMessage>,
}
impl Actor for BrokerMessageActor {}
impl BrokerMessageActor {
fn new() -> BrokerMessageActor {
let (s, r) = oneshot::<BrokerMessage>();
BrokerMessageActor { r: Some(r), s }
}
fn resolve(&mut self, msg: BrokerMessage) {
let _ = self.s.send(msg);
}
fn receiver(&mut self) -> async_oneshot::Receiver<BrokerMessage> {
self.r.take().unwrap()
}
}
struct BrokerMessageStreamActor {
r: Option<async_channel::Receiver<Block>>,
s: async_channel::Sender<Block>,
error_r: Option<async_oneshot::Receiver<Option<ProtocolError>>>,
error_s: Option<async_oneshot::Sender<Option<ProtocolError>>>,
}
impl Actor for BrokerMessageStreamActor {}
impl BrokerMessageStreamActor {
fn new() -> BrokerMessageStreamActor {
let (s, r) = async_channel::unbounded::<Block>();
let (error_s, error_r) = oneshot::<Option<ProtocolError>>();
BrokerMessageStreamActor {
r: Some(r),
s,
error_r: Some(error_r),
error_s: Some(error_s),
}
}
async fn partial(&mut self, block: Block) -> Result<(), ProtocolError> {
//debug_println!("GOT PARTIAL {:?}", block.id());
self.s
.send(block)
.await
            .map_err(|_e| ProtocolError::WriteError)
}
fn receiver(&mut self) -> async_channel::Receiver<Block> {
self.r.take().unwrap()
}
fn error_receiver(&mut self) -> async_oneshot::Receiver<Option<ProtocolError>> {
self.error_r.take().unwrap()
}
fn send_error(&mut self, err: Option<ProtocolError>) {
if self.error_s.is_some() {
let _ = self.error_s.take().unwrap().send(err);
self.error_s = None;
}
}
fn close(&mut self) {
self.s.close();
}
}
#[async_trait::async_trait]
impl Handler<BrokerMessageXActor> for BrokerMessageActor {
async fn handle(&mut self, ctx: &mut xactor::Context<Self>, msg: BrokerMessageXActor) {
//println!("handling {:?}", msg.0);
self.resolve(msg.0);
ctx.stop(None);
}
}
#[async_trait::async_trait]
impl Handler<BrokerMessageXActor> for BrokerMessageStreamActor {
async fn handle(&mut self, ctx: &mut xactor::Context<Self>, msg: BrokerMessageXActor) {
//println!("handling {:?}", msg.0);
let res: Result<Option<Block>, ProtocolError> = msg.0.into();
match res {
Err(e) => {
self.send_error(Some(e));
ctx.stop(None);
self.close();
}
Ok(Some(b)) => {
self.send_error(None);
// it must be a partial content
let res = self.partial(b).await;
                if let Err(_e) = res {
ctx.stop(None);
self.close();
}
}
Ok(None) => {
self.send_error(None);
ctx.stop(None);
self.close();
}
}
}
}
pub struct ConnectionRemote {}
impl ConnectionRemote {
pub async fn ext_request<
B: Stream<Item = Vec<u8>> + StreamExt + Send + Sync,
A: Sink<Vec<u8>, Error = ProtocolError> + Send,
>(
w: A,
r: B,
request: ExtRequest,
) -> Result<ExtResponse, ProtocolError> {
unimplemented!();
}
async fn close<S>(w: S, err: ProtocolError) -> ProtocolError
where
S: Sink<Vec<u8>, Error = ProtocolError>,
{
let mut writer = Box::pin(w);
let _ = writer.send(vec![]);
let _ = writer.close().await;
err
}
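    // Authentication handshake, mirroring AuthProtocolHandler on the server side:
    // 1. send StartProtocol::Auth(ClientHello)
    // 2. receive a ServerHello carrying a nonce
    // 3. send a ClientAuth whose content (user, client, nonce) is signed with the user key
    // 4. receive an AuthResult; a result of 0 switches the connection to the Broker protocol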
pub async fn open_broker_connection<
B: Stream<Item = Vec<u8>> + StreamExt + Send + Sync + 'static,
A: Sink<Vec<u8>, Error = ProtocolError> + Send + 'static,
>(
w: A,
r: B,
user: PubKey,
user_pk: PrivKey,
client: PubKey,
) -> Result<impl BrokerConnection, ProtocolError> {
let mut writer = Box::pin(w);
writer
.send(serde_bare::to_vec(&StartProtocol::Auth(ClientHello::V0()))?)
.await
.map_err(|_e| ProtocolError::WriteError)?;
let mut reader = Box::pin(r);
let answer = reader.next().await;
if answer.is_none() {
return Err(Self::close(writer, ProtocolError::InvalidState).await);
}
let server_hello = serde_bare::from_slice::<ServerHello>(&answer.unwrap())?;
//debug_println!("received nonce from server: {:?}", server_hello.nonce());
let content = ClientAuthContentV0 {
user,
client,
nonce: server_hello.nonce().clone(),
};
let sig = sign(user_pk, user, &serde_bare::to_vec(&content)?)
.map_err(|_e| ProtocolError::SignatureError)?;
let auth_ser = serde_bare::to_vec(&ClientAuth::V0(ClientAuthV0 { content, sig }))?;
//debug_println!("AUTH SENT {:?}", auth_ser);
writer
.send(auth_ser)
.await
.map_err(|_e| ProtocolError::WriteError)?;
let answer = reader.next().await;
if answer.is_none() {
//return Err(ProtocolError::InvalidState);
return Err(Self::close(writer, ProtocolError::InvalidState).await);
}
let auth_result = serde_bare::from_slice::<AuthResult>(&answer.unwrap())?;
match auth_result.result() {
0 => {
async fn transform(message: BrokerMessage) -> Result<Vec<u8>, ProtocolError> {
if message.is_close() {
Ok(vec![])
} else {
Ok(serde_bare::to_vec(&message)?)
}
}
let messages_stream_write = writer.with(|message| transform(message));
let mut messages_stream_read = reader.map(|message| {
            if message.is_empty() {
BrokerMessage::Close
} else {
match serde_bare::from_slice::<BrokerMessage>(&message) {
Err(e) => BrokerMessage::Close,
Ok(m) => m
}
}
});
let cnx =
BrokerConnectionRemote::open(messages_stream_write, messages_stream_read, user);
Ok(cnx)
}
err => Err(Self::close(writer, ProtocolError::try_from(err).unwrap()).await),
}
}
}
pub struct BrokerConnectionRemote<T>
where
T: Sink<BrokerMessage> + Send + 'static,
{
writer: Arc<Mutex<Pin<Box<T>>>>,
user: PubKey,
actors: Arc<RwLock<HashMap<u64, WeakAddr<BrokerMessageActor>>>>,
stream_actors: Arc<RwLock<HashMap<u64, WeakAddr<BrokerMessageStreamActor>>>>,
shutdown: mpsc::UnboundedSender<Void>,
}
#[async_trait::async_trait]
impl<T> BrokerConnection for BrokerConnectionRemote<T>
where
T: Sink<BrokerMessage> + Send,
{
type OC = BrokerConnectionRemote<T>;
type BlockStream = async_channel::Receiver<Block>;
async fn close(&mut self) {
let _ = self.shutdown.close().await;
let mut w = self.writer.lock().await;
let _ = w.send(BrokerMessage::Close).await;
let _ = w.close().await;
}
async fn process_overlay_request_stream_response(
&mut self,
overlay: OverlayId,
request: BrokerOverlayRequestContentV0,
) -> Result<Pin<Box<Self::BlockStream>>, ProtocolError> {
let mut actor = BrokerMessageStreamActor::new();
let receiver = actor.receiver();
let error_receiver = actor.error_receiver();
let mut addr = actor
.start()
.await
.map_err(|_e| ProtocolError::ActorError)?;
let request_id = addr.actor_id();
//debug_println!("actor ID {}", request_id);
{
let mut map = self.stream_actors.write().expect("RwLock poisoned");
map.insert(request_id, addr.downgrade());
}
let mut w = self.writer.lock().await;
w.send(BrokerMessage::V0(BrokerMessageV0 {
            padding: vec![], // FIXME implement padding
content: BrokerMessageContentV0::BrokerOverlayMessage(BrokerOverlayMessage::V0(
BrokerOverlayMessageV0 {
overlay,
content: BrokerOverlayMessageContentV0::BrokerOverlayRequest(
BrokerOverlayRequest::V0(BrokerOverlayRequestV0 {
id: request_id,
content: request,
}),
),
},
)),
}))
.await
.map_err(|_e| ProtocolError::WriteError)?;
//debug_println!("waiting for first reply");
let reply = error_receiver.await;
match reply {
Err(_e) => {
Err(ProtocolError::Closing)
}
Ok(Some(e)) => {
let mut map = self.stream_actors.write().expect("RwLock poisoned");
map.remove(&request_id);
return Err(e);
}
Ok(None) => {
let stream_actors_in_thread = Arc::clone(&self.stream_actors);
task::spawn(async move {
addr.wait_for_stop().await; // TODO add timeout
let mut map = stream_actors_in_thread.write().expect("RwLock poisoned");
map.remove(&request_id);
});
Ok(Box::pin(receiver))
}
}
}
async fn process_overlay_request_objectid_response(
&mut self,
overlay: OverlayId,
request: BrokerOverlayRequestContentV0,
) -> Result<ObjectId, ProtocolError> {
before!(self, request_id, addr, receiver);
self.writer.lock().await
.send(BrokerMessage::V0(BrokerMessageV0 {
padding: vec![], // FIXME implement padding
content: BrokerMessageContentV0::BrokerOverlayMessage(BrokerOverlayMessage::V0(
BrokerOverlayMessageV0 {
overlay,
content: BrokerOverlayMessageContentV0::BrokerOverlayRequest(
BrokerOverlayRequest::V0(BrokerOverlayRequestV0 {
id: request_id,
content: request,
}),
),
},
)),
}))
.await
.map_err(|_e| ProtocolError::WriteError)?;
after!(self, request_id, addr, receiver, reply);
reply.into()
}
async fn process_overlay_request(
&mut self,
overlay: OverlayId,
request: BrokerOverlayRequestContentV0,
) -> Result<(), ProtocolError> {
before!(self, request_id, addr, receiver);
self.writer.lock().await
.send(BrokerMessage::V0(BrokerMessageV0 {
padding: vec![], // FIXME implement padding
content: BrokerMessageContentV0::BrokerOverlayMessage(BrokerOverlayMessage::V0(
BrokerOverlayMessageV0 {
overlay,
content: BrokerOverlayMessageContentV0::BrokerOverlayRequest(
BrokerOverlayRequest::V0(BrokerOverlayRequestV0 {
id: request_id,
content: request,
}),
),
},
)),
}))
.await
.map_err(|_e| ProtocolError::WriteError)?;
after!(self, request_id, addr, receiver, reply);
reply.into()
}
async fn add_user(
&mut self,
user_id: PubKey,
admin_user_pk: PrivKey,
) -> Result<(), ProtocolError> {
before!(self, request_id, addr, receiver);
let op_content = AddUserContentV0 { user: user_id };
let sig = sign(
admin_user_pk,
self.user,
&serde_bare::to_vec(&op_content)?,
)?;
self.writer.lock().await
.send(BrokerMessage::V0(BrokerMessageV0 {
padding: vec![], // TODO implement padding
content: BrokerMessageContentV0::BrokerRequest(BrokerRequest::V0(
BrokerRequestV0 {
id: request_id,
content: BrokerRequestContentV0::AddUser(AddUser::V0(AddUserV0 {
content: op_content,
sig,
})),
},
)),
}))
.await
.map_err(|_e| ProtocolError::WriteError)?;
after!(self, request_id, addr, receiver, reply);
reply.into()
}
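    // TODO: the three administration calls below are not yet implemented (no-ops)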
async fn del_user(&mut self, user_id: PubKey, admin_user_pk: PrivKey) {}
async fn add_client(&mut self, client_id: ClientId, user_pk: PrivKey) {}
async fn del_client(&mut self, client_id: ClientId, user_pk: PrivKey) {}
async fn overlay_connect(
&mut self,
repo_link: &RepoLink,
public: bool,
) -> Result<OverlayConnectionClient<BrokerConnectionRemote<T>>, ProtocolError> {
let overlay = self.process_overlay_connect(repo_link, public).await?;
        Ok(OverlayConnectionClient::create(self, overlay, repo_link.clone()))
}
}
#[derive(Debug)]
enum Void {}
impl<T> BrokerConnectionRemote<T>
where
T: Sink<BrokerMessage> + Send,
{
async fn connection_reader_loop<
U: Stream<Item = BrokerMessage> + StreamExt + Send + Sync + Unpin + 'static,
>(
stream: U,
actors: Arc<RwLock<HashMap<u64, WeakAddr<BrokerMessageActor>>>>,
stream_actors: Arc<RwLock<HashMap<u64, WeakAddr<BrokerMessageStreamActor>>>>,
shutdown: mpsc::UnboundedReceiver<Void>,
) -> Result<(), ProtocolError> {
let mut s = stream.fuse();
let mut shutdown = shutdown.fuse();
loop {
select! {
void = shutdown.next().fuse() => match void {
Some(void) => match void {},
None => break,
},
message = s.next().fuse() => match message {
Some(message) =>
{
//debug_println!("GOT MESSAGE {:?}", message);
if message.is_close() {
// releasing the blocking calls on the actors
let map = actors.read().expect("RwLock poisoned");
                            for a in map.values() {
if let Some(mut addr) = a.upgrade() {
let _ = addr.stop(Some(ProtocolError::Closing.into()));
}
}
let map2 = stream_actors.read().expect("RwLock poisoned");
                            for a in map2.values() {
if let Some(mut addr) = a.upgrade() {
let _ = addr.stop(Some(ProtocolError::Closing.into()));
}
}
return Err(ProtocolError::Closing);
}
if message.is_request() {
debug_println!("is request {}", message.id());
                            // close the connection: a client is not supposed to receive requests
return Err(ProtocolError::Closing);
} else if message.is_response() {
let id = message.id();
//debug_println!("is response for {}", id);
{
let map = actors.read().expect("RwLock poisoned");
match map.get(&id) {
Some(weak_addr) => match weak_addr.upgrade() {
Some(addr) => {
addr.send(BrokerMessageXActor(message))
                                            .map_err(|_e| ProtocolError::Closing)?
//.expect("sending message back to actor failed");
}
None => {
debug_println!("ERROR. Addr is dead for ID {}", id);
return Err(ProtocolError::Closing);
}
},
None => {
let map2 = stream_actors.read().expect("RwLock poisoned");
match map2.get(&id) {
Some(weak_addr) => match weak_addr.upgrade() {
Some(addr) => {
addr.send(BrokerMessageXActor(message))
                                                    .map_err(|_e| ProtocolError::Closing)?
//.expect("sending message back to stream actor failed");
}
None => {
debug_println!(
"ERROR. Addr is dead for ID {} {:?}",
id,
message
);
return Err(ProtocolError::Closing);
}
},
None => {
debug_println!("Actor ID not found {} {:?}", id, message);
return Err(ProtocolError::Closing);
}
}
}
}
}
}
},
None => break,
}
}
}
Ok(())
}
pub fn open<U: Stream<Item = BrokerMessage> + StreamExt + Send + Sync + Unpin + 'static>(
writer: T,
reader: U,
user: PubKey,
) -> BrokerConnectionRemote<T> {
let actors: Arc<RwLock<HashMap<u64, WeakAddr<BrokerMessageActor>>>> =
Arc::new(RwLock::new(HashMap::new()));
let stream_actors: Arc<RwLock<HashMap<u64, WeakAddr<BrokerMessageStreamActor>>>> =
Arc::new(RwLock::new(HashMap::new()));
let (shutdown_sender, shutdown_receiver) = mpsc::unbounded::<Void>();
let w = Arc::new(Mutex::new(Box::pin(writer)));
let ws_in_task = Arc::clone(&w);
let actors_in_thread = Arc::clone(&actors);
let stream_actors_in_thread = Arc::clone(&stream_actors);
task::spawn(async move {
debug_println!("START of reader loop");
if let Err(e) =
Self::connection_reader_loop(reader, actors_in_thread, stream_actors_in_thread, shutdown_receiver)
.await
{
debug_println!("closing because of {}", e);
let _ = ws_in_task.lock().await.close().await;
}
debug_println!("END of reader loop");
});
BrokerConnectionRemote::<T> {
writer: Arc::clone(&w),
user,
actors: Arc::clone(&actors),
stream_actors: Arc::clone(&stream_actors),
            shutdown: shutdown_sender,
}
}
}

p2p-client/src/connection_ws.rs
@ -0,0 +1,95 @@
/*
* Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
* All rights reserved.
* Licensed under the Apache License, Version 2.0
* <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
* or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
* at your option. All files in the project carrying such
* notice may not be copied, modified, or distributed except
* according to those terms.
*/
use debug_print::*;
use p2p_repo::types::*;
use p2p_repo::utils::{generate_keypair, now_timestamp};
use p2p_net::errors::*;
use p2p_net::types::*;
use p2p_net::broker_connection::*;
use crate::connection_remote::*;
use futures::{future, pin_mut, stream, SinkExt, StreamExt};
use async_tungstenite::async_std::connect_async;
use async_tungstenite::client_async;
use async_tungstenite::tungstenite::{Error, Message};
pub struct BrokerConnectionWebSocket {}
impl BrokerConnectionWebSocket {
    pub async fn open(
        url: &str,
        priv_key: PrivKey,
        pub_key: PubKey,
    ) -> Result<impl BrokerConnection, ProtocolError> {
let res = connect_async(url).await;
        match res {
Ok((ws, _)) => {
debug_println!("WebSocket handshake completed");
let (write, read) = ws.split();
let mut frames_stream_read = read.map(|msg_res| match msg_res {
Err(e) => {
debug_println!("ERROR {:?}", e);
vec![]
}
Ok(message) => {
if message.is_close() {
debug_println!("CLOSE FROM SERVER");
vec![]
} else {
message.into_data()
}
}
});
async fn transform(message: Vec<u8>) -> Result<Message, Error> {
                    if message.is_empty() {
debug_println!("sending CLOSE message to SERVER");
Ok(Message::Close(None))
} else {
Ok(Message::binary(message))
}
}
let frames_stream_write = write
.with(|message| transform(message))
                    .sink_map_err(|_e| ProtocolError::WriteError);
                let _master_key: [u8; 32] = [0; 32]; // placeholder, not yet used
                let cnx_res = ConnectionRemote::open_broker_connection(
frames_stream_write,
frames_stream_read,
pub_key,
priv_key,
PubKey::Ed25519PubKey([1; 32]),
)
.await;
                match cnx_res {
                    Ok(cnx) => Ok(cnx),
                    Err(e) => {
                        debug_println!("cannot connect {:?}", e);
                        Err(e)
                    }
                }
}
Err(e) => {
debug_println!("Cannot connect: {:?}", e);
Err(ProtocolError::ConnectionError)
}
}
}
}
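// Illustrative usage sketch (not part of this commit; the URL and the
// RepoLink value are assumptions):
//
//     let (priv_key, pub_key) = generate_keypair();
//     let mut cnx =
//         BrokerConnectionWebSocket::open("ws://127.0.0.1:3012", priv_key, pub_key).await?;
//     let overlay_cnx = cnx.overlay_connect(&repo_link, false).await?;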

p2p-client/src/lib.rs
@ -0,0 +1,47 @@
// Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// All rights reserved.
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
#[macro_export]
macro_rules! before {
( $self:expr, $request_id:ident, $addr:ident, $receiver:ident ) => {
let mut actor = BrokerMessageActor::new();
let $receiver = actor.receiver();
let mut $addr = actor
.start()
.await
.map_err(|_e| ProtocolError::ActorError)?;
let $request_id = $addr.actor_id();
//debug_println!("actor ID {}", $request_id);
{
let mut map = $self.actors.write().expect("RwLock poisoned");
map.insert($request_id, $addr.downgrade());
}
};
}
macro_rules! after {
( $self:expr, $request_id:ident, $addr:ident, $receiver:ident, $reply:ident ) => {
//debug_println!("waiting for reply");
$addr.wait_for_stop().await; // TODO add timeout and close connection if there's no reply
let r = $receiver.await;
        if r.is_err() { return Err(ProtocolError::Closing); }
let $reply = r.unwrap();
//debug_println!("reply arrived {:?}", $reply);
{
let mut map = $self.actors.write().expect("RwLock poisoned");
map.remove(&$request_id);
}
};
}
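// Illustrative sketch (not part of this commit) of how the two macros pair up
// inside a request method of BrokerConnectionRemote (see connection_remote.rs):
// before! registers a BrokerMessageActor under a fresh request id, and after!
// blocks until that actor delivers the reply.
//
//     before!(self, request_id, addr, receiver);
//     // ... send a BrokerMessage carrying `request_id` over self.writer ...
//     after!(self, request_id, addr, receiver, reply);
//     reply.into()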
pub mod connection_remote;
pub mod connection_ws;

p2p-net/Cargo.toml
@ -0,0 +1,20 @@
[package]
name = "p2p-net"
version = "0.1.0"
edition = "2021"
license = "MIT/Apache-2.0"
authors = ["Niko PLP <niko@nextgraph.org>"]
description = "P2P network module of NextGraph"
repository = "https://git.nextgraph.org/NextGraph/nextgraph-rs"
[dependencies]
debug_print = "1.0.0"
p2p-repo = { path = "../p2p-repo" }
serde = { version = "1.0", features = ["derive"] }
serde_bare = "0.5.0"
serde_bytes = "0.11.7"
num_enum = "0.5.7"
async-broadcast = "0.4.1"
futures = "0.3.24"
async-trait = "0.1.57"
blake3 = "1.3.1"

p2p-net/src/broker_connection.rs
@ -0,0 +1,337 @@
/*
* Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
* All rights reserved.
* Licensed under the Apache License, Version 2.0
* <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
* or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
* at your option. All files in the project carrying such
* notice may not be copied, modified, or distributed except
* according to those terms.
*/
//! Connection to a Broker, which can be local or remote.
//! If remote, it uses a Stream and Sink of framed messages.
//! This module defines the BrokerConnection trait.
//!
use futures::{
ready,
stream::Stream,
task::{Context, Poll},
Future,
select, FutureExt,
};
use futures::channel::mpsc;
use std::pin::Pin;
use std::{collections::HashSet, fmt::Debug};
use async_broadcast::{broadcast, Receiver};
use debug_print::*;
use futures::{pin_mut, stream, Sink, SinkExt, StreamExt};
use p2p_repo::object::*;
use p2p_repo::store::*;
use p2p_repo::types::*;
use p2p_repo::utils::*;
use crate::errors::*;
use crate::types::*;
#[async_trait::async_trait]
pub trait BrokerConnection {
type OC: BrokerConnection;
type BlockStream: Stream<Item = Block>;
async fn close(&mut self);
async fn add_user(
&mut self,
user_id: PubKey,
admin_user_pk: PrivKey,
) -> Result<(), ProtocolError>;
async fn del_user(&mut self, user_id: PubKey, admin_user_pk: PrivKey);
async fn add_client(&mut self, client_id: ClientId, user_pk: PrivKey);
async fn del_client(&mut self, client_id: ClientId, user_pk: PrivKey);
async fn overlay_connect(
&mut self,
repo: &RepoLink,
public: bool,
) -> Result<OverlayConnectionClient<Self::OC>, ProtocolError>;
    // TODO: remove these 4 functions from the trait; they are used internally only and should not be exposed to the end user
async fn process_overlay_request(
&mut self,
overlay: OverlayId,
request: BrokerOverlayRequestContentV0,
) -> Result<(), ProtocolError>;
async fn process_overlay_request_stream_response(
&mut self,
overlay: OverlayId,
request: BrokerOverlayRequestContentV0,
) -> Result<Pin<Box<Self::BlockStream>>, ProtocolError>;
async fn process_overlay_request_objectid_response(
&mut self,
overlay: OverlayId,
request: BrokerOverlayRequestContentV0,
) -> Result<ObjectId, ProtocolError>;
async fn process_overlay_connect(
&mut self,
repo_link: &RepoLink,
public: bool,
) -> Result<OverlayId, ProtocolError> {
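        // Derive the overlay ID from the repo: for a public overlay it is the
        // BLAKE3 hash of the repo public key; for a private overlay it is a
        // BLAKE3 keyed hash of the repo public key, keyed with a key derived
        // from the repo secret.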
let overlay: OverlayId = match public {
true => Digest::Blake3Digest32(*blake3::hash(repo_link.id().slice()).as_bytes()),
false => {
let key: [u8; blake3::OUT_LEN] =
blake3::derive_key("NextGraph OverlayId BLAKE3 key", repo_link.secret().slice());
let keyed_hash = blake3::keyed_hash(&key, repo_link.id().slice());
Digest::Blake3Digest32(*keyed_hash.as_bytes())
}
};
let res = self
.process_overlay_request(
overlay,
BrokerOverlayRequestContentV0::OverlayConnect(OverlayConnect::V0()),
)
.await;
match res {
Err(e) => {
if e == ProtocolError::OverlayNotJoined {
debug_println!("OverlayNotJoined");
let res2 = self
.process_overlay_request(
overlay,
BrokerOverlayRequestContentV0::OverlayJoin(OverlayJoin::V0(
OverlayJoinV0 {
secret: repo_link.secret(),
peers: repo_link.peers(),
                                    repo_pubkey: Some(repo_link.id()), // TODO: if we know we are connecting to a core node, we can pass None here
},
)),
)
.await?;
} else {
return Err(e);
}
}
Ok(()) => {}
}
debug_println!("OverlayConnectionClient ready");
Ok(overlay)
}
}
pub struct OverlayConnectionClient<'a, T>
where
T: BrokerConnection,
{
broker: &'a mut T,
overlay: OverlayId,
repo_link: RepoLink,
}
impl<'a, T> OverlayConnectionClient<'a, T>
where
T: BrokerConnection,
{
    pub fn create(broker: &'a mut T, overlay: OverlayId, repo_link: RepoLink) -> OverlayConnectionClient<'a, T> {
OverlayConnectionClient {
broker,
repo_link,
overlay,
}
}
    pub fn overlay(repo_link: &RepoLink, public: bool) -> OverlayId {
        match public {
            true => Digest::Blake3Digest32(*blake3::hash(repo_link.id().slice()).as_bytes()),
            false => {
                let key: [u8; blake3::OUT_LEN] =
                    blake3::derive_key("NextGraph OverlayId BLAKE3 key", repo_link.secret().slice());
                let keyed_hash = blake3::keyed_hash(&key, repo_link.id().slice());
                Digest::Blake3Digest32(*keyed_hash.as_bytes())
            }
        }
    }
pub async fn sync_branch(
&mut self,
heads: Vec<ObjectId>,
known_heads: Vec<ObjectId>,
known_commits: BloomFilter,
) -> Result<Pin<Box<T::BlockStream>>, ProtocolError> {
self.broker
.process_overlay_request_stream_response(
self.overlay,
BrokerOverlayRequestContentV0::BranchSyncReq(BranchSyncReq::V0(BranchSyncReqV0 {
heads,
known_heads,
known_commits,
})),
)
.await
}
pub fn leave(&self) {}
pub fn topic_connect(&self, id: TopicId) -> TopicSubscription<T> {
let (s, mut r1) = broadcast(128); // FIXME this should be done only once, in the Broker
TopicSubscription {
id,
overlay_cnx: self,
event_stream: r1.clone(),
}
}
pub async fn delete_object(&mut self, id: ObjectId) -> Result<(), ProtocolError> {
self.broker
.process_overlay_request(
self.overlay,
BrokerOverlayRequestContentV0::ObjectDel(ObjectDel::V0(ObjectDelV0 { id })),
)
.await
}
pub async fn pin_object(&mut self, id: ObjectId) -> Result<(), ProtocolError> {
self.broker
.process_overlay_request(
self.overlay,
BrokerOverlayRequestContentV0::ObjectPin(ObjectPin::V0(ObjectPinV0 { id })),
)
.await
}
pub async fn unpin_object(&mut self, id: ObjectId) -> Result<(), ProtocolError> {
self.broker
.process_overlay_request(
self.overlay,
BrokerOverlayRequestContentV0::ObjectUnpin(ObjectUnpin::V0(ObjectUnpinV0 { id })),
)
.await
}
pub async fn copy_object(
&mut self,
id: ObjectId,
expiry: Option<Timestamp>,
) -> Result<ObjectId, ProtocolError> {
self.broker
.process_overlay_request_objectid_response(
self.overlay,
BrokerOverlayRequestContentV0::ObjectCopy(ObjectCopy::V0(ObjectCopyV0 {
id,
expiry,
})),
)
.await
}
pub async fn get_block(
&mut self,
id: BlockId,
include_children: bool,
topic: Option<PubKey>,
) -> Result<Pin<Box<T::BlockStream>>, ProtocolError> {
self.broker
.process_overlay_request_stream_response(
self.overlay,
BrokerOverlayRequestContentV0::BlockGet(BlockGet::V0(BlockGetV0 {
id,
include_children,
topic,
})),
)
.await
}
pub async fn get_object(
&mut self,
id: ObjectId,
topic: Option<PubKey>,
) -> Result<Object, ProtocolError> {
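        // Stream all blocks of the object's Merkle tree into a temporary
        // in-memory store, then parse the full object from there.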
let mut blockstream = self.get_block(id, true, topic).await?;
let mut store = HashMapRepoStore::new();
while let Some(block) = blockstream.next().await {
            store.put(&block)?; // StorageError converts into ProtocolError
}
Object::load(id, None, &store).map_err(|e| match e {
ObjectParseError::MissingBlocks(_missing) => ProtocolError::MissingBlocks,
_ => ProtocolError::ObjectParseError,
})
}
pub async fn put_block(&mut self, block: &Block) -> Result<BlockId, ProtocolError> {
self.broker
.process_overlay_request(
self.overlay,
BrokerOverlayRequestContentV0::BlockPut(BlockPut::V0(block.clone())),
)
.await?;
Ok(block.id())
}
    // TODO: maybe implement a put_block_with_children? It would behave like put_object, but take a parent Block instead of a content
pub async fn put_object(
&mut self,
content: ObjectContent,
deps: Vec<ObjectId>,
expiry: Option<Timestamp>,
max_object_size: usize,
repo_pubkey: PubKey,
repo_secret: SymKey,
) -> Result<ObjectId, ProtocolError> {
let obj = Object::new(
content,
deps,
expiry,
max_object_size,
repo_pubkey,
repo_secret,
);
debug_println!("object has {} blocks", obj.blocks().len());
let mut deduplicated: HashSet<ObjectId> = HashSet::new();
for block in obj.blocks() {
let id = block.id();
            if !deduplicated.contains(&id) {
let _ = self.put_block(block).await?;
deduplicated.insert(id);
}
}
Ok(obj.id())
}
}
pub struct TopicSubscription<'a, T>
where
T: BrokerConnection,
{
id: TopicId,
overlay_cnx: &'a OverlayConnectionClient<'a, T>,
event_stream: Receiver<Event>,
}
impl<'a, T> TopicSubscription<'a, T>
where
T: BrokerConnection,
{
pub fn unsubscribe(&self) {}
pub fn disconnect(&self) {}
pub fn get_branch_heads(&self) {}
pub fn get_event_stream(&self) -> &Receiver<Event> {
&self.event_stream
}
}

p2p-net/src/errors.rs
@ -0,0 +1,162 @@
// Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// All rights reserved.
// This code is partly derived from work written by TG x Thoth from P2Pcollab.
// Copyright 2022 TG x Thoth
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use crate::types::BrokerMessage;
use core::fmt;
use p2p_repo::object::ObjectParseError;
use p2p_repo::types::Block;
use p2p_repo::types::ObjectId;
use num_enum::IntoPrimitive;
use num_enum::TryFromPrimitive;
use std::convert::From;
use std::convert::TryFrom;
use std::error::Error;
#[derive(Debug, Eq, PartialEq, TryFromPrimitive, IntoPrimitive, Clone)]
#[repr(u16)]
pub enum ProtocolError {
WriteError = 1,
ActorError,
InvalidState,
SignatureError,
InvalidSignature,
SerializationError,
PartialContent,
AccessDenied,
OverlayNotJoined,
OverlayNotFound,
BrokerError,
NotFound,
EndOfStream,
StoreError,
MissingBlocks,
ObjectParseError,
InvalidValue,
UserAlreadyExists,
RepoIdRequired,
Closing,
ConnectionError,
}
impl ProtocolError {
pub fn is_stream(&self) -> bool {
*self == ProtocolError::PartialContent || *self == ProtocolError::EndOfStream
}
}
impl Error for ProtocolError {}
impl fmt::Display for ProtocolError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{:?}", self)
}
}
impl From<p2p_repo::errors::NgError> for ProtocolError {
fn from(e: p2p_repo::errors::NgError) -> Self {
match e {
p2p_repo::errors::NgError::InvalidSignature => ProtocolError::InvalidSignature,
p2p_repo::errors::NgError::SerializationError => ProtocolError::SerializationError,
}
}
}
impl From<ObjectParseError> for ProtocolError {
    fn from(_e: ObjectParseError) -> Self {
ProtocolError::ObjectParseError
}
}
impl From<p2p_repo::store::StorageError> for ProtocolError {
fn from(e: p2p_repo::store::StorageError) -> Self {
match e {
p2p_repo::store::StorageError::NotFound => ProtocolError::NotFound,
p2p_repo::store::StorageError::InvalidValue => ProtocolError::InvalidValue,
_ => ProtocolError::StoreError,
}
}
}
impl From<serde_bare::error::Error> for ProtocolError {
    fn from(_e: serde_bare::error::Error) -> Self {
ProtocolError::SerializationError
}
}
impl From<BrokerMessage> for Result<(), ProtocolError> {
fn from(msg: BrokerMessage) -> Self {
if !msg.is_response() {
panic!("BrokerMessage is not a response");
}
match msg.result() {
0 => Ok(()),
err => Err(ProtocolError::try_from(err).unwrap()),
}
}
}
impl From<BrokerMessage> for Result<ObjectId, ProtocolError> {
fn from(msg: BrokerMessage) -> Self {
if !msg.is_response() {
panic!("BrokerMessage is not a response");
}
match msg.result() {
0 => Ok(msg.response_object_id()),
err => Err(ProtocolError::try_from(err).unwrap()),
}
}
}
/// The Option indicates whether a Block is available. The block itself cannot be returned here; call BrokerMessage::response_block() to get a reference to it.
impl From<BrokerMessage> for Result<Option<u16>, ProtocolError> {
fn from(msg: BrokerMessage) -> Self {
if !msg.is_response() {
panic!("BrokerMessage is not a response");
}
//let partial: u16 = ProtocolError::PartialContent.into();
let res = msg.result();
if res == 0 || ProtocolError::try_from(res).unwrap().is_stream() {
if msg.is_overlay() {
match msg.response_block() {
Some(_) => Ok(Some(res)),
None => Ok(None),
}
} else {
Ok(None)
}
} else {
Err(ProtocolError::try_from(res).unwrap())
}
}
}
/// The Option indicates whether a Block is available. Returns a clone of the block.
impl From<BrokerMessage> for Result<Option<Block>, ProtocolError> {
fn from(msg: BrokerMessage) -> Self {
if !msg.is_response() {
panic!("BrokerMessage is not a response");
}
//let partial: u16 = ProtocolError::PartialContent.into();
let res = msg.result();
if res == 0 || ProtocolError::try_from(res).unwrap().is_stream() {
if msg.is_overlay() {
match msg.response_block() {
Some(b) => Ok(Some(b.clone())),
None => Ok(None),
}
} else {
Ok(None)
}
} else {
Err(ProtocolError::try_from(res).unwrap())
}
}
}
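// Minimal round-trip sketch (illustrative, not part of this commit) of the
// u16 wire codes provided by the num_enum derives: 0 is reserved for
// success, so the first error variant starts at 1.
#[cfg(test)]
mod test {
    use super::*;
    #[test]
    pub fn test_error_code_roundtrip() {
        let code: u16 = ProtocolError::WriteError.into();
        assert_eq!(code, 1);
        assert_eq!(
            ProtocolError::try_from(1u16).unwrap(),
            ProtocolError::WriteError
        );
        // 0 means success and maps to no error variant
        assert!(ProtocolError::try_from(0u16).is_err());
    }
}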

p2p-net/src/lib.rs
@ -0,0 +1,5 @@
pub mod types;
pub mod errors;
pub mod broker_connection;

p2p-net/src/types.rs
File diff suppressed because it is too large

p2p-repo/Cargo.toml
@ -0,0 +1,20 @@
[package]
name = "p2p-repo"
version = "0.1.0"
edition = "2021"
license = "MIT/Apache-2.0"
authors = ["Niko PLP <niko@nextgraph.org>"]
description = "P2P repository module of NextGraph"
repository = "https://git.nextgraph.org/NextGraph/nextgraph-rs"
[dependencies]
blake3 = "1.3.1"
chacha20 = "0.9.0"
ed25519-dalek = "1.0.1"
rand = "0.7"
serde = { version = "1.0.142", features = ["derive"] }
serde_bare = "0.5.0"
serde_bytes = "0.11.7"
fastbloom-rs = "0.3.1"
debug_print = "1.0.0"
hex = "0.4.3"

p2p-repo/src/block.rs
@ -0,0 +1,116 @@
// Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// All rights reserved.
// This code is partly derived from work written by TG x Thoth from P2Pcollab.
// Copyright 2022 TG x Thoth
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
//! Immutable Block
use crate::types::*;
impl BlockV0 {
pub fn new(
children: Vec<BlockId>,
deps: ObjectDeps,
expiry: Option<Timestamp>,
content: Vec<u8>,
key: Option<SymKey>,
) -> BlockV0 {
let mut b = BlockV0 {
id: None,
key,
children,
deps,
expiry,
content,
};
let block = Block::V0(b.clone());
b.id = Some(block.get_id());
b
}
}
impl Block {
pub fn new(
children: Vec<BlockId>,
deps: ObjectDeps,
expiry: Option<Timestamp>,
content: Vec<u8>,
key: Option<SymKey>,
) -> Block {
Block::V0(BlockV0::new(children, deps, expiry, content, key))
}
/// Compute the ID
pub fn get_id(&self) -> BlockId {
let ser = serde_bare::to_vec(self).unwrap();
let hash = blake3::hash(ser.as_slice());
        Digest::Blake3Digest32(*hash.as_bytes())
}
/// Get the already computed ID
pub fn id(&self) -> BlockId {
match self {
Block::V0(b) => match b.id {
Some(id) => id,
None => self.get_id(),
},
}
}
/// Get the content
pub fn content(&self) -> &Vec<u8> {
match self {
Block::V0(b) => &b.content,
}
}
/// Get the children
pub fn children(&self) -> &Vec<BlockId> {
match self {
Block::V0(b) => &b.children,
}
}
/// Get the dependencies
pub fn deps(&self) -> &ObjectDeps {
match self {
Block::V0(b) => &b.deps,
}
}
/// Get the expiry
pub fn expiry(&self) -> Option<Timestamp> {
match self {
Block::V0(b) => b.expiry,
}
}
pub fn set_expiry(&mut self, expiry: Option<Timestamp>) {
match self {
Block::V0(b) => {
b.id = None;
b.expiry = expiry
}
}
}
/// Get the key
pub fn key(&self) -> Option<SymKey> {
match self {
Block::V0(b) => b.key,
}
}
/// Set the key
pub fn set_key(&mut self, key: Option<SymKey>) {
match self {
Block::V0(b) => b.key = key,
}
}
}
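// Minimal sketch (illustrative, not part of this commit): mutating a block
// clears its cached ID, so id() recomputes the BLAKE3 hash over the new
// serialization.
#[cfg(test)]
mod test {
    use crate::types::*;
    #[test]
    pub fn test_set_expiry_invalidates_cached_id() {
        let mut block = Block::new(
            vec![],
            ObjectDeps::ObjectIdList(vec![]),
            None,
            vec![1u8, 2, 3],
            None,
        );
        let id_before = block.id();
        block.set_expiry(Some(2342));
        assert_ne!(block.id(), id_before);
    }
}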

p2p-repo/src/branch.rs
@ -0,0 +1,581 @@
// Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// All rights reserved.
// This code is partly derived from work written by TG x Thoth from P2Pcollab.
// Copyright 2022 TG x Thoth
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
//! Branch of a Repository
use debug_print::*;
use std::collections::{HashMap, HashSet};
use fastbloom_rs::{BloomFilter as Filter, Membership};
use crate::object::*;
use crate::store::*;
use crate::types::*;
impl MemberV0 {
/// New member
pub fn new(id: PubKey, commit_types: Vec<CommitType>, metadata: Vec<u8>) -> MemberV0 {
MemberV0 {
id,
commit_types,
metadata,
}
}
/// Check whether this member has permission for the given commit type
pub fn has_perm(&self, commit_type: CommitType) -> bool {
self.commit_types.contains(&commit_type)
}
}
impl Member {
/// New member
pub fn new(id: PubKey, commit_types: Vec<CommitType>, metadata: Vec<u8>) -> Member {
Member::V0(MemberV0::new(id, commit_types, metadata))
}
/// Check whether this member has permission for the given commit type
pub fn has_perm(&self, commit_type: CommitType) -> bool {
match self {
Member::V0(m) => m.has_perm(commit_type),
}
}
}
impl BranchV0 {
pub fn new(
id: PubKey,
topic: PubKey,
secret: SymKey,
members: Vec<MemberV0>,
quorum: HashMap<CommitType, u32>,
ack_delay: RelTime,
tags: Vec<u8>,
metadata: Vec<u8>,
) -> BranchV0 {
BranchV0 {
id,
topic,
secret,
members,
quorum,
ack_delay,
tags,
metadata,
}
}
}
impl Branch {
pub fn new(
id: PubKey,
topic: PubKey,
secret: SymKey,
members: Vec<MemberV0>,
quorum: HashMap<CommitType, u32>,
ack_delay: RelTime,
tags: Vec<u8>,
metadata: Vec<u8>,
) -> Branch {
Branch::V0(BranchV0::new(
id, topic, secret, members, quorum, ack_delay, tags, metadata,
))
}
/// Get member by ID
pub fn get_member(&self, id: &PubKey) -> Option<&MemberV0> {
match self {
Branch::V0(b) => {
for m in b.members.iter() {
if m.id == *id {
return Some(m);
}
}
}
}
None
}
/// Branch sync request from another peer
///
/// Return ObjectIds to send
pub fn sync_req(
our_heads: &[ObjectId],
their_heads: &[ObjectId],
their_filter: &BloomFilter,
store: &impl RepoStore,
) -> Result<Vec<ObjectId>, ObjectParseError> {
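        // Compute the set of commits to send: everything reachable from
        // our_heads up to the root or a known head, minus everything
        // reachable from their_heads, minus whatever their Bloom filter
        // already claims to contain.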
//debug_println!(">> sync_req");
//debug_println!(" our_heads: {:?}", our_heads);
//debug_println!(" their_heads: {:?}", their_heads);
/// Load `Commit` `Object`s of a `Branch` from the `RepoStore` starting from the given `Object`,
/// and collect `ObjectId`s starting from `our_heads` towards `their_heads`
fn load_branch(
cobj: &Object,
store: &impl RepoStore,
their_heads: &[ObjectId],
visited: &mut HashSet<ObjectId>,
missing: &mut HashSet<ObjectId>,
) -> Result<bool, ObjectParseError> {
//debug_println!(">>> load_branch: {}", cobj.id());
let id = cobj.id();
// root has no deps
            let is_root = cobj.deps().is_empty();
//debug_println!(" deps: {:?}", cobj.deps());
// check if this commit object is present in their_heads
let mut their_head_found = their_heads.contains(&id);
// load deps, stop at the root or if this is a commit object from their_heads
if !is_root && !their_head_found {
visited.insert(id);
for id in cobj.deps() {
match Object::load(*id, None, store) {
Ok(o) => {
if !visited.contains(id) {
if load_branch(&o, store, their_heads, visited, missing)? {
their_head_found = true;
}
}
}
Err(ObjectParseError::MissingBlocks(m)) => {
missing.extend(m);
}
Err(e) => return Err(e),
}
}
}
Ok(their_head_found)
}
// missing commits from our branch
let mut missing = HashSet::new();
// our commits
let mut ours = HashSet::new();
// their commits
let mut theirs = HashSet::new();
// collect all commits reachable from our_heads
// up to the root or until encountering a commit from their_heads
for id in our_heads {
let cobj = Object::load(*id, None, store)?;
let mut visited = HashSet::new();
let their_head_found =
load_branch(&cobj, store, their_heads, &mut visited, &mut missing)?;
//debug_println!("<<< load_branch: {}", their_head_found);
ours.extend(visited); // add if one of their_heads found
}
// collect all commits reachable from their_heads
for id in their_heads {
let cobj = Object::load(*id, None, store)?;
let mut visited = HashSet::new();
let their_head_found = load_branch(&cobj, store, &[], &mut visited, &mut missing)?;
//debug_println!("<<< load_branch: {}", their_head_found);
theirs.extend(visited); // add if one of their_heads found
}
let mut result = &ours - &theirs;
//debug_println!("!! ours: {:?}", ours);
//debug_println!("!! theirs: {:?}", theirs);
//debug_println!("!! result: {:?}", result);
// remove their_commits from result
let filter = Filter::from_u8_array(their_filter.f.as_slice(), their_filter.k.into());
for id in result.clone() {
match id {
Digest::Blake3Digest32(d) => {
if filter.contains(&d) {
result.remove(&id);
}
}
}
}
//debug_println!("!! result filtered: {:?}", result);
Ok(Vec::from_iter(result))
}
}
#[cfg(test)]
mod test {
use std::collections::HashMap;
use ed25519_dalek::*;
use fastbloom_rs::{BloomFilter as Filter, FilterBuilder, Membership};
use rand::rngs::OsRng;
use crate::branch::*;
use crate::commit::*;
use crate::object::*;
use crate::repo;
use crate::store::*;
#[test]
pub fn test_branch() {
fn add_obj(
content: ObjectContent,
deps: Vec<ObjectId>,
expiry: Option<Timestamp>,
repo_pubkey: PubKey,
repo_secret: SymKey,
store: &mut impl RepoStore,
) -> ObjectRef {
let max_object_size = 4000;
let obj = Object::new(
content,
deps,
expiry,
max_object_size,
repo_pubkey,
repo_secret,
);
println!(">>> add_obj");
println!(" id: {:?}", obj.id());
println!(" deps: {:?}", obj.deps());
obj.save(store).unwrap();
obj.reference().unwrap()
}
fn add_commit(
branch: ObjectRef,
author_privkey: PrivKey,
author_pubkey: PubKey,
seq: u32,
deps: Vec<ObjectRef>,
acks: Vec<ObjectRef>,
body_ref: ObjectRef,
repo_pubkey: PubKey,
repo_secret: SymKey,
store: &mut impl RepoStore,
) -> ObjectRef {
let mut obj_deps: Vec<ObjectId> = vec![];
obj_deps.extend(deps.iter().map(|r| r.id));
obj_deps.extend(acks.iter().map(|r| r.id));
let obj_ref = ObjectRef {
id: ObjectId::Blake3Digest32([1; 32]),
key: SymKey::ChaCha20Key([2; 32]),
};
let refs = vec![obj_ref];
let metadata = vec![5u8; 55];
let expiry = None;
let commit = Commit::new(
author_privkey,
author_pubkey,
seq,
branch,
deps,
acks,
refs,
metadata,
body_ref,
expiry,
)
.unwrap();
//println!("commit: {:?}", commit);
add_obj(
ObjectContent::Commit(commit),
obj_deps,
expiry,
repo_pubkey,
repo_secret,
store,
)
}
fn add_body_branch(
branch: Branch,
repo_pubkey: PubKey,
repo_secret: SymKey,
store: &mut impl RepoStore,
) -> ObjectRef {
let deps = vec![];
let expiry = None;
let body = CommitBody::Branch(branch);
//println!("body: {:?}", body);
add_obj(
ObjectContent::CommitBody(body),
deps,
expiry,
repo_pubkey,
repo_secret,
store,
)
}
fn add_body_trans(
deps: Vec<ObjectId>,
repo_pubkey: PubKey,
repo_secret: SymKey,
store: &mut impl RepoStore,
) -> ObjectRef {
let expiry = None;
let content = [7u8; 777].to_vec();
let body = CommitBody::Transaction(Transaction::V0(content));
//println!("body: {:?}", body);
add_obj(
ObjectContent::CommitBody(body),
deps,
expiry,
repo_pubkey,
repo_secret,
store,
)
}
fn add_body_ack(
deps: Vec<ObjectId>,
repo_pubkey: PubKey,
repo_secret: SymKey,
store: &mut impl RepoStore,
) -> ObjectRef {
let expiry = None;
let body = CommitBody::Ack(Ack::V0());
//println!("body: {:?}", body);
add_obj(
ObjectContent::CommitBody(body),
deps,
expiry,
repo_pubkey,
repo_secret,
store,
)
}
let mut store = HashMapRepoStore::new();
let mut rng = OsRng {};
// repo
let repo_keypair: Keypair = Keypair::generate(&mut rng);
println!(
"repo private key: ({}) {:?}",
repo_keypair.secret.as_bytes().len(),
repo_keypair.secret.as_bytes()
);
println!(
"repo public key: ({}) {:?}",
repo_keypair.public.as_bytes().len(),
repo_keypair.public.as_bytes()
);
let _repo_privkey = PrivKey::Ed25519PrivKey(repo_keypair.secret.to_bytes());
let repo_pubkey = PubKey::Ed25519PubKey(repo_keypair.public.to_bytes());
let repo_secret = SymKey::ChaCha20Key([9; 32]);
// branch
let branch_keypair: Keypair = Keypair::generate(&mut rng);
println!("branch public key: {:?}", branch_keypair.public.as_bytes());
let branch_pubkey = PubKey::Ed25519PubKey(branch_keypair.public.to_bytes());
let member_keypair: Keypair = Keypair::generate(&mut rng);
println!("member public key: {:?}", member_keypair.public.as_bytes());
let member_privkey = PrivKey::Ed25519PrivKey(member_keypair.secret.to_bytes());
let member_pubkey = PubKey::Ed25519PubKey(member_keypair.public.to_bytes());
let metadata = [66u8; 64].to_vec();
let commit_types = vec![CommitType::Ack, CommitType::Transaction];
let secret = SymKey::ChaCha20Key([0; 32]);
let member = MemberV0::new(member_pubkey, commit_types, metadata.clone());
let members = vec![member];
let mut quorum = HashMap::new();
quorum.insert(CommitType::Transaction, 3);
let ack_delay = RelTime::Minutes(3);
let tags = [99u8; 32].to_vec();
let branch = Branch::new(
branch_pubkey,
branch_pubkey,
secret,
members,
quorum,
ack_delay,
tags,
metadata,
);
//println!("branch: {:?}", branch);
fn print_branch() {
println!("branch deps/acks:");
println!("");
println!(" br");
println!(" / \\");
println!(" t1 t2");
println!(" / \\ / \\");
println!(" a3 t4<--t5-->(t1)");
println!(" / \\");
println!(" a6 a7");
println!("");
}
print_branch();
// commit bodies
let branch_body = add_body_branch(
branch.clone(),
repo_pubkey.clone(),
repo_secret.clone(),
&mut store,
);
let ack_body = add_body_ack(vec![], repo_pubkey, repo_secret, &mut store);
let trans_body = add_body_trans(vec![], repo_pubkey, repo_secret, &mut store);
// create & add commits to store
println!(">> br");
let br = add_commit(
branch_body,
member_privkey,
member_pubkey,
0,
vec![],
vec![],
branch_body,
repo_pubkey,
repo_secret,
&mut store,
);
println!(">> t1");
let t1 = add_commit(
branch_body,
member_privkey,
member_pubkey,
1,
vec![br],
vec![],
trans_body,
repo_pubkey,
repo_secret,
&mut store,
);
println!(">> t2");
let t2 = add_commit(
branch_body,
member_privkey,
member_pubkey,
2,
vec![br],
vec![],
trans_body,
repo_pubkey,
repo_secret,
&mut store,
);
println!(">> a3");
let a3 = add_commit(
branch_body,
member_privkey,
member_pubkey,
3,
vec![t1],
vec![],
ack_body,
repo_pubkey,
repo_secret,
&mut store,
);
println!(">> t4");
let t4 = add_commit(
branch_body,
member_privkey,
member_pubkey,
4,
vec![t2],
vec![t1],
trans_body,
repo_pubkey,
repo_secret,
&mut store,
);
println!(">> t5");
let t5 = add_commit(
branch_body,
member_privkey,
member_pubkey,
5,
vec![t1, t2],
vec![t4],
trans_body,
repo_pubkey,
repo_secret,
&mut store,
);
println!(">> a6");
let a6 = add_commit(
branch_body,
member_privkey,
member_pubkey,
6,
vec![t4],
vec![],
ack_body,
repo_pubkey,
repo_secret,
&mut store,
);
println!(">> a7");
let a7 = add_commit(
branch_body,
member_privkey,
member_pubkey,
7,
vec![t4],
vec![],
ack_body,
repo_pubkey,
repo_secret,
&mut store,
);
let c7 = Commit::load(a7, &store).unwrap();
c7.verify(&branch, &store).unwrap();
let mut filter = Filter::new(FilterBuilder::new(10, 0.01));
for commit_ref in [br, t1, t2, a3, t5, a6] {
match commit_ref.id {
ObjectId::Blake3Digest32(d) => filter.add(&d),
}
}
let cfg = filter.config();
let their_commits = BloomFilter {
k: cfg.hashes,
f: filter.get_u8_array().to_vec(),
};
print_branch();
println!(">> sync_req");
println!(" our_heads: [a3, t5, a6, a7]");
println!(" their_heads: [a3, t5]");
println!(" their_commits: [br, t1, t2, a3, t5, a6]");
let ids = Branch::sync_req(
&[a3.id, t5.id, a6.id, a7.id],
&[a3.id, t5.id],
&their_commits,
&store,
)
.unwrap();
assert_eq!(ids.len(), 1);
assert!(ids.contains(&a7.id));
}
}

p2p-repo/src/broker_store.rs
@ -0,0 +1,64 @@
// Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use crate::store::StorageError;
pub trait BrokerStore {
/// Load a property from the store.
fn get(&self, prefix: u8, key: &Vec<u8>, suffix: Option<u8>) -> Result<Vec<u8>, StorageError>;
/// Load all the values of a property from the store.
fn get_all(
&self,
prefix: u8,
key: &Vec<u8>,
suffix: Option<u8>,
) -> Result<Vec<Vec<u8>>, StorageError>;
/// Check if a specific value exists for a property from the store.
fn has_property_value(
&self,
prefix: u8,
key: &Vec<u8>,
suffix: Option<u8>,
value: Vec<u8>,
) -> Result<(), StorageError>;
/// Save a property value to the store.
fn put(
&self,
prefix: u8,
key: &Vec<u8>,
suffix: Option<u8>,
value: Vec<u8>,
) -> Result<(), StorageError>;
/// Replace the property of a key (single value) to the store.
fn replace(
&self,
prefix: u8,
key: &Vec<u8>,
suffix: Option<u8>,
value: Vec<u8>,
) -> Result<(), StorageError>;
/// Delete a property from the store.
fn del(&self, prefix: u8, key: &Vec<u8>, suffix: Option<u8>) -> Result<(), StorageError>;
/// Delete all properties of a key from the store.
fn del_all(&self, prefix: u8, key: &Vec<u8>, all_suffixes: &[u8]) -> Result<(), StorageError>;
/// Delete a specific value for a property from the store.
fn del_property_value(
&self,
prefix: u8,
key: &Vec<u8>,
suffix: Option<u8>,
value: Vec<u8>,
) -> Result<(), StorageError>;
}
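// Illustrative sketch (an assumption, not part of this commit): a concrete
// BrokerStore (e.g. an LMDB-backed one) is expected to flatten
// (prefix, key, suffix) into a single storage key: a 1-byte type prefix,
// the entity key bytes, then an optional 1-byte property suffix.
//
//     fn compose_key(prefix: u8, key: &[u8], suffix: Option<u8>) -> Vec<u8> {
//         let mut k = Vec::with_capacity(key.len() + 2);
//         k.push(prefix);
//         k.extend_from_slice(key);
//         if let Some(s) = suffix {
//             k.push(s);
//         }
//         k
//     }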

p2p-repo/src/commit.rs
@ -0,0 +1,454 @@
// Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// All rights reserved.
// This code is partly derived from work written by TG x Thoth from P2Pcollab.
// Copyright 2022 TG x Thoth
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
//! Commit
use debug_print::*;
use ed25519_dalek::*;
use std::collections::HashSet;
use std::iter::FromIterator;
use crate::object::*;
use crate::store::*;
use crate::types::*;
#[derive(Debug)]
pub enum CommitLoadError {
MissingBlocks(Vec<BlockId>),
ObjectParseError,
DeserializeError,
}
#[derive(Debug)]
pub enum CommitVerifyError {
InvalidSignature,
PermissionDenied,
BodyLoadError(CommitLoadError),
DepLoadError(CommitLoadError),
}
impl CommitBody {
/// Get CommitType corresponding to CommitBody
pub fn to_type(&self) -> CommitType {
match self {
CommitBody::Ack(_) => CommitType::Ack,
CommitBody::AddBranch(_) => CommitType::AddBranch,
CommitBody::AddMembers(_) => CommitType::AddMembers,
CommitBody::Branch(_) => CommitType::Branch,
CommitBody::EndOfBranch(_) => CommitType::EndOfBranch,
CommitBody::RemoveBranch(_) => CommitType::RemoveBranch,
CommitBody::Repository(_) => CommitType::Repository,
CommitBody::Snapshot(_) => CommitType::Snapshot,
CommitBody::Transaction(_) => CommitType::Transaction,
}
}
}
impl CommitV0 {
/// New commit
pub fn new(
author_privkey: PrivKey,
author_pubkey: PubKey,
seq: u32,
branch: ObjectRef,
deps: Vec<ObjectRef>,
acks: Vec<ObjectRef>,
refs: Vec<ObjectRef>,
metadata: Vec<u8>,
body: ObjectRef,
expiry: Option<Timestamp>,
) -> Result<CommitV0, SignatureError> {
let content = CommitContentV0 {
author: author_pubkey,
seq,
branch,
deps,
acks,
refs,
metadata,
body,
expiry,
};
let content_ser = serde_bare::to_vec(&content).unwrap();
// sign commit
let kp = match (author_privkey, author_pubkey) {
(PrivKey::Ed25519PrivKey(sk), PubKey::Ed25519PubKey(pk)) => [sk, pk].concat(),
};
let keypair = Keypair::from_bytes(kp.as_slice())?;
let sig_bytes = keypair.sign(content_ser.as_slice()).to_bytes();
let mut it = sig_bytes.chunks_exact(32);
let mut ss: Ed25519Sig = [[0; 32], [0; 32]];
ss[0].copy_from_slice(it.next().unwrap());
ss[1].copy_from_slice(it.next().unwrap());
let sig = Sig::Ed25519Sig(ss);
Ok(CommitV0 {
content,
sig,
id: None,
key: None,
})
}
}
impl Commit {
/// New commit
pub fn new(
author_privkey: PrivKey,
author_pubkey: PubKey,
seq: u32,
branch: ObjectRef,
deps: Vec<ObjectRef>,
acks: Vec<ObjectRef>,
refs: Vec<ObjectRef>,
metadata: Vec<u8>,
body: ObjectRef,
expiry: Option<Timestamp>,
) -> Result<Commit, SignatureError> {
CommitV0::new(
author_privkey,
author_pubkey,
seq,
branch,
deps,
acks,
refs,
metadata,
body,
expiry,
)
        .map(Commit::V0)
}
/// Load commit from store
pub fn load(commit_ref: ObjectRef, store: &impl RepoStore) -> Result<Commit, CommitLoadError> {
let (id, key) = (commit_ref.id, commit_ref.key);
match Object::load(id, Some(key), store) {
Ok(obj) => {
let content = obj
.content()
.map_err(|_e| CommitLoadError::ObjectParseError)?;
let mut commit = match content {
ObjectContent::Commit(c) => c,
_ => return Err(CommitLoadError::DeserializeError),
};
commit.set_id(id);
commit.set_key(key);
Ok(commit)
}
Err(ObjectParseError::MissingBlocks(missing)) => {
Err(CommitLoadError::MissingBlocks(missing))
}
Err(_) => Err(CommitLoadError::ObjectParseError),
}
}
/// Load commit body from store
pub fn load_body(&self, store: &impl RepoStore) -> Result<CommitBody, CommitLoadError> {
let content = self.content();
let (id, key) = (content.body.id, content.body.key);
let obj = Object::load(id.clone(), Some(key.clone()), store).map_err(|e| match e {
ObjectParseError::MissingBlocks(missing) => CommitLoadError::MissingBlocks(missing),
_ => CommitLoadError::ObjectParseError,
})?;
let content = obj
.content()
.map_err(|_e| CommitLoadError::ObjectParseError)?;
match content {
ObjectContent::CommitBody(body) => Ok(body),
_ => Err(CommitLoadError::DeserializeError),
}
}
/// Get ID of parent `Object`
pub fn id(&self) -> Option<ObjectId> {
match self {
Commit::V0(c) => c.id,
}
}
/// Set ID of parent `Object`
pub fn set_id(&mut self, id: ObjectId) {
match self {
Commit::V0(c) => c.id = Some(id),
}
}
/// Get key of parent `Object`
pub fn key(&self) -> Option<SymKey> {
match self {
Commit::V0(c) => c.key,
}
}
/// Set key of parent `Object`
pub fn set_key(&mut self, key: SymKey) {
match self {
Commit::V0(c) => c.key = Some(key),
}
}
/// Get commit signature
pub fn sig(&self) -> &Sig {
match self {
Commit::V0(c) => &c.sig,
}
}
/// Get commit content
pub fn content(&self) -> &CommitContentV0 {
match self {
Commit::V0(c) => &c.content,
}
}
/// Get acks
pub fn acks(&self) -> Vec<ObjectRef> {
match self {
Commit::V0(c) => c.content.acks.clone(),
}
}
/// Get deps
pub fn deps(&self) -> Vec<ObjectRef> {
match self {
Commit::V0(c) => c.content.deps.clone(),
}
}
/// Get all direct commit dependencies of the commit (`deps`, `acks`)
pub fn deps_acks(&self) -> Vec<ObjectRef> {
match self {
Commit::V0(c) => [c.content.acks.clone(), c.content.deps.clone()].concat(),
}
}
/// Get seq
pub fn seq(&self) -> u32 {
match self {
Commit::V0(c) => c.content.seq,
}
}
/// Verify commit signature
pub fn verify_sig(&self) -> Result<(), SignatureError> {
let c = match self {
Commit::V0(c) => c,
};
let content_ser = serde_bare::to_vec(&c.content).unwrap();
let pubkey = match c.content.author {
PubKey::Ed25519PubKey(pk) => pk,
};
let pk = PublicKey::from_bytes(&pubkey)?;
let sig_bytes = match c.sig {
Sig::Ed25519Sig(ss) => [ss[0], ss[1]].concat(),
};
let sig = Signature::from_bytes(&sig_bytes)?;
pk.verify_strict(&content_ser, &sig)
}
/// Verify commit permissions
pub fn verify_perm(&self, body: &CommitBody, branch: &Branch) -> Result<(), CommitVerifyError> {
let content = self.content();
match branch.get_member(&content.author) {
Some(m) => {
if m.has_perm(body.to_type()) {
return Ok(());
}
}
None => (),
}
Err(CommitVerifyError::PermissionDenied)
}
/// Verify if the commit's `body` and dependencies (`deps` & `acks`) are available in the `store`
pub fn verify_deps(&self, store: &impl RepoStore) -> Result<Vec<ObjectId>, CommitLoadError> {
//debug_println!(">> verify_deps: #{}", self.seq());
/// Load `Commit`s of a `Branch` from the `RepoStore` starting from the given `Commit`,
/// and collect missing `ObjectId`s
fn load_branch(
commit: &Commit,
store: &impl RepoStore,
visited: &mut HashSet<ObjectId>,
missing: &mut HashSet<ObjectId>,
) -> Result<(), CommitLoadError> {
//debug_println!(">>> load_branch: #{}", commit.seq());
// the commit verify_deps() was called on may not have an ID set,
// but the commits loaded from store should have it
match commit.id() {
Some(id) => {
if visited.contains(&id) {
return Ok(());
}
visited.insert(id);
}
None => (),
}
// load body & check if it's the Branch commit at the root
let is_root = match commit.load_body(store) {
Ok(body) => body.to_type() == CommitType::Branch,
Err(CommitLoadError::MissingBlocks(m)) => {
missing.extend(m);
false
}
Err(e) => return Err(e),
};
debug_println!("!!! is_root: {}", is_root);
// load deps
if !is_root {
for dep in commit.deps_acks() {
match Commit::load(dep, store) {
Ok(c) => {
load_branch(&c, store, visited, missing)?;
}
Err(CommitLoadError::MissingBlocks(m)) => {
missing.extend(m);
}
Err(e) => return Err(e),
}
}
}
Ok(())
}
let mut visited = HashSet::new();
let mut missing = HashSet::new();
load_branch(self, store, &mut visited, &mut missing)?;
if !missing.is_empty() {
return Err(CommitLoadError::MissingBlocks(Vec::from_iter(missing)));
}
Ok(Vec::from_iter(visited))
}
/// Verify signature, permissions, and dependencies
pub fn verify(&self, branch: &Branch, store: &impl RepoStore) -> Result<(), CommitVerifyError> {
self.verify_sig()
.map_err(|_e| CommitVerifyError::InvalidSignature)?;
        let body = self
            .load_body(store)
            .map_err(CommitVerifyError::BodyLoadError)?;
        self.verify_perm(&body, branch)?;
        self.verify_deps(store)
            .map_err(CommitVerifyError::DepLoadError)?;
Ok(())
}
}
#[cfg(test)]
mod test {
use std::collections::HashMap;
use ed25519_dalek::*;
use rand::rngs::OsRng;
use crate::branch::*;
use crate::commit::*;
use crate::store::*;
use crate::types::*;
#[test]
pub fn test_commit() {
let mut csprng = OsRng {};
let keypair: Keypair = Keypair::generate(&mut csprng);
println!(
"private key: ({}) {:?}",
keypair.secret.as_bytes().len(),
keypair.secret.as_bytes()
);
println!(
"public key: ({}) {:?}",
keypair.public.as_bytes().len(),
keypair.public.as_bytes()
);
let ed_priv_key = keypair.secret.to_bytes();
let ed_pub_key = keypair.public.to_bytes();
let priv_key = PrivKey::Ed25519PrivKey(ed_priv_key);
let pub_key = PubKey::Ed25519PubKey(ed_pub_key);
let seq = 3;
let obj_ref = ObjectRef {
id: ObjectId::Blake3Digest32([1; 32]),
key: SymKey::ChaCha20Key([2; 32]),
};
let obj_refs = vec![obj_ref];
let branch = obj_ref.clone();
let deps = obj_refs.clone();
let acks = obj_refs.clone();
let refs = obj_refs.clone();
let metadata = vec![1, 2, 3];
let body_ref = obj_ref.clone();
let expiry = Some(2342);
let commit = Commit::new(
priv_key, pub_key, seq, branch, deps, acks, refs, metadata, body_ref, expiry,
)
.unwrap();
println!("commit: {:?}", commit);
let store = HashMapRepoStore::new();
let metadata = [66u8; 64].to_vec();
let commit_types = vec![CommitType::Ack, CommitType::Transaction];
let key: [u8; 32] = [0; 32];
let secret = SymKey::ChaCha20Key(key);
let member = MemberV0::new(pub_key, commit_types, metadata.clone());
let members = vec![member];
let mut quorum = HashMap::new();
quorum.insert(CommitType::Transaction, 3);
let ack_delay = RelTime::Minutes(3);
let tags = [99u8; 32].to_vec();
let branch = Branch::new(
pub_key.clone(),
pub_key.clone(),
secret,
members,
quorum,
ack_delay,
tags,
metadata,
);
//println!("branch: {:?}", branch);
let body = CommitBody::Ack(Ack::V0());
//println!("body: {:?}", body);
match commit.load_body(&store) {
Ok(_b) => panic!("Body should not exist"),
Err(CommitLoadError::MissingBlocks(missing)) => {
assert_eq!(missing.len(), 1);
}
Err(e) => panic!("Commit verify error: {:?}", e),
}
let content = commit.content();
println!("content: {:?}", content);
commit.verify_sig().expect("Invalid signature");
commit
.verify_perm(&body, &branch)
.expect("Permission denied");
match commit.verify_deps(&store) {
Ok(_) => panic!("Commit should not be Ok"),
Err(CommitLoadError::MissingBlocks(missing)) => {
assert_eq!(missing.len(), 1);
}
Err(e) => panic!("Commit verify error: {:?}", e),
}
match commit.verify(&branch, &store) {
Ok(_) => panic!("Commit should not be Ok"),
Err(CommitVerifyError::BodyLoadError(CommitLoadError::MissingBlocks(missing))) => {
assert_eq!(missing.len(), 1);
}
Err(e) => panic!("Commit verify error: {:?}", e),
}
}
}

p2p-repo/src/errors.rs
@ -0,0 +1,29 @@
// Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// All rights reserved.
// This code is partly derived from work written by TG x Thoth from P2Pcollab.
// Copyright 2022 TG x Thoth
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
//! Errors
#[derive(Debug)]
pub enum NgError {
InvalidSignature,
SerializationError,
}
impl From<serde_bare::error::Error> for NgError {
    fn from(_e: serde_bare::error::Error) -> Self {
NgError::SerializationError
}
}
impl From<ed25519_dalek::ed25519::Error> for NgError {
    fn from(_e: ed25519_dalek::ed25519::Error) -> Self {
NgError::InvalidSignature
}
}

p2p-repo/src/lib.rs
@ -0,0 +1,19 @@
pub mod types;
pub mod store;
pub mod block;
pub mod object;
pub mod commit;
pub mod branch;
pub mod repo;
pub mod utils;
pub mod errors;
pub mod broker_store;

p2p-repo/src/object.rs
@ -0,0 +1,909 @@
// Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// All rights reserved.
// This code is partly derived from work written by TG x Thoth from P2Pcollab.
// Copyright 2022 TG x Thoth
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
//! Merkle hash tree of Objects
use std::collections::{HashMap, HashSet};
use debug_print::*;
use chacha20::cipher::{KeyIvInit, StreamCipher};
use chacha20::ChaCha20;
use crate::store::*;
use crate::types::*;
/// Size of a serialized empty Block
const EMPTY_BLOCK_SIZE: usize = 12;
/// Size of a serialized BlockId
const BLOCK_ID_SIZE: usize = 33;
/// Size of serialized SymKey
const BLOCK_KEY_SIZE: usize = 33;
/// Size of serialized Object with deps reference.
const EMPTY_ROOT_SIZE_DEPSREF: usize = 77;
/// Extra size needed if depsRef used instead of deps list.
const DEPSREF_OVERLOAD: usize = EMPTY_ROOT_SIZE_DEPSREF - EMPTY_BLOCK_SIZE;
/// Varint extra bytes when reaching the maximum value we will ever use
const BIG_VARINT_EXTRA: usize = 3;
/// Varint extra bytes when reaching the maximum size of data byte arrays.
const DATA_VARINT_EXTRA: usize = 4;
/// Max extra space used by the deps list
const MAX_DEPS_SIZE: usize = 8 * BLOCK_ID_SIZE;
#[derive(Debug)]
pub struct Object {
/// Blocks of the Object (nodes of the tree)
blocks: Vec<Block>,
/// Dependencies
deps: Vec<ObjectId>,
}
/// Object parsing errors
#[derive(Debug)]
pub enum ObjectParseError {
/// Missing blocks
MissingBlocks(Vec<BlockId>),
/// Missing root key
MissingRootKey,
/// Invalid BlockId encountered in the tree
InvalidBlockId,
/// Too many or too few children of a block
InvalidChildren,
/// Number of keys does not match number of children of a block
InvalidKeys,
/// Invalid DepList object content
InvalidDeps,
/// Error deserializing content of a block
BlockDeserializeError,
/// Error deserializing content of the object
ObjectDeserializeError,
}
/// Object copy error
#[derive(Debug)]
pub enum ObjectCopyError {
NotFound,
ParseError,
}
impl Object {
fn convergence_key(repo_pubkey: PubKey, repo_secret: SymKey) -> [u8; blake3::OUT_LEN] {
let key_material = match (repo_pubkey, repo_secret) {
(PubKey::Ed25519PubKey(pubkey), SymKey::ChaCha20Key(secret)) => {
[pubkey, secret].concat()
}
};
blake3::derive_key("NextGraph Data BLAKE3 key", key_material.as_slice())
}
fn make_block(
content: &[u8],
conv_key: &[u8; blake3::OUT_LEN],
children: Vec<ObjectId>,
deps: ObjectDeps,
expiry: Option<Timestamp>,
) -> Block {
let key_hash = blake3::keyed_hash(conv_key, content);
let nonce = [0u8; 12];
let key = key_hash.as_bytes();
let mut cipher = ChaCha20::new(key.into(), &nonce.into());
let mut content_enc = Vec::from(content);
let mut content_enc_slice = &mut content_enc.as_mut_slice();
cipher.apply_keystream(&mut content_enc_slice);
let key = SymKey::ChaCha20Key(key.clone());
let block = Block::new(children, deps, expiry, content_enc, Some(key));
//debug_println!(">>> make_block:");
//debug_println!("!! id: {:?}", obj.id());
//debug_println!("!! children: ({}) {:?}", children.len(), children);
block
}
fn make_deps(
deps_vec: Vec<ObjectId>,
object_size: usize,
repo_pubkey: PubKey,
repo_secret: SymKey,
) -> ObjectDeps {
if deps_vec.len() <= 8 {
ObjectDeps::ObjectIdList(deps_vec)
} else {
let dep_list = DepList::V0(deps_vec);
let dep_obj = Object::new(
ObjectContent::DepList(dep_list),
vec![],
None,
object_size,
repo_pubkey,
repo_secret,
);
let dep_ref = ObjectRef {
id: dep_obj.id(),
key: dep_obj.key().unwrap(),
};
ObjectDeps::DepListRef(dep_ref)
}
}
/// Build tree from leaves, returns parent nodes
fn make_tree(
leaves: &[Block],
conv_key: &ChaCha20Key,
root_deps: &ObjectDeps,
expiry: Option<Timestamp>,
arity: usize,
) -> Vec<Block> {
let mut parents = vec![];
let chunks = leaves.chunks(arity);
let mut it = chunks.peekable();
while let Some(nodes) = it.next() {
let keys = nodes.iter().map(|block| block.key().unwrap()).collect();
let children = nodes.iter().map(|block| block.id()).collect();
let content = BlockContentV0::InternalNode(keys);
let content_ser = serde_bare::to_vec(&content).unwrap();
let child_deps = ObjectDeps::ObjectIdList(vec![]);
let deps = if parents.is_empty() && it.peek().is_none() {
root_deps.clone()
} else {
child_deps
};
parents.push(Self::make_block(
content_ser.as_slice(),
conv_key,
children,
deps,
expiry,
));
}
//debug_println!("parents += {}", parents.len());
if 1 < parents.len() {
let mut great_parents =
Self::make_tree(parents.as_slice(), conv_key, root_deps, expiry, arity);
parents.append(&mut great_parents);
}
parents
}
/// Create new Object from given content
///
/// The Object is chunked and stored in a Merkle tree
/// The arity of the Merkle tree is the maximum that fits in the given `max_object_size`
///
    /// Arguments:
    /// * `content`: Object content
    /// * `deps`: Dependencies of the object
    /// * `expiry`: Optional expiry timestamp applied to the object's blocks
    /// * `block_size`: Desired block size for chunking content, rounded up to nearest valid block size
    /// * `repo_pubkey`: Repository public key
    /// * `repo_secret`: Repository secret
pub fn new(
content: ObjectContent,
deps: Vec<ObjectId>,
expiry: Option<Timestamp>,
block_size: usize,
repo_pubkey: PubKey,
repo_secret: SymKey,
) -> Object {
// create blocks by chunking + encrypting content
let valid_block_size = store_valid_value_size(block_size);
let data_chunk_size = valid_block_size - EMPTY_BLOCK_SIZE - DATA_VARINT_EXTRA;
let mut blocks: Vec<Block> = vec![];
let conv_key = Self::convergence_key(repo_pubkey, repo_secret);
let obj_deps = Self::make_deps(deps.clone(), valid_block_size, repo_pubkey, repo_secret);
let content_ser = serde_bare::to_vec(&content).unwrap();
if EMPTY_BLOCK_SIZE + DATA_VARINT_EXTRA + BLOCK_ID_SIZE * deps.len() + content_ser.len()
<= valid_block_size
{
// content fits in root node
let data_chunk = BlockContentV0::DataChunk(content_ser.clone());
let content_ser = serde_bare::to_vec(&data_chunk).unwrap();
blocks.push(Self::make_block(
content_ser.as_slice(),
&conv_key,
vec![],
obj_deps,
expiry,
));
} else {
// chunk content and create leaf nodes
for chunk in content_ser.chunks(data_chunk_size) {
let data_chunk = BlockContentV0::DataChunk(chunk.to_vec());
let content_ser = serde_bare::to_vec(&data_chunk).unwrap();
blocks.push(Self::make_block(
content_ser.as_slice(),
&conv_key,
vec![],
ObjectDeps::ObjectIdList(vec![]),
expiry,
));
}
// internal nodes
// arity: max number of ObjectRefs that fit inside an InternalNode Object within the object_size limit
let arity: usize =
(valid_block_size - EMPTY_BLOCK_SIZE - BIG_VARINT_EXTRA * 2 - MAX_DEPS_SIZE)
/ (BLOCK_ID_SIZE + BLOCK_KEY_SIZE);
let mut parents =
Self::make_tree(blocks.as_slice(), &conv_key, &obj_deps, expiry, arity);
blocks.append(&mut parents);
}
Object { blocks, deps }
}
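    // Usage sketch (illustrative, not part of this commit; `repo_pubkey`,
    // `repo_secret` and `store` are hypothetical bindings):
    //
    //     let content = ObjectContent::File(File::V0(FileV0 {
    //         content_type: vec![],
    //         metadata: vec![],
    //         content: vec![42u8; 1000],
    //     }));
    //     let obj = Object::new(content, vec![], None, 0, repo_pubkey, repo_secret);
    //     obj.save(&mut store)?;
    //     let obj_ref = obj.reference().unwrap(); // root id + key, ready to share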
pub fn copy(
&self,
expiry: Option<Timestamp>,
repo_pubkey: PubKey,
repo_secret: SymKey,
) -> Result<Object, ObjectCopyError> {
// getting the old object from store
let leaves: Vec<Block> = self.leaves().map_err(|_e| ObjectCopyError::ParseError)?;
let conv_key = Self::convergence_key(repo_pubkey, repo_secret);
let block_size = leaves.first().unwrap().content().len();
let valid_block_size = store_valid_value_size(block_size);
let mut blocks: Vec<Block> = vec![];
for block in leaves {
let mut copy = block.clone();
copy.set_expiry(expiry);
blocks.push(copy);
}
// internal nodes
// arity: max number of ObjectRefs that fit inside an InternalNode Object within the object_size limit
let arity: usize =
(valid_block_size - EMPTY_BLOCK_SIZE - BIG_VARINT_EXTRA * 2 - MAX_DEPS_SIZE)
/ (BLOCK_ID_SIZE + BLOCK_KEY_SIZE);
let mut parents = Self::make_tree(
blocks.as_slice(),
&conv_key,
self.root().deps(),
expiry,
arity,
);
blocks.append(&mut parents);
Ok(Object {
blocks,
deps: self.deps().clone(),
})
}
/// Load an Object from RepoStore
///
    /// Returns Ok(Object), or Err(ObjectParseError::MissingBlocks) with the list of missing BlockIds
pub fn load(
id: ObjectId,
key: Option<SymKey>,
store: &impl RepoStore,
) -> Result<Object, ObjectParseError> {
fn load_tree(
parents: Vec<BlockId>,
store: &impl RepoStore,
blocks: &mut Vec<Block>,
missing: &mut Vec<BlockId>,
) {
let mut children: Vec<BlockId> = vec![];
for id in parents {
match store.get(&id) {
Ok(block) => {
blocks.insert(0, block.clone());
match block {
Block::V0(o) => {
children.extend(o.children.iter().rev());
}
}
}
Err(_) => missing.push(id.clone()),
}
}
if !children.is_empty() {
load_tree(children, store, blocks, missing);
}
}
let mut blocks: Vec<Block> = vec![];
let mut missing: Vec<BlockId> = vec![];
load_tree(vec![id], store, &mut blocks, &mut missing);
if !missing.is_empty() {
return Err(ObjectParseError::MissingBlocks(missing));
}
let root = blocks.last_mut().unwrap();
if key.is_some() {
root.set_key(key);
}
let deps = match root.deps().clone() {
ObjectDeps::ObjectIdList(deps_vec) => deps_vec,
ObjectDeps::DepListRef(deps_ref) => {
let obj = Object::load(deps_ref.id, Some(deps_ref.key), store)?;
match obj.content()? {
ObjectContent::DepList(DepList::V0(deps_vec)) => deps_vec,
_ => return Err(ObjectParseError::InvalidDeps),
}
}
};
Ok(Object { blocks, deps })
}
/// Save blocks of the object in the store
pub fn save(&self, store: &mut impl RepoStore) -> Result<(), StorageError> {
let mut deduplicated: HashSet<ObjectId> = HashSet::new();
for block in &self.blocks {
let id = block.id();
            if !deduplicated.contains(&id) {
store.put(block)?;
deduplicated.insert(id);
}
}
Ok(())
}
/// Get the ID of the Object
pub fn id(&self) -> ObjectId {
self.blocks.last().unwrap().id()
}
/// Get the key for the Object
pub fn key(&self) -> Option<SymKey> {
self.blocks.last().unwrap().key()
}
/// Get an `ObjectRef` for the root object
pub fn reference(&self) -> Option<ObjectRef> {
if self.key().is_some() {
Some(ObjectRef {
id: self.id(),
key: self.key().unwrap(),
})
} else {
None
}
}
pub fn root(&self) -> &Block {
self.blocks.last().unwrap()
}
pub fn expiry(&self) -> Option<Timestamp> {
self.blocks.last().unwrap().expiry()
}
pub fn deps(&self) -> &Vec<ObjectId> {
&self.deps
}
pub fn blocks(&self) -> &Vec<Block> {
&self.blocks
}
pub fn to_hashmap(&self) -> HashMap<BlockId, Block> {
let mut map: HashMap<BlockId, Block> = HashMap::new();
for block in &self.blocks {
map.insert(block.id(), block.clone());
}
map
}
/// Collect leaves from the tree
fn collect_leaves(
blocks: &Vec<Block>,
parents: &Vec<(ObjectId, SymKey)>,
parent_index: usize,
leaves: &mut Option<&mut Vec<Block>>,
obj_content: &mut Option<&mut Vec<u8>>,
) -> Result<(), ObjectParseError> {
/*debug_println!(
">>> collect_leaves: #{}..{}",
parent_index,
parent_index + parents.len() - 1
);*/
let mut children: Vec<(ObjectId, SymKey)> = vec![];
let mut i = parent_index;
for (id, key) in parents {
//debug_println!("!!! parent: #{}", i);
let block = &blocks[i];
i += 1;
// verify object ID
if *id != block.id() {
debug_println!("Invalid ObjectId.\nExp: {:?}\nGot: {:?}", *id, block.id());
return Err(ObjectParseError::InvalidBlockId);
}
match block {
Block::V0(b) => {
// decrypt content
let mut content_dec = b.content.clone();
match key {
SymKey::ChaCha20Key(key) => {
let nonce = [0u8; 12];
let mut cipher = ChaCha20::new(key.into(), &nonce.into());
let mut content_dec_slice = &mut content_dec.as_mut_slice();
cipher.apply_keystream(&mut content_dec_slice);
}
}
// deserialize content
                    let content: BlockContentV0 =
                        match serde_bare::from_slice(content_dec.as_slice()) {
                            Ok(c) => c,
                            Err(e) => {
                                debug_println!("Block deserialize error: {}", e);
                                return Err(ObjectParseError::BlockDeserializeError);
                            }
                        };
// parse content
match content {
BlockContentV0::InternalNode(keys) => {
if keys.len() != b.children.len() {
debug_println!(
"Invalid keys length: got {}, expected {}",
keys.len(),
b.children.len()
);
debug_println!("!!! children: {:?}", b.children);
debug_println!("!!! keys: {:?}", keys);
return Err(ObjectParseError::InvalidKeys);
}
for (id, key) in b.children.iter().zip(keys.iter()) {
children.push((id.clone(), key.clone()));
}
}
BlockContentV0::DataChunk(chunk) => {
if leaves.is_some() {
let mut leaf = block.clone();
leaf.set_key(Some(*key));
let l = &mut **leaves.as_mut().unwrap();
l.push(leaf);
}
if obj_content.is_some() {
let c = &mut **obj_content.as_mut().unwrap();
c.extend_from_slice(chunk.as_slice());
}
}
}
}
}
}
if !children.is_empty() {
if parent_index < children.len() {
return Err(ObjectParseError::InvalidChildren);
}
            Self::collect_leaves(
                blocks,
                &children,
                parent_index - children.len(),
                leaves,
                obj_content,
            )?;
}
Ok(())
}
/// Parse the Object and return the leaf Blocks with decryption key set
pub fn leaves(&self) -> Result<Vec<Block>, ObjectParseError> {
let mut leaves: Vec<Block> = vec![];
let parents = vec![(self.id(), self.key().unwrap())];
        Self::collect_leaves(
            &self.blocks,
            &parents,
            self.blocks.len() - 1,
            &mut Some(&mut leaves),
            &mut None,
        )?;
        Ok(leaves)
}
/// Parse the Object and return the decrypted content assembled from Blocks
pub fn content(&self) -> Result<ObjectContent, ObjectParseError> {
if self.key().is_none() {
return Err(ObjectParseError::MissingRootKey);
}
let mut obj_content: Vec<u8> = vec![];
let parents = vec![(self.id(), self.key().unwrap())];
        Self::collect_leaves(
            &self.blocks,
            &parents,
            self.blocks.len() - 1,
            &mut None,
            &mut Some(&mut obj_content),
        )?;
        match serde_bare::from_slice(obj_content.as_slice()) {
            Ok(c) => Ok(c),
            Err(e) => {
                debug_println!("Object deserialize error: {}", e);
                Err(ObjectParseError::ObjectDeserializeError)
            }
        }
}
}
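// Shape sketch of Object::blocks (leaves first, root last, so blocks.last()
// is always the root). For three data chunks with arity 2:
//
//                     root (InternalNode)
//                    /                   \
//          InternalNode(A, B)       InternalNode(C)
//             /          \                 |
//          leaf A      leaf B           leaf C
//
// blocks = [A, B, C, parent(A,B), parent(C), root]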
#[cfg(test)]
mod test {
use crate::object::*;
use crate::store::*;
use crate::types::*;
    // These constants are derived from store_max_value_size() (see store.rs)
/// Maximum arity of branch containing max number of leaves
const MAX_ARITY_LEAVES: usize = 31774;
/// Maximum arity of root branch
const MAX_ARITY_ROOT: usize = 31770;
/// Maximum data that can fit in object.content
const MAX_DATA_PAYLOAD_SIZE: usize = 2097112;
/// Test tree API
#[test]
pub fn test_object() {
let file = File::V0(FileV0 {
content_type: Vec::from("file/test"),
metadata: Vec::from("some meta data here"),
content: [(0..255).collect::<Vec<u8>>().as_slice(); 320].concat(),
});
let content = ObjectContent::File(file);
let deps: Vec<ObjectId> = vec![Digest::Blake3Digest32([9; 32])];
let exp = Some(2u32.pow(31));
let max_object_size = 0;
let repo_secret = SymKey::ChaCha20Key([0; 32]);
let repo_pubkey = PubKey::Ed25519PubKey([1; 32]);
let obj = Object::new(
content.clone(),
deps.clone(),
exp,
max_object_size,
repo_pubkey,
repo_secret,
);
println!("obj.id: {:?}", obj.id());
println!("obj.key: {:?}", obj.key());
println!("obj.deps: {:?}", obj.deps());
println!("obj.blocks.len: {:?}", obj.blocks().len());
let mut i = 0;
for node in obj.blocks() {
println!("#{}: {:?}", i, node.id());
i += 1;
}
assert_eq!(*obj.deps(), deps);
match obj.content() {
Ok(cnt) => {
assert_eq!(content, cnt);
}
Err(e) => panic!("Object parse error: {:?}", e),
}
let mut store = HashMapRepoStore::new();
obj.save(&mut store).expect("Object save error");
let obj2 = Object::load(obj.id(), obj.key(), &store).unwrap();
println!("obj2.id: {:?}", obj2.id());
println!("obj2.key: {:?}", obj2.key());
println!("obj2.deps: {:?}", obj2.deps());
println!("obj2.blocks.len: {:?}", obj2.blocks().len());
let mut i = 0;
for node in obj2.blocks() {
println!("#{}: {:?}", i, node.id());
i += 1;
}
        assert_eq!(*obj2.deps(), deps);
match obj2.content() {
Ok(cnt) => {
assert_eq!(content, cnt);
}
Err(e) => panic!("Object2 parse error: {:?}", e),
}
let obj3 = Object::load(obj.id(), None, &store).unwrap();
println!("obj3.id: {:?}", obj3.id());
println!("obj3.key: {:?}", obj3.key());
println!("obj3.deps: {:?}", obj3.deps());
println!("obj3.blocks.len: {:?}", obj3.blocks().len());
let mut i = 0;
for node in obj3.blocks() {
println!("#{}: {:?}", i, node.id());
i += 1;
}
assert_eq!(*obj3.deps(), deps);
match obj3.content() {
Err(ObjectParseError::MissingRootKey) => (),
Err(e) => panic!("Object3 parse error: {:?}", e),
Ok(_) => panic!("Object3 should not return content"),
}
let exp4 = Some(2342);
let obj4 = obj.copy(exp4, repo_pubkey, repo_secret).unwrap();
obj4.save(&mut store).unwrap();
assert_eq!(obj4.expiry(), exp4);
assert_eq!(*obj.deps(), deps);
match obj4.content() {
Ok(cnt) => {
assert_eq!(content, cnt);
}
Err(e) => panic!("Object3 parse error: {:?}", e),
}
}
    /// Checks that content that fits in the root node is not chunked into child nodes
#[test]
pub fn test_depth_1() {
let deps: Vec<ObjectId> = vec![Digest::Blake3Digest32([9; 32])];
let empty_file = ObjectContent::File(File::V0(FileV0 {
content_type: vec![],
metadata: vec![],
content: vec![],
}));
let empty_file_ser = serde_bare::to_vec(&empty_file).unwrap();
println!("empty file size: {}", empty_file_ser.len());
let size = store_max_value_size()
- EMPTY_BLOCK_SIZE
- DATA_VARINT_EXTRA
- BLOCK_ID_SIZE * deps.len()
- empty_file_ser.len()
- DATA_VARINT_EXTRA;
println!("file size: {}", size);
let content = ObjectContent::File(File::V0(FileV0 {
content_type: vec![],
metadata: vec![],
content: vec![99; size],
}));
let content_ser = serde_bare::to_vec(&content).unwrap();
println!("content len: {}", content_ser.len());
let expiry = Some(2u32.pow(31));
let max_object_size = store_max_value_size();
let repo_secret = SymKey::ChaCha20Key([0; 32]);
let repo_pubkey = PubKey::Ed25519PubKey([1; 32]);
let object = Object::new(
content,
deps,
expiry,
max_object_size,
repo_pubkey,
repo_secret,
);
println!("root_id: {:?}", object.id());
println!("root_key: {:?}", object.key().unwrap());
println!("nodes.len: {:?}", object.blocks().len());
//println!("root: {:?}", tree.root());
//println!("nodes: {:?}", object.blocks);
assert_eq!(object.blocks.len(), 1);
}
#[test]
pub fn test_block_size() {
let max_block_size = store_max_value_size();
println!("max_object_size: {}", max_block_size);
let id = Digest::Blake3Digest32([0u8; 32]);
let key = SymKey::ChaCha20Key([0u8; 32]);
let one_key = BlockContentV0::InternalNode(vec![key]);
let one_key_ser = serde_bare::to_vec(&one_key).unwrap();
let two_keys = BlockContentV0::InternalNode(vec![key, key]);
let two_keys_ser = serde_bare::to_vec(&two_keys).unwrap();
let max_keys = BlockContentV0::InternalNode(vec![key; MAX_ARITY_LEAVES]);
let max_keys_ser = serde_bare::to_vec(&max_keys).unwrap();
let data = BlockContentV0::DataChunk(vec![]);
let data_ser = serde_bare::to_vec(&data).unwrap();
let data_full = BlockContentV0::DataChunk(vec![0; MAX_DATA_PAYLOAD_SIZE]);
let data_full_ser = serde_bare::to_vec(&data_full).unwrap();
let leaf_empty = Block::new(
vec![],
ObjectDeps::ObjectIdList(vec![]),
Some(2342),
data_ser.clone(),
None,
);
let leaf_empty_ser = serde_bare::to_vec(&leaf_empty).unwrap();
let leaf_full_data = Block::new(
vec![],
ObjectDeps::ObjectIdList(vec![]),
Some(2342),
data_full_ser.clone(),
None,
);
let leaf_full_data_ser = serde_bare::to_vec(&leaf_full_data).unwrap();
let root_depsref = Block::new(
vec![],
ObjectDeps::DepListRef(ObjectRef { id: id, key: key }),
Some(2342),
data_ser.clone(),
None,
);
let root_depsref_ser = serde_bare::to_vec(&root_depsref).unwrap();
let internal_max = Block::new(
vec![id; MAX_ARITY_LEAVES],
ObjectDeps::ObjectIdList(vec![]),
Some(2342),
max_keys_ser.clone(),
None,
);
let internal_max_ser = serde_bare::to_vec(&internal_max).unwrap();
let internal_one = Block::new(
vec![id; 1],
ObjectDeps::ObjectIdList(vec![]),
Some(2342),
one_key_ser.clone(),
None,
);
let internal_one_ser = serde_bare::to_vec(&internal_one).unwrap();
let internal_two = Block::new(
vec![id; 2],
ObjectDeps::ObjectIdList(vec![]),
Some(2342),
two_keys_ser.clone(),
None,
);
let internal_two_ser = serde_bare::to_vec(&internal_two).unwrap();
let root_one = Block::new(
vec![id; 1],
ObjectDeps::ObjectIdList(vec![id; 8]),
Some(2342),
one_key_ser.clone(),
None,
);
let root_one_ser = serde_bare::to_vec(&root_one).unwrap();
let root_two = Block::new(
vec![id; 2],
ObjectDeps::ObjectIdList(vec![id; 8]),
Some(2342),
two_keys_ser.clone(),
None,
);
let root_two_ser = serde_bare::to_vec(&root_two).unwrap();
println!(
"range of valid value sizes {} {}",
store_valid_value_size(0),
store_max_value_size()
);
println!(
"max_data_payload_of_object: {}",
max_block_size - EMPTY_BLOCK_SIZE - DATA_VARINT_EXTRA
);
println!(
"max_data_payload_depth_1: {}",
max_block_size - EMPTY_BLOCK_SIZE - DATA_VARINT_EXTRA - MAX_DEPS_SIZE
);
println!(
"max_data_payload_depth_2: {}",
MAX_ARITY_ROOT * MAX_DATA_PAYLOAD_SIZE
);
println!(
"max_data_payload_depth_3: {}",
MAX_ARITY_ROOT * MAX_ARITY_LEAVES * MAX_DATA_PAYLOAD_SIZE
);
let max_arity_leaves = (max_block_size - EMPTY_BLOCK_SIZE - BIG_VARINT_EXTRA * 2)
/ (BLOCK_ID_SIZE + BLOCK_KEY_SIZE);
println!("max_arity_leaves: {}", max_arity_leaves);
assert_eq!(max_arity_leaves, MAX_ARITY_LEAVES);
assert_eq!(
max_block_size - EMPTY_BLOCK_SIZE - DATA_VARINT_EXTRA,
MAX_DATA_PAYLOAD_SIZE
);
let max_arity_root =
(max_block_size - EMPTY_BLOCK_SIZE - MAX_DEPS_SIZE - BIG_VARINT_EXTRA * 2)
/ (BLOCK_ID_SIZE + BLOCK_KEY_SIZE);
println!("max_arity_root: {}", max_arity_root);
assert_eq!(max_arity_root, MAX_ARITY_ROOT);
println!("store_max_value_size: {}", leaf_full_data_ser.len());
assert_eq!(leaf_full_data_ser.len(), max_block_size);
println!("leaf_empty: {}", leaf_empty_ser.len());
assert_eq!(leaf_empty_ser.len(), EMPTY_BLOCK_SIZE);
println!("root_depsref: {}", root_depsref_ser.len());
assert_eq!(root_depsref_ser.len(), EMPTY_ROOT_SIZE_DEPSREF);
println!("internal_max: {}", internal_max_ser.len());
assert_eq!(
internal_max_ser.len(),
EMPTY_BLOCK_SIZE
+ BIG_VARINT_EXTRA * 2
+ MAX_ARITY_LEAVES * (BLOCK_ID_SIZE + BLOCK_KEY_SIZE)
);
assert!(internal_max_ser.len() < max_block_size);
println!("internal_one: {}", internal_one_ser.len());
assert_eq!(
internal_one_ser.len(),
EMPTY_BLOCK_SIZE + 1 * BLOCK_ID_SIZE + 1 * BLOCK_KEY_SIZE
);
println!("internal_two: {}", internal_two_ser.len());
assert_eq!(
internal_two_ser.len(),
EMPTY_BLOCK_SIZE + 2 * BLOCK_ID_SIZE + 2 * BLOCK_KEY_SIZE
);
println!("root_one: {}", root_one_ser.len());
assert_eq!(
root_one_ser.len(),
EMPTY_BLOCK_SIZE + 8 * BLOCK_ID_SIZE + 1 * BLOCK_ID_SIZE + 1 * BLOCK_KEY_SIZE
);
println!("root_two: {}", root_two_ser.len());
assert_eq!(
root_two_ser.len(),
EMPTY_BLOCK_SIZE + 8 * BLOCK_ID_SIZE + 2 * BLOCK_ID_SIZE + 2 * BLOCK_KEY_SIZE
);
// let object_size_1 = 4096 * 1 - VALUE_HEADER_SIZE;
// let object_size_512 = 4096 * MAX_PAGES_PER_VALUE - VALUE_HEADER_SIZE;
// let arity_1: usize =
// (object_size_1 - 8 * OBJECT_ID_SIZE) / (OBJECT_ID_SIZE + OBJECT_KEY_SIZE);
// let arity_512: usize =
// (object_size_512 - 8 * OBJECT_ID_SIZE) / (OBJECT_ID_SIZE + OBJECT_KEY_SIZE);
// println!("1-page object_size: {}", object_size_1);
// println!("512-page object_size: {}", object_size_512);
// println!("max arity of 1-page object: {}", arity_1);
// println!("max arity of 512-page object: {}", arity_512);
}
}

@ -0,0 +1,46 @@
// Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// All rights reserved.
// This code is partly derived from work written by TG x Thoth from P2Pcollab.
// Copyright 2022 TG x Thoth
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
//! Repository
use crate::types::*;
impl RepositoryV0 {
pub fn new(
id: &PubKey,
branches: &Vec<ObjectRef>,
allow_ext_requests: bool,
metadata: &Vec<u8>,
) -> RepositoryV0 {
RepositoryV0 {
id: id.clone(),
branches: branches.clone(),
allow_ext_requests,
metadata: metadata.clone(),
}
}
}
impl Repository {
pub fn new(
id: &PubKey,
branches: &Vec<ObjectRef>,
allow_ext_requests: bool,
metadata: &Vec<u8>,
) -> Repository {
Repository::V0(RepositoryV0::new(
id,
branches,
allow_ext_requests,
metadata,
))
}
}
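// Usage sketch (illustrative, not part of this commit; `repo_pubkey` is a
// hypothetical PubKey):
//
//     let repo = Repository::new(&repo_pubkey, &vec![], false, &vec![]);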

@ -0,0 +1,109 @@
// Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// All rights reserved.
// This code is partly derived from work written by TG x Thoth from P2Pcollab.
// Copyright 2022 TG x Thoth
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
//! Block store
use crate::types::*;
use std::{
cmp::min,
collections::{hash_map::Iter, HashMap},
mem::size_of_val,
};
use std::sync::{Arc, RwLock};
pub trait RepoStore {
/// Load a block from the store.
fn get(&self, id: &BlockId) -> Result<Block, StorageError>;
/// Save a block to the store.
fn put(&self, block: &Block) -> Result<BlockId, StorageError>;
/// Delete a block from the store.
fn del(&self, id: &BlockId) -> Result<(Block, usize), StorageError>;
}
#[derive(Debug, PartialEq)]
pub enum StorageError {
NotFound,
InvalidValue,
BackendError,
SerializationError,
}
impl From<serde_bare::error::Error> for StorageError {
    fn from(_e: serde_bare::error::Error) -> Self {
        StorageError::SerializationError
}
}
const MIN_SIZE: usize = 4072;
const PAGE_SIZE: usize = 4096;
const HEADER: usize = PAGE_SIZE - MIN_SIZE;
const MAX_FACTOR: usize = 512;
/// Returns a valid/optimal value size for the entries of the storage backend.
pub fn store_valid_value_size(size: usize) -> usize {
min(
((size + HEADER) as f32 / PAGE_SIZE as f32).ceil() as usize,
MAX_FACTOR,
) * PAGE_SIZE
- HEADER
}
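// Worked example: with PAGE_SIZE = 4096 and HEADER = PAGE_SIZE - MIN_SIZE = 24,
//   store_valid_value_size(0)    == 4072                  (1 page minus header)
//   store_valid_value_size(4073) == 2 * 4096 - 24 == 8168 (rounded up to 2 pages)
// and sizes are capped at MAX_FACTOR pages, i.e. store_max_value_size() ==
// 512 * 4096 - 24 == 2_097_128 bytes.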
/// Returns the maximum value size for the entries of the storage backend.
pub const fn store_max_value_size() -> usize {
MAX_FACTOR * PAGE_SIZE - HEADER
}
/// Store with a HashMap backend
pub struct HashMapRepoStore {
blocks: RwLock<HashMap<BlockId, Block>>,
}
impl HashMapRepoStore {
pub fn new() -> HashMapRepoStore {
HashMapRepoStore {
blocks: RwLock::new(HashMap::new()),
}
}
pub fn get_len(&self) -> usize {
self.blocks.read().unwrap().len()
}
pub fn get_all(&self) -> Vec<Block> {
        self.blocks.read().unwrap().values().cloned().collect()
}
}
impl RepoStore for HashMapRepoStore {
fn get(&self, id: &BlockId) -> Result<Block, StorageError> {
match self.blocks.read().unwrap().get(id) {
Some(block) => Ok(block.clone()),
None => Err(StorageError::NotFound),
}
}
fn put(&self, block: &Block) -> Result<BlockId, StorageError> {
let id = block.id();
let mut b = block.clone();
b.set_key(None);
self.blocks.write().unwrap().insert(id, b);
Ok(id)
}
fn del(&self, id: &BlockId) -> Result<(Block, usize), StorageError> {
let block = self.blocks.write().unwrap().remove(id).ok_or(StorageError::NotFound)?;
let size = size_of_val(&block);
Ok((block, size))
}
}
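// Minimal round-trip sketch (not in the original commit): exercises the
// RepoStore trait through the in-memory backend defined above.
#[cfg(test)]
mod hashmap_store_sketch {
    use super::*;

    #[test]
    fn put_then_get_roundtrip() {
        let store = HashMapRepoStore::new();
        let block = Block::new(
            vec![],
            ObjectDeps::ObjectIdList(vec![]),
            None,
            vec![1, 2, 3],
            None,
        );
        let id = store.put(&block).unwrap();
        // put() strips the key before storing; get() returns a clone.
        assert_eq!(store.get(&id).unwrap().id(), id);
        // an id that was never stored yields NotFound
        assert!(matches!(
            store.get(&Digest::Blake3Digest32([0; 32])),
            Err(StorageError::NotFound)
        ));
    }
}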

@ -0,0 +1,530 @@
// Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// All rights reserved.
// This code is partly derived from work written by TG x Thoth from P2Pcollab.
// Copyright 2022 TG x Thoth
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
//! P2P Repo types
//!
//! Corresponds to the BARE schema
use core::fmt;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::hash::Hash;
//
// COMMON TYPES
//
/// 32-byte Blake3 hash digest
pub type Blake3Digest32 = [u8; 32];
/// Hash digest
#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)]
pub enum Digest {
Blake3Digest32(Blake3Digest32),
}
impl fmt::Display for Digest {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Digest::Blake3Digest32(d) => write!(f, "{}", hex::encode(d)),
}
}
}
/// ChaCha20 symmetric key
pub type ChaCha20Key = [u8; 32];
/// Symmetric cryptographic key
#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)]
pub enum SymKey {
ChaCha20Key(ChaCha20Key),
}
impl SymKey {
pub fn slice(&self) -> &[u8; 32] {
match self {
SymKey::ChaCha20Key(o) => o,
}
}
}
/// Ed25519 public key
pub type Ed25519PubKey = [u8; 32];
/// Ed25519 private key
pub type Ed25519PrivKey = [u8; 32];
/// Public key
#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)]
pub enum PubKey {
Ed25519PubKey(Ed25519PubKey),
}
impl PubKey {
pub fn slice(&self) -> &[u8; 32] {
match self {
PubKey::Ed25519PubKey(o) => o,
}
}
}
impl fmt::Display for PubKey {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
PubKey::Ed25519PubKey(d) => write!(f, "{}", hex::encode(d)),
}
}
}
/// Private key
#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub enum PrivKey {
Ed25519PrivKey(Ed25519PrivKey),
}
/// Ed25519 signature
pub type Ed25519Sig = [[u8; 32]; 2];
/// Cryptographic signature
#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub enum Sig {
Ed25519Sig(Ed25519Sig),
}
/// Timestamp: absolute time in minutes since 2022-02-22 22:22 UTC
pub type Timestamp = u32;
pub const EPOCH_AS_UNIX_TIMESTAMP: u64 = 1645568520;
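// Worked example: Unix time 1645569120 (2022-02-22 22:32 UTC) maps to
// Timestamp (1645569120 - 1645568520) / 60 == 10.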
/// Relative time (e.g. delay from current time)
#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub enum RelTime {
Seconds(u8),
Minutes(u8),
Hours(u8),
Days(u8),
}
/// Bloom filter (variable size)
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct BloomFilter {
/// Number of hash functions
pub k: u32,
/// Filter
#[serde(with = "serde_bytes")]
pub f: Vec<u8>,
}
/// Bloom filter (128 B)
///
/// (m=1024; k=7; p=0.01; n=107)
pub type BloomFilter128 = [[u8; 32]; 4];
/// Bloom filter (1 KiB)
///
/// (m=8192; k=7; p=0.01; n=855)
pub type BloomFilter1K = [[u8; 32]; 32];
//
// REPOSITORY TYPES
//
/// Block ID:
/// BLAKE3 hash over the serialized Block with encrypted content
pub type BlockId = Digest;
/// Block reference
#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)]
pub struct BlockRef {
/// Object ID
pub id: BlockId,
/// Key for decrypting the Object
pub key: SymKey,
}
/// Object ID
pub type ObjectId = BlockId;
/// Object reference
pub type ObjectRef = BlockRef;
/// Internal node of a Merkle tree
pub type InternalNode = Vec<SymKey>;
/// Content of BlockV0: a Merkle tree node
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub enum BlockContentV0 {
/// Internal node with references to children
InternalNode(InternalNode),
#[serde(with = "serde_bytes")]
DataChunk(Vec<u8>),
}
/// List of ObjectId dependencies as encrypted Object content
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub enum DepList {
V0(Vec<ObjectId>),
}
/// Dependencies of an Object
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub enum ObjectDeps {
/// List of Object IDs (max. 8),
ObjectIdList(Vec<ObjectId>),
/// Reference to an Object that contains a DepList
DepListRef(ObjectRef),
}
/// Immutable block with encrypted content
///
/// `ObjectContent` is chunked and stored as `Block`s in a Merkle tree.
/// A Block is a Merkle tree node.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct BlockV0 {
/// Block ID
#[serde(skip)]
pub id: Option<BlockId>,
/// Block Key
#[serde(skip)]
pub key: Option<SymKey>,
/// Block IDs for child nodes in the Merkle tree
pub children: Vec<BlockId>,
/// Other objects this object depends on (e.g. Commit deps & acks)
/// Only set for the root block
pub deps: ObjectDeps,
/// Expiry time of this object and all of its children
/// when the object should be deleted by all replicas
/// Only set for the root block
pub expiry: Option<Timestamp>,
/// Encrypted ObjectContentV0
///
/// Encrypted using convergent encryption with ChaCha20:
/// - convergence_key: BLAKE3 derive_key ("NextGraph Data BLAKE3 key",
/// repo_pubkey + repo_secret)
/// - key: BLAKE3 keyed hash (convergence_key, plain_object_content)
/// - nonce: 0
#[serde(with = "serde_bytes")]
pub content: Vec<u8>,
}
/// Immutable object with encrypted content
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub enum Block {
V0(BlockV0),
}
/// Repository definition
///
/// Published in root branch, where:
/// - branch_pubkey: repo_pubkey
/// - branch_secret: BLAKE3 derive_key ("NextGraph Root Branch secret",
/// repo_pubkey + repo_secret)
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct RepositoryV0 {
/// Repo public key ID
pub id: PubKey,
/// List of branches
pub branches: Vec<ObjectRef>,
/// Whether or not to allow external requests
pub allow_ext_requests: bool,
/// App-specific metadata
#[serde(with = "serde_bytes")]
pub metadata: Vec<u8>,
}
/// Repository definition
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub enum Repository {
V0(RepositoryV0),
}
/// Add a branch to the repository
#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub enum AddBranch {
V0(ObjectRef),
}
/// Remove a branch from the repository
#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub enum RemoveBranch {
V0(ObjectRef),
}
/// Commit object types
#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq, Hash)]
pub enum CommitType {
Repository,
AddBranch,
RemoveBranch,
Branch,
AddMembers,
EndOfBranch,
Transaction,
Snapshot,
Ack,
}
/// Member of a Branch
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct MemberV0 {
/// Member public key ID
pub id: PubKey,
/// Commit types the member is allowed to publish in the branch
pub commit_types: Vec<CommitType>,
/// App-specific metadata
/// (role, permissions, cryptographic material, etc)
#[serde(with = "serde_bytes")]
pub metadata: Vec<u8>,
}
/// Member of a branch
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub enum Member {
V0(MemberV0),
}
/// Branch definition
///
/// First commit in a branch, signed by branch key
/// In case of a fork, the commit deps indicate
/// the previous branch heads.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct BranchV0 {
/// Branch public key ID
pub id: PubKey,
/// Pub/sub topic for publishing events
pub topic: PubKey,
/// Branch secret key
pub secret: SymKey,
/// Members with permissions
pub members: Vec<MemberV0>,
/// Number of acks required for a commit to be valid
pub quorum: HashMap<CommitType, u32>,
/// Delay to send explicit acks,
/// if not enough implicit acks arrived by then
pub ack_delay: RelTime,
/// Tags for organizing branches within the repository
#[serde(with = "serde_bytes")]
pub tags: Vec<u8>,
/// App-specific metadata (validation rules, etc)
#[serde(with = "serde_bytes")]
pub metadata: Vec<u8>,
}
/// Branch definition
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub enum Branch {
V0(BranchV0),
}
/// Add members to an existing branch
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct AddMembersV0 {
/// Members to add, with permissions
pub members: Vec<MemberV0>,
/// New quorum
pub quorum: Option<HashMap<CommitType, u32>>,
/// New ackDelay
pub ack_delay: Option<RelTime>,
}
/// Add members to an existing branch
///
/// If a member already exists, it overwrites the previous definition,
/// in that case this can only be used for adding new permissions,
/// not to remove existing ones.
/// The quorum and ackDelay can be changed as well
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub enum AddMembers {
V0(AddMembersV0),
}
/// ObjectRef for EndOfBranch
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub enum PlainOrEncryptedObjectRef {
Plain(ObjectRef),
Encrypted(Vec<u8>),
}
/// End of branch
///
/// No more commits accepted afterwards, only acks of this commit
/// May reference a fork where the branch continues
/// with possibly different members, permissions, validation rules.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct EndOfBranchV0 {
/// (Encrypted) reference to forked branch (optional)
pub fork: Option<PlainOrEncryptedObjectRef>,
/// Expiry time when all commits in the branch should be deleted
pub expiry: Timestamp,
}
/// End of branch
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub enum EndOfBranch {
V0(EndOfBranchV0),
}
/// Transaction with CRDT operations
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub enum Transaction {
#[serde(with = "serde_bytes")]
V0(Vec<u8>),
}
/// Snapshot of a Branch
///
/// Contains a data structure
/// computed from the commits at the specified head.
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct SnapshotV0 {
/// Branch heads the snapshot was made from
pub heads: Vec<ObjectId>,
/// Snapshot data structure
#[serde(with = "serde_bytes")]
pub content: Vec<u8>,
}
/// Snapshot of a Branch
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub enum Snapshot {
V0(SnapshotV0),
}
/// Acknowledgement of another Commit
#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub enum Ack {
V0(),
}
/// Commit body, corresponds to CommitType
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub enum CommitBody {
Repository(Repository),
AddBranch(AddBranch),
RemoveBranch(RemoveBranch),
Branch(Branch),
AddMembers(AddMembers),
EndOfBranch(EndOfBranch),
Transaction(Transaction),
Snapshot(Snapshot),
Ack(Ack),
}
/// Content of a Commit
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct CommitContentV0 {
/// Commit author
pub author: PubKey,
/// Author's commit sequence number in this branch
pub seq: u32,
/// Branch the commit belongs to
pub branch: ObjectRef,
/// Direct dependencies of this commit
pub deps: Vec<ObjectRef>,
/// Not directly dependent heads to acknowledge
pub acks: Vec<ObjectRef>,
/// Files the commit references
pub refs: Vec<ObjectRef>,
/// App-specific metadata (commit message, creation time, etc)
#[serde(with = "serde_bytes")]
pub metadata: Vec<u8>,
/// Object with a CommitBody inside
pub body: ObjectRef,
/// Expiry time of the body object
pub expiry: Option<Timestamp>,
}
/// Commit object
/// Signed by branch key, or a member key authorized to publish this commit type
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct CommitV0 {
/// ID of parent Object
#[serde(skip)]
pub id: Option<ObjectId>,
/// Key of parent Object
#[serde(skip)]
pub key: Option<SymKey>,
/// Commit content
pub content: CommitContentV0,
/// Signature over the content by the author
pub sig: Sig,
}
/// Commit Object
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub enum Commit {
V0(CommitV0),
}
/// File Object
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub struct FileV0 {
#[serde(with = "serde_bytes")]
pub content_type: Vec<u8>,
#[serde(with = "serde_bytes")]
pub metadata: Vec<u8>,
#[serde(with = "serde_bytes")]
pub content: Vec<u8>,
}
/// A file stored in an Object
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub enum File {
V0(FileV0),
}
/// Immutable data stored encrypted in a Merkle tree
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub enum ObjectContent {
Commit(Commit),
CommitBody(CommitBody),
File(File),
DepList(DepList),
}
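// Big picture: an ObjectContent is serialized with serde_bare, chunked into
// DataChunk blocks, and wrapped in a Merkle tree of InternalNode blocks (see
// object.rs); the root block's id and key form the ObjectRef that commits,
// branches and repositories point to.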

@ -0,0 +1,79 @@
// Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// All rights reserved.
// This code is partly derived from work written by TG x Thoth from P2Pcollab.
// Copyright 2022 TG x Thoth
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use crate::errors::*;
use crate::types::*;
use ed25519_dalek::*;
use rand::rngs::OsRng;
use std::time::{SystemTime, UNIX_EPOCH};
pub fn sign(
author_privkey: PrivKey,
author_pubkey: PubKey,
content: &Vec<u8>,
) -> Result<Sig, NgError> {
let kp = match (author_privkey, author_pubkey) {
(PrivKey::Ed25519PrivKey(sk), PubKey::Ed25519PubKey(pk)) => [sk, pk].concat(),
};
let keypair = Keypair::from_bytes(kp.as_slice())?;
let sig_bytes = keypair.sign(content.as_slice()).to_bytes();
let mut it = sig_bytes.chunks_exact(32);
let mut ss: Ed25519Sig = [[0; 32], [0; 32]];
ss[0].copy_from_slice(it.next().unwrap());
ss[1].copy_from_slice(it.next().unwrap());
Ok(Sig::Ed25519Sig(ss))
}
pub fn verify(content: &Vec<u8>, sig: Sig, pub_key: PubKey) -> Result<(), NgError> {
let pubkey = match pub_key {
PubKey::Ed25519PubKey(pk) => pk,
};
let pk = PublicKey::from_bytes(&pubkey)?;
let sig_bytes = match sig {
Sig::Ed25519Sig(ss) => [ss[0], ss[1]].concat(),
};
let sig = Signature::from_bytes(&sig_bytes)?;
Ok(pk.verify_strict(content, &sig)?)
}
pub fn generate_keypair() -> (PrivKey, PubKey) {
let mut csprng = OsRng {};
let keypair: Keypair = Keypair::generate(&mut csprng);
// println!(
// "private key: ({}) {:?}",
// keypair.secret.as_bytes().len(),
// keypair.secret.as_bytes()
// );
// println!(
// "public key: ({}) {:?}",
// keypair.public.as_bytes().len(),
// keypair.public.as_bytes()
// );
let ed_priv_key = keypair.secret.to_bytes();
let ed_pub_key = keypair.public.to_bytes();
let priv_key = PrivKey::Ed25519PrivKey(ed_priv_key);
let pub_key = PubKey::Ed25519PubKey(ed_pub_key);
(priv_key, pub_key)
}
/// Returns the NextGraph Timestamp of now (minutes since the NextGraph epoch).
pub fn now_timestamp() -> Timestamp {
((SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap()
.as_secs()
- EPOCH_AS_UNIX_TIMESTAMP)
/ 60)
.try_into()
.unwrap()
}
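// Round-trip sketch (not in the original commit): keypair generation,
// signing and verification using the helpers above.
#[cfg(test)]
mod utils_sketch {
    use super::*;

    #[test]
    fn sign_verify_roundtrip() {
        let (priv_key, pub_key) = generate_keypair();
        let content = vec![1u8, 2, 3];
        let sig = match sign(priv_key, pub_key, &content) {
            Ok(s) => s,
            Err(_) => panic!("sign failed"),
        };
        // verify() succeeds for the signing key and the exact same content.
        assert!(verify(&content, sig, pub_key).is_ok());
    }
}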

@ -3,8 +3,9 @@ name = "p2p-stores-lmdb"
version = "0.1.0"
edition = "2021"
license = "MIT/Apache-2.0"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
authors = ["Niko PLP <niko@nextgraph.org>"]
description = "P2P stores based on LMDB for NextGraph"
repository = "https://git.nextgraph.org/NextGraph/nextgraph-rs"
[dependencies]
p2p-repo = { path = "../p2p-repo" }

@ -0,0 +1,233 @@
// Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// All rights reserved.
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use p2p_repo::broker_store::*;
use p2p_repo::store::*;
use p2p_repo::types::*;
use p2p_repo::utils::*;
use debug_print::*;
use std::path::Path;
use std::path::PathBuf;
use std::sync::{Arc, RwLock};
use rkv::backend::{
BackendDatabaseFlags, BackendFlags, BackendIter, BackendWriteFlags, DatabaseFlags, Lmdb,
LmdbDatabase, LmdbDatabaseFlags, LmdbEnvironment, LmdbRwTransaction, LmdbWriteFlags,
};
use rkv::{
Manager, MultiStore, Rkv, SingleStore, StoreError, StoreOptions, Value, WriteFlags, Writer,
};
use serde::{Deserialize, Serialize};
use serde_bare::error::Error;
pub struct LmdbBrokerStore {
/// the main store where all the properties of keys are stored
main_store: MultiStore<LmdbDatabase>,
/// the opened environment so we can create new transactions
environment: Arc<RwLock<Rkv<LmdbEnvironment>>>,
/// path for the storage backend data
path: String,
}
impl BrokerStore for LmdbBrokerStore {
/// Load a single value property from the store.
fn get(&self, prefix: u8, key: &Vec<u8>, suffix: Option<u8>) -> Result<Vec<u8>, StorageError> {
let property = Self::compute_property(prefix, key, suffix);
let lock = self.environment.read().unwrap();
let reader = lock.read().unwrap();
let mut iter = self
.main_store
.get(&reader, property)
.map_err(|e| StorageError::BackendError)?;
match iter.next() {
Some(Ok(val)) => Ok(val.1.to_bytes().unwrap()),
Some(Err(_e)) => Err(StorageError::BackendError),
None => Err(StorageError::NotFound),
}
}
/// Load all the values of a property from the store.
fn get_all(
&self,
prefix: u8,
key: &Vec<u8>,
suffix: Option<u8>,
) -> Result<Vec<Vec<u8>>, StorageError> {
let property = Self::compute_property(prefix, key, suffix);
let lock = self.environment.read().unwrap();
let reader = lock.read().unwrap();
let mut iter = self
.main_store
.get(&reader, property)
.map_err(|e| StorageError::BackendError)?;
let mut vector: Vec<Vec<u8>> = vec![];
        loop {
            match iter.next() {
                Some(Ok(val)) => vector.push(val.1.to_bytes().unwrap()),
                Some(Err(_e)) => return Err(StorageError::BackendError),
                None => break,
            }
        }
Ok(vector)
}
/// Check if a specific value exists for a property from the store.
fn has_property_value(
&self,
prefix: u8,
key: &Vec<u8>,
suffix: Option<u8>,
value: Vec<u8>,
) -> Result<(), StorageError> {
let property = Self::compute_property(prefix, key, suffix);
let lock = self.environment.read().unwrap();
let reader = lock.read().unwrap();
let exists = self
.main_store
.get_key_value(&reader, property, &Value::Blob(value.as_slice()))
.map_err(|e| StorageError::BackendError)?;
if exists {
Ok(())
} else {
Err(StorageError::NotFound)
}
}
/// Save a property value to the store.
fn put(
&self,
prefix: u8,
key: &Vec<u8>,
suffix: Option<u8>,
value: Vec<u8>,
) -> Result<(), StorageError> {
let property = Self::compute_property(prefix, key, suffix);
let lock = self.environment.read().unwrap();
let mut writer = lock.write().unwrap();
self.main_store
.put(&mut writer, property, &Value::Blob(value.as_slice()))
.map_err(|e| StorageError::BackendError)?;
writer.commit().unwrap();
Ok(())
}
/// Replace the property of a key (single value) to the store.
fn replace(
&self,
prefix: u8,
key: &Vec<u8>,
suffix: Option<u8>,
value: Vec<u8>,
) -> Result<(), StorageError> {
let property = Self::compute_property(prefix, key, suffix);
let lock = self.environment.read().unwrap();
let mut writer = lock.write().unwrap();
self.main_store
.delete_all(&mut writer, property.clone())
.map_err(|e| StorageError::BackendError)?;
self.main_store
.put(&mut writer, property, &Value::Blob(value.as_slice()))
.map_err(|e| StorageError::BackendError)?;
writer.commit().unwrap();
Ok(())
}
/// Delete a property from the store.
fn del(&self, prefix: u8, key: &Vec<u8>, suffix: Option<u8>) -> Result<(), StorageError> {
let property = Self::compute_property(prefix, key, suffix);
let lock = self.environment.read().unwrap();
let mut writer = lock.write().unwrap();
self.main_store
.delete_all(&mut writer, property)
.map_err(|e| StorageError::BackendError)?;
writer.commit().unwrap();
Ok(())
}
/// Delete a specific value for a property from the store.
fn del_property_value(
&self,
prefix: u8,
key: &Vec<u8>,
suffix: Option<u8>,
value: Vec<u8>,
) -> Result<(), StorageError> {
let property = Self::compute_property(prefix, key, suffix);
let lock = self.environment.read().unwrap();
let mut writer = lock.write().unwrap();
self.main_store
.delete(&mut writer, property, &Value::Blob(value.as_slice()))
.map_err(|e| StorageError::BackendError)?;
writer.commit().unwrap();
Ok(())
}
/// Delete all properties of a key from the store.
fn del_all(&self, prefix: u8, key: &Vec<u8>, all_suffixes: &[u8]) -> Result<(), StorageError> {
for suffix in all_suffixes {
self.del(prefix, key, Some(*suffix))?;
}
if all_suffixes.is_empty() {
self.del(prefix, key, None)?;
}
Ok(())
}
}
impl LmdbBrokerStore {
pub fn path(&self) -> PathBuf {
PathBuf::from(&self.path)
}
fn compute_property(prefix: u8, key: &Vec<u8>, suffix: Option<u8>) -> Vec<u8> {
let mut new: Vec<u8> = Vec::with_capacity(key.len() + 2);
new.push(prefix);
new.extend(key);
        if let Some(s) = suffix {
            new.push(s)
        }
new
}
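    // Layout sketch: compute_property(0x01, &vec![0xAA, 0xBB], Some(0x02))
    // yields [0x01, 0xAA, 0xBB, 0x02]: prefix byte, then the key bytes,
    // then the optional suffix byte.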
/// Opens the store and returns a BrokerStore object that should be kept and used to manipulate Accounts, Overlays, Topics and options
/// The key is the encryption key for the data at rest.
pub fn open<'a>(path: &Path, key: [u8; 32]) -> LmdbBrokerStore {
let mut manager = Manager::<LmdbEnvironment>::singleton().write().unwrap();
let shared_rkv = manager
.get_or_create(path, |path| {
//Rkv::new::<Lmdb>(path) // use this instead to disable encryption
Rkv::with_encryption_key_and_mapsize::<Lmdb>(path, key, 2 * 1024 * 1024 * 1024)
})
.unwrap();
let env = shared_rkv.read().unwrap();
println!("created env with LMDB Version: {}", env.version());
let main_store = env.open_multi("main", StoreOptions::create()).unwrap();
LmdbBrokerStore {
environment: shared_rkv.clone(),
main_store,
path: path.to_str().unwrap().to_string(),
}
}
}
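// Usage sketch (illustrative; the path, prefix and key/value bytes are
// hypothetical):
//
//     let store = LmdbBrokerStore::open(Path::new("/tmp/broker"), [0u8; 32]);
//     store.put(1u8, &b"peer-id".to_vec(), None, b"value".to_vec())?;
//     let value = store.get(1u8, &b"peer-id".to_vec(), None)?;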

@ -0,0 +1,3 @@
pub mod repo_store;
pub mod broker_store;

@ -0,0 +1,997 @@
// Copyright (c) 2022-2023 Niko Bonnieure, Par le Peuple, NextGraph.org developers
// All rights reserved.
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE2 or http://www.apache.org/licenses/LICENSE-2.0>
// or the MIT license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use p2p_repo::store::*;
use p2p_repo::types::*;
use p2p_repo::utils::*;
use debug_print::*;
use std::path::Path;
use std::sync::{Arc, RwLock};
use rkv::backend::{
BackendDatabaseFlags, BackendFlags, BackendIter, BackendWriteFlags, DatabaseFlags, Lmdb,
LmdbDatabase, LmdbDatabaseFlags, LmdbEnvironment, LmdbRwTransaction, LmdbWriteFlags,
};
use rkv::{
Manager, MultiIntegerStore, Rkv, SingleStore, StoreError, StoreOptions, Value, WriteFlags,
Writer,
};
use serde::{Deserialize, Serialize};
use serde_bare::error::Error;
pub struct LmdbRepoStore {
/// the main store where all the repo blocks are stored
main_store: SingleStore<LmdbDatabase>,
/// store for the pin boolean, recently_used timestamp, and synced boolean
meta_store: SingleStore<LmdbDatabase>,
/// store for the expiry timestamp
expiry_store: MultiIntegerStore<LmdbDatabase, u32>,
/// store for the LRU list
recently_used_store: MultiIntegerStore<LmdbDatabase, u32>,
/// the opened environment so we can create new transactions
environment: Arc<RwLock<Rkv<LmdbEnvironment>>>,
}
// TODO: versioning V0
#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)]
struct BlockMeta {
pub pin: bool,
pub last_used: Timestamp,
pub synced: bool,
}
impl RepoStore for LmdbRepoStore {
/// Retrieves a block from the storage backend.
fn get(&self, block_id: &BlockId) -> Result<Block, StorageError> {
let lock = self.environment.read().unwrap();
let reader = lock.read().unwrap();
let block_id_ser = serde_bare::to_vec(&block_id).unwrap();
let block_ser_res = self.main_store.get(&reader, block_id_ser.clone());
match block_ser_res {
Err(e) => Err(StorageError::BackendError),
Ok(None) => Err(StorageError::NotFound),
Ok(Some(block_ser)) => {
// updating recently_used
// first getting the meta for this BlockId
let meta_ser = self.meta_store.get(&reader, block_id_ser.clone()).unwrap();
match meta_ser {
Some(meta_value) => {
let mut meta =
serde_bare::from_slice::<BlockMeta>(&meta_value.to_bytes().unwrap())
.unwrap();
if meta.synced {
let mut writer = lock.write().unwrap();
let now = now_timestamp();
if !meta.pin {
// we remove the previous timestamp (last_used) from recently_used_store
self.remove_from_lru(&mut writer, &block_id_ser, &meta.last_used)
.unwrap();
// we add an entry to recently_used_store with now
self.add_to_lru(&mut writer, &block_id_ser, &now).unwrap();
}
// we save the new meta (with last_used:now)
meta.last_used = now;
let new_meta_ser = serde_bare::to_vec(&meta).unwrap();
self.meta_store
.put(
&mut writer,
block_id_ser,
&Value::Blob(new_meta_ser.as_slice()),
)
.unwrap();
// commit
writer.commit().unwrap();
}
}
_ => {} // there is no meta. we do nothing since we start to record LRU only once synced == true.
}
match serde_bare::from_slice::<Block>(&block_ser.to_bytes().unwrap()) {
Err(_e) => Err(StorageError::InvalidValue),
Ok(o) => {
if o.id() != *block_id {
debug_println!(
"Invalid ObjectId.\nExp: {:?}\nGot: {:?}\nContent: {:?}",
block_id,
o.id(),
o
);
panic!("CORRUPTION OF DATA !");
}
Ok(o)
}
}
}
}
}
/// Adds a block in the storage backend.
/// The block is persisted to disk.
/// Returns the BlockId of the Block.
fn put(&self, block: &Block) -> Result<BlockId, StorageError> {
let block_ser = serde_bare::to_vec(&block).unwrap();
let block_id = block.id();
let block_id_ser = serde_bare::to_vec(&block_id).unwrap();
let lock = self.environment.read().unwrap();
let mut writer = lock.write().unwrap();
        // TODO: check if the block is already in the store? If yes, don't put it again.
        // Not done yet because of the extra cost: a get on the store is surely lighter than a put,
        // but doing a get in addition to a put for every call is probably even costlier. Better to deal with that at a higher level.
self.main_store
.put(
&mut writer,
&block_id_ser,
&Value::Blob(block_ser.as_slice()),
)
.unwrap();
// if it has an expiry, adding the BlockId to the expiry_store
match block.expiry() {
Some(expiry) => {
self.expiry_store
.put(&mut writer, expiry, &Value::Blob(block_id_ser.as_slice()))
.unwrap();
}
_ => {}
}
writer.commit().unwrap();
Ok(block_id)
}
    /// Removes the block from the storage backend.
    /// The removed block is returned, so it can be inspected.
    /// Also returned is the approximate size of free space that was reclaimed.
fn del(&self, block_id: &BlockId) -> Result<(Block, usize), StorageError> {
let lock = self.environment.read().unwrap();
let mut writer = lock.write().unwrap();
let block_id_ser = serde_bare::to_vec(&block_id).unwrap();
// retrieving the block itself (we need the expiry)
let block_ser = self
.main_store
.get(&writer, block_id_ser.clone())
.unwrap()
.ok_or(StorageError::NotFound)?;
let slice = block_ser.to_bytes().unwrap();
let block = serde_bare::from_slice::<Block>(&slice).unwrap(); //FIXME propagate error?
let meta_res = self.meta_store.get(&writer, block_id_ser.clone()).unwrap();
if meta_res.is_some() {
let meta = serde_bare::from_slice::<BlockMeta>(&meta_res.unwrap().to_bytes().unwrap())
.unwrap();
if meta.last_used != 0 {
self.remove_from_lru(&mut writer, &block_id_ser.clone(), &meta.last_used)
.unwrap();
}
// removing the meta
self.meta_store
.delete(&mut writer, block_id_ser.clone())
.unwrap();
}
// delete block from main_store
self.main_store
.delete(&mut writer, block_id_ser.clone())
.unwrap();
// remove BlockId from expiry_store, if any expiry
match block.expiry() {
Some(expiry) => {
self.expiry_store
.delete(
&mut writer,
expiry,
&Value::Blob(block_id_ser.clone().as_slice()),
)
.unwrap();
}
_ => {}
}
writer.commit().unwrap();
Ok((block, slice.len()))
}
}
impl LmdbRepoStore {
/// Opens the store and returns a RepoStore object that should be kept and used to call put/get/delete/pin
/// The key is the encryption key for the data at rest.
pub fn open<'a>(path: &Path, key: [u8; 32]) -> LmdbRepoStore {
let mut manager = Manager::<LmdbEnvironment>::singleton().write().unwrap();
let shared_rkv = manager
.get_or_create(path, |path| {
//Rkv::new::<Lmdb>(path) // use this instead to disable encryption
Rkv::with_encryption_key_and_mapsize::<Lmdb>(path, key, 2 * 1024 * 1024 * 1024)
})
.unwrap();
let env = shared_rkv.read().unwrap();
println!(
"created env with LMDB Version: {} key: {}",
env.version(),
hex::encode(&key)
);
let main_store = env.open_single("main", StoreOptions::create()).unwrap();
let meta_store = env.open_single("meta", StoreOptions::create()).unwrap();
let mut opts = StoreOptions::<LmdbDatabaseFlags>::create();
opts.flags.set(DatabaseFlags::DUP_FIXED, true);
let expiry_store = env.open_multi_integer("expiry", opts).unwrap();
let recently_used_store = env.open_multi_integer("recently_used", opts).unwrap();
LmdbRepoStore {
environment: shared_rkv.clone(),
main_store,
meta_store,
expiry_store,
recently_used_store,
}
}
//FIXME: use BlockId, not ObjectId. this is a block level operation
/// Pins the object
pub fn pin(&self, object_id: &ObjectId) -> Result<(), StorageError> {
self.set_pin(object_id, true)
}
//FIXME: use BlockId, not ObjectId. this is a block level operation
/// Unpins the object
pub fn unpin(&self, object_id: &ObjectId) -> Result<(), StorageError> {
self.set_pin(object_id, false)
}
//FIXME: use BlockId, not ObjectId. this is a block level operation
    /// Sets the pin for that Object. If `add` is true, adds the pin; if false, removes it.
    /// A pin on an object prevents it from being removed when the store frees disk space via the LRU.
    /// A pin does not override the expiry: if an expiry is set and reached, the object will be deleted regardless.
pub fn set_pin(&self, object_id: &ObjectId, add: bool) -> Result<(), StorageError> {
let lock = self.environment.read().unwrap();
let mut writer = lock.write().unwrap();
let obj_id_ser = serde_bare::to_vec(&object_id).unwrap();
let meta_ser = self.meta_store.get(&writer, &obj_id_ser).unwrap();
let mut meta;
// if adding a pin, if there is a meta (if already pinned, return) and is synced, remove the last_used timestamp from recently_used_store
// if no meta, create it with pin:true, synced: false
// if removing a pin (if pin already removed, return), if synced, add an entry to recently_used_store with the last_used timestamp (as found in meta, dont use now)
match meta_ser {
Some(meta_value) => {
meta =
serde_bare::from_slice::<BlockMeta>(&meta_value.to_bytes().unwrap()).unwrap();
if add == meta.pin {
// pinning while already pinned, or unpinning while already unpinned. NOP
return Ok(());
};
meta.pin = add;
if meta.synced {
if add {
// we remove the previous timestamp (last_used) from recently_used_store
self.remove_from_lru(&mut writer, &obj_id_ser, &meta.last_used)
.unwrap();
} else {
// we add an entry to recently_used_store with last_used
self.add_to_lru(&mut writer, &obj_id_ser, &meta.last_used)
.unwrap();
}
}
}
None => {
if add {
meta = BlockMeta {
pin: true,
synced: false,
last_used: 0,
}
} else {
// there is no meta, and user wants to unpin, so let's leave everything as it is.
return Ok(());
}
}
}
let new_meta_ser = serde_bare::to_vec(&meta).unwrap();
self.meta_store
.put(
&mut writer,
obj_id_ser,
&Value::Blob(new_meta_ser.as_slice()),
)
.unwrap();
// commit
writer.commit().unwrap();
Ok(())
}
//FIXME: use BlockId, not ObjectId. this is a block level operation
    /// The broker calls this method when the block has been retrieved/synced by enough peers and it
    /// can now be included in the LRU for potential garbage collection.
    /// If this method has not been called on a block, it will be kept in the store and will never enter the LRU.
pub fn has_been_synced(&self, block_id: &BlockId, when: Option<u32>) -> Result<(), Error> {
let lock = self.environment.read().unwrap();
let mut writer = lock.write().unwrap();
let block_id_ser = serde_bare::to_vec(&block_id).unwrap();
let meta_ser = self.meta_store.get(&writer, block_id_ser.clone()).unwrap();
let mut meta;
let now = match when {
None => now_timestamp(),
Some(w) => w,
};
// get the meta. if no meta, it is ok, we will create it after (with pin:false and synced:true)
// if already synced, return
// update the meta with last_used:now and synced:true
// if pinned, save and return
// otherwise add an entry to recently_used_store with now
match meta_ser {
Some(meta_value) => {
meta =
serde_bare::from_slice::<BlockMeta>(&meta_value.to_bytes().unwrap()).unwrap();
if meta.synced {
// already synced. NOP
return Ok(());
};
meta.synced = true;
meta.last_used = now;
if !meta.pin {
// we add an entry to recently_used_store with now
println!("adding to LRU");
self.add_to_lru(&mut writer, &block_id_ser, &now).unwrap();
}
}
None => {
meta = BlockMeta {
pin: false,
synced: true,
last_used: now,
};
println!("adding to LRU also");
self.add_to_lru(&mut writer, &block_id_ser, &now).unwrap();
}
}
let new_meta_ser = serde_bare::to_vec(&meta).unwrap();
self.meta_store
.put(
&mut writer,
block_id_ser,
&Value::Blob(new_meta_ser.as_slice()),
)
.unwrap();
// commit
writer.commit().unwrap();
Ok(())
}
/// Removes all the blocks that have expired.
/// The broker should call this method periodically.
pub fn remove_expired(&self) -> Result<(), Error> {
let mut block_ids: Vec<BlockId> = vec![];
{
let lock = self.environment.read().unwrap();
let reader = lock.read().unwrap();
let mut iter = self
.expiry_store
.iter_prev_dup_from(&reader, now_timestamp())
.unwrap();
while let Some(Ok(mut sub_iter)) = iter.next() {
while let Some(Ok(k)) = sub_iter.next() {
//println!("removing {:?} {:?}", k.0, k.1);
let block_id = serde_bare::from_slice::<ObjectId>(k.1).unwrap();
block_ids.push(block_id);
}
}
}
for block_id in block_ids {
self.del(&block_id).unwrap();
}
Ok(())
}
    /// Removes some blocks that haven't been used for a while, reclaiming some space on disk.
    /// The oldest are removed first, until the total amount of data removed is at least equal to `size`,
    /// or the LRU list becomes empty. The approximate size of the storage space that was reclaimed is returned.
pub fn remove_least_used(&self, size: usize) -> usize {
let mut block_ids: Vec<BlockId> = vec![];
let mut total: usize = 0;
{
let lock = self.environment.read().unwrap();
let reader = lock.read().unwrap();
let mut iter = self.recently_used_store.iter_start(&reader).unwrap();
while let Some(Ok(entry)) = iter.next() {
let block_id =
serde_bare::from_slice::<ObjectId>(entry.1.to_bytes().unwrap().as_slice())
.unwrap();
block_ids.push(block_id);
}
}
for block_id in block_ids {
let (block, block_size) = self.del(&block_id).unwrap();
println!("removed {:?}", block_id);
total += block_size;
if total >= size {
break;
}
}
total
}
fn remove_from_lru(
&self,
writer: &mut Writer<LmdbRwTransaction>,
block_id_ser: &Vec<u8>,
time: &Timestamp,
) -> Result<(), StoreError> {
self.recently_used_store
.delete(writer, *time, &Value::Blob(block_id_ser.as_slice()))
}
fn add_to_lru(
&self,
writer: &mut Writer<LmdbRwTransaction>,
block_id_ser: &Vec<u8>,
time: &Timestamp,
) -> Result<(), StoreError> {
let mut flag = LmdbWriteFlags::empty();
flag.set(WriteFlags::APPEND_DUP, true);
self.recently_used_store.put_with_flags(
writer,
*time,
&Value::Blob(block_id_ser.as_slice()),
flag,
)
}
fn list_all(&self) {
let lock = self.environment.read().unwrap();
let reader = lock.read().unwrap();
println!("MAIN");
let mut iter = self.main_store.iter_start(&reader).unwrap();
while let Some(Ok(entry)) = iter.next() {
println!("{:?} {:?}", entry.0, entry.1)
}
println!("META");
let mut iter2 = self.meta_store.iter_start(&reader).unwrap();
while let Some(Ok(entry)) = iter2.next() {
println!("{:?} {:?}", entry.0, entry.1)
}
println!("EXPIRY");
let mut iter3 = self.expiry_store.iter_start(&reader).unwrap();
while let Some(Ok(entry)) = iter3.next() {
println!("{:?} {:?}", entry.0, entry.1)
}
println!("LRU");
let mut iter4 = self.recently_used_store.iter_start(&reader).unwrap();
while let Some(Ok(entry)) = iter4.next() {
println!("{:?} {:?}", entry.0, entry.1)
}
}
}
#[cfg(test)]
mod test {
use crate::repo_store::LmdbRepoStore;
use p2p_repo::store::*;
use p2p_repo::types::*;
use p2p_repo::utils::*;
use rkv::backend::{BackendInfo, BackendStat, Lmdb, LmdbEnvironment};
use rkv::{Manager, Rkv, StoreOptions, Value};
#[allow(unused_imports)]
use std::time::Duration;
#[allow(unused_imports)]
use std::{fs, thread};
use tempfile::Builder;
#[test]
pub fn test_remove_least_used() {
let path_str = "test-env";
let root = Builder::new().prefix(path_str).tempdir().unwrap();
let key: [u8; 32] = [0; 32];
fs::create_dir_all(root.path()).unwrap();
println!("{}", root.path().to_str().unwrap());
let mut store = LmdbRepoStore::open(root.path(), key);
let mut now = now_timestamp();
now -= 200;
        // TODO: fix the LMDB bug that is triggered when the upper bound of x is set to 86 !!!
for x in 1..85 {
let block = Block::new(
Vec::new(),
ObjectDeps::ObjectIdList(Vec::new()),
None,
vec![x; 10],
None,
);
let block_id = store.put(&block).unwrap();
println!("#{} -> objId {:?}", x, block_id);
store
.has_been_synced(&block_id, Some(now + x as u32))
.unwrap();
}
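        // Ask to reclaim at least 200 bytes: eviction proceeds oldest-first and
        // each removed block's serialized size counts toward the total (208 here).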
let ret = store.remove_least_used(200);
println!("removed {}", ret);
assert_eq!(ret, 208)
//store.list_all();
}
#[test]
pub fn test_set_pin() {
let path_str = "test-env";
let root = Builder::new().prefix(path_str).tempdir().unwrap();
let key: [u8; 32] = [0; 32];
fs::create_dir_all(root.path()).unwrap();
println!("{}", root.path().to_str().unwrap());
let mut store = LmdbRepoStore::open(root.path(), key);
let mut now = now_timestamp();
now -= 200;
        // TODO: fix the LMDB bug that is triggered when the upper bound of x is set to 86 !!!
for x in 1..100 {
let block = Block::new(
Vec::new(),
ObjectDeps::ObjectIdList(Vec::new()),
None,
vec![x; 10],
None,
);
let obj_id = store.put(&block).unwrap();
println!("#{} -> objId {:?}", x, obj_id);
store.set_pin(&obj_id, true).unwrap();
store
.has_been_synced(&obj_id, Some(now + x as u32))
.unwrap();
}
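        // Every block was pinned, so none of them entered the LRU index and
        // remove_least_used() has nothing it is allowed to evict.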
let ret = store.remove_least_used(200);
println!("removed {}", ret);
assert_eq!(ret, 0);
store.list_all();
}
#[test]
pub fn test_get_valid_value_size() {
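        // store_valid_value_size() pads a value up to the next storable size:
        // a 4072-byte minimum, then 4096-byte increments, capped at 4072 + 4096 * 511.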
assert_eq!(store_valid_value_size(0), 4072);
assert_eq!(store_valid_value_size(2), 4072);
assert_eq!(store_valid_value_size(4072), 4072);
assert_eq!(store_valid_value_size(4072 + 1), 4072 + 4096);
assert_eq!(store_valid_value_size(4072 + 4096), 4072 + 4096);
assert_eq!(store_valid_value_size(4072 + 4096 + 1), 4072 + 4096 + 4096);
assert_eq!(
store_valid_value_size(4072 + 4096 + 4096),
4072 + 4096 + 4096
);
assert_eq!(
store_valid_value_size(4072 + 4096 + 4096 + 1),
4072 + 4096 + 4096 + 4096
);
assert_eq!(store_valid_value_size(4072 + 4096 * 511), 4072 + 4096 * 511);
assert_eq!(
store_valid_value_size(4072 + 4096 * 511 + 1),
4072 + 4096 * 511
);
}
#[test]
pub fn test_remove_expired() {
let path_str = "test-env";
let root = Builder::new().prefix(path_str).tempdir().unwrap();
let key: [u8; 32] = [0; 32];
fs::create_dir_all(root.path()).unwrap();
println!("{}", root.path().to_str().unwrap());
let mut store = LmdbRepoStore::open(root.path(), key);
let now = now_timestamp();
let list = [
now - 10,
now - 6,
now - 6,
now - 3,
now - 2,
            now - 1, // #5 and everything above should be removed
now + 3,
now + 4,
now + 4,
now + 5,
now + 10,
];
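        // Entries #0..=#5 expired before `now` and must be gone; later ones survive.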
let mut block_ids: Vec<ObjectId> = Vec::with_capacity(11);
println!("now {}", now);
let mut i = 0u8;
for expiry in list {
//let i: u8 = (expiry + 10 - now).try_into().unwrap();
let block = Block::new(
Vec::new(),
ObjectDeps::ObjectIdList(Vec::new()),
Some(expiry),
[i].to_vec(),
None,
);
let block_id = store.put(&block).unwrap();
println!("#{} -> objId {:?}", i, block_id);
block_ids.push(block_id);
i += 1;
}
store.remove_expired().unwrap();
assert!(store.get(block_ids.get(0).unwrap()).is_err());
assert!(store.get(block_ids.get(1).unwrap()).is_err());
assert!(store.get(block_ids.get(2).unwrap()).is_err());
assert!(store.get(block_ids.get(5).unwrap()).is_err());
assert!(store.get(block_ids.get(6).unwrap()).is_ok());
assert!(store.get(block_ids.get(7).unwrap()).is_ok());
//store.list_all();
}
#[test]
pub fn test_remove_all_expired() {
let path_str = "test-env";
let root = Builder::new().prefix(path_str).tempdir().unwrap();
let key: [u8; 32] = [0; 32];
fs::create_dir_all(root.path()).unwrap();
println!("{}", root.path().to_str().unwrap());
let mut store = LmdbRepoStore::open(root.path(), key);
let now = now_timestamp();
let list = [
now - 10,
now - 6,
now - 6,
now - 3,
now - 2,
            now - 2, // #5 and everything above should be removed
];
let mut block_ids: Vec<ObjectId> = Vec::with_capacity(6);
println!("now {}", now);
let mut i = 0u8;
for expiry in list {
//let i: u8 = (expiry + 10 - now).try_into().unwrap();
let block = Block::new(
Vec::new(),
ObjectDeps::ObjectIdList(Vec::new()),
Some(expiry),
[i].to_vec(),
None,
);
let block_id = store.put(&block).unwrap();
println!("#{} -> objId {:?}", i, block_id);
block_ids.push(block_id);
i += 1;
}
store.remove_expired().unwrap();
assert!(store.get(block_ids.get(0).unwrap()).is_err());
assert!(store.get(block_ids.get(1).unwrap()).is_err());
assert!(store.get(block_ids.get(2).unwrap()).is_err());
assert!(store.get(block_ids.get(3).unwrap()).is_err());
assert!(store.get(block_ids.get(4).unwrap()).is_err());
assert!(store.get(block_ids.get(5).unwrap()).is_err());
}
#[test]
pub fn test_remove_empty_expired() {
let path_str = "test-env";
let root = Builder::new().prefix(path_str).tempdir().unwrap();
let key: [u8; 32] = [0; 32];
fs::create_dir_all(root.path()).unwrap();
println!("{}", root.path().to_str().unwrap());
let store = LmdbRepoStore::open(root.path(), key);
store.remove_expired().unwrap();
}
#[test]
pub fn test_store_block() {
let path_str = "test-env";
let root = Builder::new().prefix(path_str).tempdir().unwrap();
let key: [u8; 32] = [0; 32];
fs::create_dir_all(root.path()).unwrap();
println!("{}", root.path().to_str().unwrap());
let mut store = LmdbRepoStore::open(root.path(), key);
let block = Block::new(
Vec::new(),
ObjectDeps::ObjectIdList(Vec::new()),
None,
b"abc".to_vec(),
None,
);
let block_id = store.put(&block).unwrap();
assert_eq!(block_id, block.id());
println!("ObjectId: {:?}", block_id);
assert_eq!(
block_id,
Digest::Blake3Digest32([
155, 83, 186, 17, 95, 10, 80, 31, 111, 24, 250, 64, 8, 145, 71, 193, 103, 246, 202,
28, 202, 144, 63, 65, 85, 229, 136, 85, 202, 34, 13, 85
])
);
let block_res = store.get(&block_id).unwrap();
println!("Block: {:?}", block_res);
assert_eq!(block_res.id(), block.id());
}
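    // Low-level rkv/LMDB smoke test: writes blobs into an encrypted environment,
    // prints page statistics, then reopens the environment to inspect what persisted.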
#[test]
pub fn test_lmdb() {
let path_str = "test-env";
let root = Builder::new().prefix(path_str).tempdir().unwrap();
        // we set an encryption key of all zeros... for test purposes only ;)
let key: [u8; 32] = [0; 32];
{
fs::create_dir_all(root.path()).unwrap();
println!("{}", root.path().to_str().unwrap());
let mut manager = Manager::<LmdbEnvironment>::singleton().write().unwrap();
let shared_rkv = manager
.get_or_create(root.path(), |path| {
// Rkv::new::<Lmdb>(path) // use this instead to disable encryption
Rkv::with_encryption_key_and_mapsize::<Lmdb>(path, key, 2 * 1024 * 1024 * 1024)
})
.unwrap();
let env = shared_rkv.read().unwrap();
println!("LMDB Version: {}", env.version());
let store = env.open_single("testdb", StoreOptions::create()).unwrap();
{
// Use a write transaction to mutate the store via a `Writer`. There can be only
// one writer for a given environment, so opening a second one will block until
// the first completes.
let mut writer = env.write().unwrap();
// Keys are `AsRef<[u8]>`, while values are `Value` enum instances. Use the `Blob`
// variant to store arbitrary collections of bytes. Putting data returns a
// `Result<(), StoreError>`, where StoreError is an enum identifying the reason
// for a failure.
// store.put(&mut writer, "int", &Value::I64(1234)).unwrap();
// store
// .put(&mut writer, "uint", &Value::U64(1234_u64))
// .unwrap();
// store
// .put(&mut writer, "float", &Value::F64(1234.0.into()))
// .unwrap();
// store
// .put(&mut writer, "instant", &Value::Instant(1528318073700))
// .unwrap();
// store
// .put(&mut writer, "boolean", &Value::Bool(true))
// .unwrap();
// store
// .put(&mut writer, "string", &Value::Str("Héllo, wörld!"))
// .unwrap();
// store
// .put(
// &mut writer,
// "json",
// &Value::Json(r#"{"foo":"bar", "number": 1}"#),
// )
// .unwrap();
const EXTRA: usize = 2095; // + 4096 * 524280 + 0;
let key: [u8; 33] = [0; 33];
let key2: [u8; 33] = [2; 33];
let key3: [u8; 33] = [3; 33];
let key4: [u8; 33] = [4; 33];
//let value: [u8; 1977 + EXTRA] = [1; 1977 + EXTRA];
let value = vec![1; 1977 + EXTRA];
let value2: [u8; 1977 + 1] = [1; 1977 + 1];
let value4: [u8; 953 + 0] = [1; 953 + 0];
store.put(&mut writer, key, &Value::Blob(&value2)).unwrap();
store.put(&mut writer, key2, &Value::Blob(&value2)).unwrap();
// store.put(&mut writer, key3, &Value::Blob(&value)).unwrap();
// store.put(&mut writer, key4, &Value::Blob(&value4)).unwrap();
// You must commit a write transaction before the writer goes out of scope, or the
// transaction will abort and the data won't persist.
writer.commit().unwrap();
let reader = env.read().expect("reader");
let stat = store.stat(&reader).unwrap();
println!("LMDB stat page_size : {}", stat.page_size());
println!("LMDB stat depth : {}", stat.depth());
println!("LMDB stat branch_pages : {}", stat.branch_pages());
println!("LMDB stat leaf_pages : {}", stat.leaf_pages());
println!("LMDB stat overflow_pages : {}", stat.overflow_pages());
println!("LMDB stat entries : {}", stat.entries());
}
// {
// // Use a read transaction to query the store via a `Reader`. There can be multiple
// // concurrent readers for a store, and readers never block on a writer nor other
// // readers.
// let reader = env.read().expect("reader");
// // Keys are `AsRef<u8>`, and the return value is `Result<Option<Value>, StoreError>`.
// // println!("Get int {:?}", store.get(&reader, "int").unwrap());
// // println!("Get uint {:?}", store.get(&reader, "uint").unwrap());
// // println!("Get float {:?}", store.get(&reader, "float").unwrap());
// // println!("Get instant {:?}", store.get(&reader, "instant").unwrap());
// // println!("Get boolean {:?}", store.get(&reader, "boolean").unwrap());
// // println!("Get string {:?}", store.get(&reader, "string").unwrap());
// // println!("Get json {:?}", store.get(&reader, "json").unwrap());
// println!("Get blob {:?}", store.get(&reader, "blob").unwrap());
// // Retrieving a non-existent value returns `Ok(None)`.
// println!(
// "Get non-existent value {:?}",
// store.get(&reader, "non-existent").unwrap()
// );
// // A read transaction will automatically close once the reader goes out of scope,
        // // so it isn't necessary to close it explicitly, although you can do so by calling
// // `Reader.abort()`.
// }
// {
// // Aborting a write transaction rolls back the change(s).
// let mut writer = env.write().unwrap();
// store.put(&mut writer, "foo", &Value::Blob(b"bar")).unwrap();
// writer.abort();
// let reader = env.read().expect("reader");
// println!(
// "It should be None! ({:?})",
// store.get(&reader, "foo").unwrap()
// );
// }
// {
// // Explicitly aborting a transaction is not required unless an early abort is
// // desired, since both read and write transactions will implicitly be aborted once
// // they go out of scope.
// {
// let mut writer = env.write().unwrap();
// store.put(&mut writer, "foo", &Value::Blob(b"bar")).unwrap();
// }
// let reader = env.read().expect("reader");
// println!(
// "It should be None! ({:?})",
// store.get(&reader, "foo").unwrap()
// );
// }
// {
// // Deleting a key/value pair also requires a write transaction.
// let mut writer = env.write().unwrap();
// store.put(&mut writer, "foo", &Value::Blob(b"bar")).unwrap();
// store.put(&mut writer, "bar", &Value::Blob(b"baz")).unwrap();
// store.delete(&mut writer, "foo").unwrap();
// // A write transaction also supports reading, and the version of the store that it
// // reads includes the changes it has made regardless of the commit state of that
// // transaction.
// // In the code above, "foo" and "bar" were put into the store, then "foo" was
// // deleted so only "bar" will return a result when the database is queried via the
// // writer.
// println!(
// "It should be None! ({:?})",
// store.get(&writer, "foo").unwrap()
// );
// println!("Get bar ({:?})", store.get(&writer, "bar").unwrap());
// // But a reader won't see that change until the write transaction is committed.
// {
// let reader = env.read().expect("reader");
// println!("Get foo {:?}", store.get(&reader, "foo").unwrap());
// println!("Get bar {:?}", store.get(&reader, "bar").unwrap());
// }
// writer.commit().unwrap();
// {
// let reader = env.read().expect("reader");
// println!(
// "It should be None! ({:?})",
// store.get(&reader, "foo").unwrap()
// );
// println!("Get bar {:?}", store.get(&reader, "bar").unwrap());
// }
// // Committing a transaction consumes the writer, preventing you from reusing it by
// // failing at compile time with an error. This line would report "error[E0382]:
// // borrow of moved value: `writer`".
// // store.put(&mut writer, "baz", &Value::Str("buz")).unwrap();
// }
// {
// // Clearing all the entries in the store with a write transaction.
// {
// let mut writer = env.write().unwrap();
// store.put(&mut writer, "foo", &Value::Blob(b"bar")).unwrap();
// store.put(&mut writer, "bar", &Value::Blob(b"baz")).unwrap();
// writer.commit().unwrap();
// }
// // {
// // let mut writer = env.write().unwrap();
// // store.clear(&mut writer).unwrap();
// // writer.commit().unwrap();
// // }
// // {
// // let reader = env.read().expect("reader");
// // println!(
// // "It should be None! ({:?})",
// // store.get(&reader, "foo").unwrap()
// // );
// // println!(
// // "It should be None! ({:?})",
// // store.get(&reader, "bar").unwrap()
// // );
// // }
// }
let stat = env.stat().unwrap();
let info = env.info().unwrap();
println!("LMDB info map_size : {}", info.map_size());
println!("LMDB info last_pgno : {}", info.last_pgno());
println!("LMDB info last_txnid : {}", info.last_txnid());
println!("LMDB info max_readers : {}", info.max_readers());
println!("LMDB info num_readers : {}", info.num_readers());
println!("LMDB stat page_size : {}", stat.page_size());
println!("LMDB stat depth : {}", stat.depth());
println!("LMDB stat branch_pages : {}", stat.branch_pages());
println!("LMDB stat leaf_pages : {}", stat.leaf_pages());
println!("LMDB stat overflow_pages : {}", stat.overflow_pages());
println!("LMDB stat entries : {}", stat.entries());
}
        // We reopen the env to check that the data was properly saved to disk.
{
let mut manager = Manager::<LmdbEnvironment>::singleton().write().unwrap();
let shared_rkv = manager
.get_or_create(root.path(), |path| {
//Rkv::new::<Lmdb>(path) // use this instead to disable encryption
Rkv::with_encryption_key_and_mapsize::<Lmdb>(path, key, 2 * 1024 * 1024 * 1024)
})
.unwrap();
let env = shared_rkv.read().unwrap();
println!("LMDB Version: {}", env.version());
            let store = env.open_single("testdb", StoreOptions::default()).unwrap(); //StoreOptions::create()
{
let reader = env.read().expect("reader");
println!(
"It should be baz! ({:?})",
store.get(&reader, "bar").unwrap()
);
}
}
        // Here the database and environment are closed, but the files are still present in the temp directory.
        // Uncomment the sleep below if you need time to copy them somewhere for analysis, before the temp folder gets destroyed.
//thread::sleep(Duration::from_millis(20000));
}
}

@ -0,0 +1,17 @@
[package]
name = "p2p-verifier"
version = "0.1.0"
edition = "2021"
license = "MIT/Apache-2.0"
authors = ["Niko PLP <niko@nextgraph.org>"]
description = "P2P Verifier module of NextGraph"
repository = "https://git.nextgraph.org/NextGraph/nextgraph-rs"

[dependencies]
p2p-repo = { path = "../p2p-repo" }
p2p-net = { path = "../p2p-net" }
blake3 = "1.3.1"
chacha20 = "0.9.0"
serde = { version = "1.0", features = ["derive"] }
serde_bare = "0.5.0"
serde_bytes = "0.11.7"