Use FutureStream for accessing all blobs in a set

Ehmry - 2019-03-18 21:26:59 +01:00
parent 7961e98031
commit 40aa95952b
6 changed files with 249 additions and 133 deletions
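
In brief: the blocking applyAll/items traversal is replaced with an asyncstreams.FutureStream pipeline. A producer, streamMembers, walks the set trie and writes each member to the stream; consumers await stream.read() until the producer calls complete. A minimal consumer sketch, assuming the blobsets module is imported and that store is an open BlobStore and bs a loaded BlobSet (names and tuple layout taken from the diff below):

import std/asyncdispatch, std/asyncstreams

proc listMembers(store: BlobStore; bs: BlobSet) {.async.} =
  let stream = newMemberStream()
  # producer: walks the trie, writing (key, id, size) tuples
  asyncCheck stream.streamMembers(store, bs)
  while true:
    let (valid, m) = await stream.read()
    if not valid: break  # producer completed the stream
    echo m.key, " ", m.id, " ", m.size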

View File

@@ -1,5 +1,5 @@
import std/asyncdispatch
import std/tables, std/xmltree, std/strtabs, std/strutils, std/streams, std/xmlparser, std/random
import std/tables, std/xmltree, std/strtabs, std/strutils, std/streams, std/xmlparser, std/random, std/times
import genode, genode/signals, genode/parents, genode/servers, genode/roms
@@ -79,7 +79,7 @@ type
next: Handle
nodes: Table[Handle, Node]
## Read files from the store into this buffer
random: int
rng: Rand
Session = ptr SessionObj | ref SessionObj | SessionObj
@@ -161,7 +161,7 @@ proc statusProc(state: pointer; handle: Handle): Status {.exportc.} =
st.inode = 0
st.mode = DirMode
if node.path == "?":
session.store.randomApply(session.fsSet, session.random) do (id: BlobId; size: BiggestInt):
session.store.randomApply(session.fsSet, session.rng) do (id: BlobId; size: BiggestInt):
st.size = (culonglong)size
st.mode = FileMode
st.inode = node.inode
@@ -183,7 +183,7 @@ proc dirProc(state: pointer; path: cstring; create: cint): Handle {.exportc.} =
else:
validPathAssert path
path = path.strip(chars={'/'})
if session.fsSet.contains path:
if contains(session.store, session.fsSet, path):
return (not Handle(0))
n = Node(path: path, kind: dirNode)
result = session.nextId
@@ -204,15 +204,13 @@ proc fileProc(state: pointer; dirH: Handle; name: cstring; mode: cuint; create:
var retry = 16
while not success and retry > 0:
# open an arbitrary member of the set
session.store.randomApply(session.fsSet, session.random) do (id: BlobId; size: BiggestInt):
session.store.randomApply(session.fsSet, session.rng) do (id: BlobId; size: BiggestInt):
try:
let stream = session.store.openBlobStream(id, size, dataBlob)
n = Node(path: path, kind: fileNode, id: id, size: size, stream: stream)
session.random = rand(int.high)
success = true
except:
dec retry
inc session.random
else:
session.apply(path) do (id: BlobId; size: BiggestInt):
let stream = session.store.openBlobStream(id, size, dataBlob)
@@ -283,7 +281,7 @@ proc newSession(env: GenodeEnv; store: BlobStore; label: string; setId: SetId; f
fsSetId: setId,
fsSet: fsSet,
nodes: initTable[Handle, Node](),
random: rand(int.high)
rng: initRand(cpuTime().int)
)
session.sig = env.ep.newSignalHandler do ():
while session.cpp.packetAvail and session.cpp.readyToAck:
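
Note: the session's hand-rolled random: int seed, bumped with inc after each failed open, becomes a stateful std/random.Rand seeded once per session and passed as var Rand, so every randomApply call advances the generator. A sketch of the pattern, assuming store and fsSet are initialized as in newSession:

import std/random, std/times

var rng = initRand(cpuTime().int)  # one generator per session
store.randomApply(fsSet, rng) do (id: BlobId; size: BiggestInt):
  # rng advances inside randomApply, so a retry loop naturally
  # lands on a different member each attempt
  echo "picked ", id, " (", size, " bytes)"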

View File

@@ -32,11 +32,11 @@ proc openStore(): BlobStore =
quit(key & " not set in environment")
newHttpStore(url)
proc serverMain() =
proc serverMain(): Future[void] =
let
store = newFileStore("/tmp/blobs")
server = newHttpStoreServer(store)
waitFor server.serve((Port)8080)
server.serve((Port)8080)
proc dumpMain() =
var args = newSeq[string]()
@@ -62,7 +62,7 @@ proc insertPath(store: BlobStore; bs: BlobSet; kind: PathComponent; path: string
let (id, size) = waitFor store.ingestFile(path)
path.removePrefix(getCurrentDir())
path.removePrefix("/")
result = result.insert(path, id, size)
result = insert(store, result, path, id, size)
writeLine(stdout, id, align($size, 11), " ", path)
of pcDir, pcLinkToDir:
for kind, subPath in path.walkDir:
@@ -71,7 +71,7 @@ proc insertPath(store: BlobStore; bs: BlobSet; kind: PathComponent; path: string
let e = getCurrentException()
writeLine(stderr, "failed to ingest '", path, "', ", e.msg)
proc ingestMain() =
proc ingestMain() {.async.} =
var args = newSeq[string]()
for kind, key, val in getopt():
if kind == cmdArgument:
@@ -82,10 +82,17 @@ proc ingestMain() =
for i in 1..args.high:
let path = normalizedPath args[i]
set = store.insertPath(set, path.getFileInfo.kind, path)
let final = store.commit set
let final = await store.commit(set)
writeLine(stdout, final.setId.toHex)
proc checkMain() =
func isShortHash(id: BlobId): bool =
## True when every digest byte past the first 16 is zero,
## i.e. the id looks like a truncated hash.
var r: byte
for i in countup(16, id.data.high):
r = r or id.data[i]
r == 0
proc checkMain() {.async.} =
randomize()
var args = newSeq[string]()
for kind, key, val in getopt():
if kind == cmdArgument:
@@ -94,13 +101,73 @@ proc checkMain() =
let store = openStore()
for i in 1..args.high:
try:
let bs = store.load(args[i].toBlobId)
store.applyAll(bs, i) do (id: BlobId; size: BiggestInt):
close(store.openBlobStream(id, size, dataBlob))
var bs = store.load(args[i].toBlobId)
let stream = newMemberStream()
asyncCheck stream.streamMembers(store, bs)
while true:
let (valid, m) = await stream.read()
if not valid: break
if m.id.isShortHash:
echo m.key, " has a short hash - ", m.id
bs = remove(store, bs, m.key)
echo "removed ", m.key
#close(store.openBlobStream(id, size, dataBlob))
echo "commit repaired set"
bs = await commit(store, bs)
writeLine(stdout, "recovered set is ", bs.setId.toHex)
except:
writeLine(stderr, "failed to check '", args[i], "', ", getCurrentExceptionMsg())
quit(-1)
proc replicateMain() {.async.} =
randomize()
var args = newSeq[string]()
for kind, key, val in getopt():
if kind == cmdArgument:
args.add key
if args.len > 3:
let
src = openStore()
dst = newHttpStore(args[1])
for i in 2..args.high:
try:
let
srcId = args[i].toBlobId
srcSet = src.load(srcId)
var dstSet = newBlobSet()
let stream = newMemberStream()
asyncCheck stream.streamMembers(src, srcSet)
while true:
let (valid, m) = await stream.read()
if not valid: break
let
readStream = src.openBlobStream(m.id, m.size, dataBlob)
ingestStream = dst.openIngestStream(m.size, dataBlob)
var buf: array[0x16000, byte]
while true:
let n = await readStream.read(addr buf, buf.len)
if n == 0: break
await ingestStream.ingest(addr buf, n)
close readStream
let (otherId, otherSize) = await ingestStream.finish()
if otherId != m.id or otherSize != m.size:
writeLine(stderr, "replication mismatch ",
m.id, ":", m.size, " replicated to ",
otherId, ":", otherSize)
quit -1
dstSet = insert(dst, dstSet, m.key, m.id, m.size)
let
newSet = await commit(dst, dstSet)
dstId = newSet.setId
doAssert(dstId == srcId, "set mismatch after replication")
except:
writeLine(stderr, "failed to replicate '", args[i], "', ", getCurrentExceptionMsg())
quit -1
type
EvalError = object of CatchableError
@@ -111,6 +178,7 @@ type
atomSet
atomPath
atomString
atomKey
atomNum
atomSymbol
atomError
@@ -127,6 +195,8 @@ type
name: string
of atomString:
str: string
of atomKey:
key: Key
of atomNum:
num: BiggestInt
of atomSymbol:
@@ -187,6 +257,9 @@ proc newAtomPath(s: string): Atom =
proc newAtomString(s: string): Atom =
Atom(kind: atomString, str: s)
proc newAtom(k: Key): Atom =
Atom(kind: atomKey, key: k)
proc newAtom(i: Natural): Atom =
Atom(kind: atomNum, num: i)
@@ -312,6 +385,8 @@ proc print(a: Atom; s: Stream) =
if not valid: break
f.write chunk
]#
of atomKey:
s.write $a.key
of atomNum:
s.write $a.num
of atomSymbol:
@@ -436,7 +511,7 @@ proc cborFunc(env: Env; arg: NodeObj): NodeRef =
proc commitFunc(env: Env; arg: NodeObj): NodeRef =
assertArgCount(arg, 1)
let cold = commit(env.store, arg.atom.bs)
let cold = waitFor commit(env.store, arg.atom.bs)
cold.newAtom.newNode
#[
@@ -495,7 +570,7 @@ proc hexFunc(env: Env; args: NodeObj): NodeRef =
of atomSet:
case a.bs.kind
of hotNode:
let cold = commit(env.store, a.bs)
let cold = waitFor commit(env.store, a.bs)
cold.setId.toHex.newAtomString.newNode
of coldNode:
a.bs.setId.toHex.newAtomString.newNode
@@ -572,9 +647,10 @@ proc pathFunc(env: Env; arg: NodeObj): NodeRef =
proc randomFunc(env: Env; arg: NodeObj): NodeRef =
assertArgCount(arg, 1)
var rng = initRand(rand(int.high))
let bs = arg.atom.bs
var random: NodeRef
env.store.randomApply(bs, rand(int.high)) do (id: BlobId; size: BiggestInt):
env.store.randomApply(bs, rng) do (id: BlobId; size: BiggestInt):
random = newNode(newAtom(id, size))
if random.isNil: newNodeList()
else: random
@@ -719,7 +795,7 @@ else:
proc emptyMain() =
let
store = openStore()
bs = store.commit(newBlobSet())
bs = waitFor store.commit(newBlobSet())
echo bs.setId.toHex
proc replMain() =
@@ -828,8 +904,7 @@ proc spryMain() =
else:
lines.add(line)
proc main() =
when isMainModule:
var cmd = ""
for kind, key, val in getopt():
if kind == cmdArgument:
@@ -840,10 +915,9 @@ proc main() =
of "empty": emptyMain()
of "repl": replMain()
of "dump": dumpMain()
of "ingest": ingestMain()
of "server": serverMain()
of "ingest": waitFor ingestMain()
of "server": waitFor serverMain()
of "spry": spryMain()
of "check": checkMain()
else: quit("no such subcommand ")
main()
of "check": waitFor checkMain()
of "replicate": waitFor replicateMain()
else: quit("no such subcommand " & cmd)
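
A note on the new replicate subcommand: it streams every member of the source set, copies each blob chunk by chunk into the destination store, and checks that the ingest stream reports the same id and size; content addressing then makes the final set ids directly comparable. The copy loop distills into a small helper, sketched from the diff's API (copyBlob is a hypothetical name; the buffer size matches the 0x16000 used above):

proc copyBlob(src, dst: BlobStore; id: BlobId; size: BiggestInt) {.async.} =
  let
    readStream = src.openBlobStream(id, size, dataBlob)
    ingestStream = dst.openIngestStream(size, dataBlob)
  var buf: array[0x16000, byte]
  while true:
    let n = await readStream.read(addr buf, buf.len)
    if n == 0: break  # source blob exhausted
    await ingestStream.ingest(addr buf, n)
  close readStream
  let (otherId, otherSize) = await ingestStream.finish()
  doAssert(otherId == id and otherSize == size, "replication mismatch")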

View File

@@ -1,4 +1,4 @@
import std/asyncdispatch
import std/asyncdispatch, std/asyncstreams
import std/hashes, std/streams, std/strutils, std/bitops, std/unicode, std/endians
import cbor, siphash
import ./blobsets/priv/hex
@@ -100,25 +100,6 @@ proc toBlobId*(cbor: CborNode): BlobId =
proc toHex*(id: BlobId|SetId): string = hex.encode(id.data)
## Return BlobId encoded in hexadecimal.
proc take(cid: var BlobId; buf: var string) =
## Take a raw digest from a string buffer.
doAssert(buf.len == digestLen)
copyMem(cid.data[0].addr, buf[0].addr, digestLen)
proc blobHash*(buf: pointer; len: Natural): BlobId =
## Generate a BlobId for a string of data using the BLAKE2b hash algorithm.
var b: Blake2b256
init(b)
update(b, buf, len)
b.finish()
proc blobHash*(data: string): BlobId =
## Generate a BlobId for a string of data using the BLAKE2b hash algorithm.
var b: Blake2b256
init(b)
update(b, data)
b.finish()
proc verify*(cid: BlobId; data: string): bool =
## Verify that a string of data corresponds to a BlobId.
var b: Blake2b256
@@ -201,6 +182,7 @@ type
BlobStore* = ref BlobStoreObj
BlobStoreObj* = object of RootObj
closeImpl*: proc (s: BlobStore) {.nimcall, gcsafe.}
containsImpl*: proc (s: BlobStore; id: BlobId; kind: BlobKind): Future[bool] {.nimcall, gcsafe.}
openBlobStreamImpl*: proc (s: BlobStore; id: BlobId; size: BiggestInt; kind: BlobKind): BlobStream {.nimcall, gcsafe.}
openIngestStreamImpl*: proc (s: BlobStore; size: BiggestInt; kind: BlobKind): IngestStream {.nimcall, gcsafe.}
@@ -208,8 +190,14 @@ proc close*(s: BlobStore) =
## Close active store resources.
if not s.closeImpl.isNil: s.closeImpl(s)
proc contains*(s: BlobStore; id: BlobId; kind: BlobKind): Future[bool] =
## Check if the store contains a blob.
assert(not s.containsImpl.isNil)
s.containsImpl(s, id, kind)
proc openBlobStream*(s: BlobStore; id: BlobId; size = 0.BiggestInt; kind = dataBlob): BlobStream =
## Return a new `BlobStream` for reading a blob.
assert(isNonZero id)
assert(not s.openBlobStreamImpl.isNil)
s.openBlobStreamImpl(s, id, size, kind)
@@ -295,12 +283,13 @@ func newBlobSet*(id: SetId): BlobSet =
## Create a new cold blob set.
BlobSet(kind: coldNode, setId: id)
func sparseIndex(x: Key): int = int(x and keyChunkMask)
template sparseIndex(x: Key): uint64 = (uint64)x and keyChunkMask
template mask(x: Key): uint64 = 1'u64 shl int(x and keyChunkMask)
func compactIndex(t: BlobSet; x: Key): int =
if (x and keyChunkMask) != Key(0):
# TODO: bug in shr and shl, cannot shift all bits out
result = (int)countSetBits(t.bitmap shl (keyBits - x.sparseIndex))
result = (int)countSetBits(t.bitmap shl (keyBits - x.sparseIndex.int))
func masked(t: BlobSet; x: Key): bool =
((t.bitmap shr x.sparseIndex) and 1) != 0
@@ -322,18 +311,8 @@ iterator dumpBlob*(store: BlobStore; id: BlobId): string =
buf.setLen(n)
yield buf
proc commit*(store: BlobStore; bs: BlobSet): BlobSet =
if bs.isCold: return bs
let tmp = BlobSet(kind: hotNode, bitmap: bs.bitmap, table: bs.table)
for e in tmp.table.mitems:
if e.isHot: e = store.commit e
let stream = store.openIngestStream(kind=metaBlob)
var buf = encode tmp.toCbor
waitFor stream.ingest(buf)
let (id, _) = waitFor finish(stream)
result = BlobSet(kind: coldNode, setId: id)
proc loadSet(store: BlobStore; id: SetId; depth: int): Future[BlobSet] {.async.} =
assert(isNonZero id)
assert((not Key(0)) shr depth != Key(0), "loadSet trie is too deep")
var
stream = store.openBlobStream(id, kind=metaBlob)
@@ -396,21 +375,25 @@ proc randomApply*(store: BlobStore; trie: BlobSet; rng: var Rand;
trie = next
i = rng.rand(countSetBits(trie.bitmap)-1)
iterator items*(store: BlobStore; trie: BlobSet; rng: var Rand):
tuple[key: Key; id: BlobId; size: BiggestInt] =
## Apply to all members of the set in a pseudo-random order
## derived from `rng`.
# TODO: add a progress value using the set bits in the bitmasks of each level?
type MemberStream* = FutureStream[tuple[key: Key; id: BlobId; size: BiggestInt]]
proc newMemberStream*(): FutureStream[tuple[key: Key; id: BlobId; size: BiggestInt]] =
newFutureStream[tuple[key: Key; id: BlobId; size: BiggestInt]]()
proc streamMembers*(stream: FutureStream[tuple[key: Key; id: BlobId; size: BiggestInt]];
store: BlobStore; trie: BlobSet) {.async.} =
## Pass each set member to the specified future stream in random order.
var
path: array[maxDepth.int, tuple[mask: uint64, trie: BlobSet]]
level = 0
rng = initRand(rand(high int))
if trie.isCold:
path[0].trie = store.load(trie)
else:
path[0].trie = trie
path[0].mask = not(0'u64) shr (64 - path[0].trie.table.len)
# set the bits of indexes to hit
while 0 < level or path[0].mask != 0'u64:
while (not stream.finished) and (0 < level or path[0].mask != 0'u64):
if path[level].mask == 0'u64:
dec level
continue
@@ -422,13 +405,16 @@ iterator items*(store: BlobStore; trie: BlobSet; rng: var Rand):
path[level].mask = path[level].mask xor bi
var node = path[level].trie.table[i]
if node.kind == leafNode:
yield (node.key, node.blob, node.size)
let val: tuple[key: Key; id: BlobId; size: BiggestInt] =
(node.key, node.blob, node.size)
await stream.write(val)
else:
if node.isCold:
node = store.load(node)
inc level
path[level].mask = not (not(0'u64) shl node.table.len)
path[level].trie = node
complete stream
func nodeCount(bs: BlobSet): int =
## Count of internal nodes in set.
@@ -447,14 +433,17 @@ func leafCount(bs: BlobSet): int =
else:
result.inc n.leafCount
func search*(trie: BlobSet; name: string): BlobId =
proc search*(store: BlobStore; trie: BlobSet; name: string): BlobId =
let key = name.toKey
var
n = trie
k = key
level = 0
while k != Key(0) and n.masked(k):
n = n.table[n.compactIndex(k)]
let i = n.compactIndex(k)
if n.table[i].isCold:
n.table[i] = store.load(n.table[i])
n = n.table[i]
if n.kind == leafNode:
if n.key == key:
return n.blob
@@ -524,7 +513,7 @@ proc insert(store: BlobStore; trie, l: BlobSet; depth: int): BlobSet =
of coldNode:
discard
else:
result.bitmap = result.bitmap or (1'u64 shl key.sparseIndex)
result.bitmap = result.bitmap or key.mask
result.table.insert(l, result.compactIndex(key))
proc insert*(store: BlobStore; trie, node: BlobSet): BlobSet =
@@ -539,43 +528,41 @@ proc insert*(store: BlobStore; t: BlobSet; key: Key; blob: BlobId; size: Biggest
proc insert*(store: BlobStore; t: BlobSet; name: string; blob: BlobId; size: BiggestInt): BlobSet =
insert(store, t, name.toKey, blob, size)
proc remove(store: BlobStore; trie: BlobSet; key: Key; depth: int): BlobSet =
proc remove(store: BlobStore; trie: BlobSet; fullKey: Key; depth: int): BlobSet =
result = trie
let key = key shr (depth * keyChunkBits)
if trie.masked(key):
let key = fullKey shr (depth * keyChunkBits)
if result.masked(key):
let
depth = depth + 1
i = trie.compactIndex(key)
if trie.table[i].isCold:
trie.table[i] = store.load(trie.table[i])
case trie.table[i].kind
i = result.compactIndex(key)
if result.table[i].isCold:
result.table[i] = store.load(result.table[i])
trie.table[i] = result.table[i]
case result.table[i].kind
of hotNode:
let newTrie = remove(store, trie.table[i], key, depth)
if newTrie != trie.table[i]:
if newTrie.isNil:
if trie.table.len == 1:
result = nil
else:
result = newBlobSet()
for j in trie.table.low..trie.table.high:
if j == i: continue
result = insert(store, result, newTrie, depth)
result.table[i] = remove(store, result.table[i], fullKey, depth)
of leafNode:
if trie.table.len == 1:
result = nil
if result.table.len == 2:
result.table.delete(i)
result = result.table[0]
else:
result.table.delete(i)
result.bitmap = result.bitmap xor key.mask
of coldNode:
discard # previously handled
proc remove*(store: BlobStore; trie: BlobSet; name: string): BlobSet =
proc remove*(store: BlobStore; trie: BlobSet; key: Key): BlobSet =
## Remove a blob from a trie.
if trie.isEmpty:
result = trie
else:
let key = name.toKey
result = remove(store, trie, key, 0)
if result.isNil:
result = newBlobSet()
proc remove*(store: BlobStore; trie: BlobSet; name: string): BlobSet =
remove(store, trie, name.toKey)
proc union*(store: BlobStore; sets: varargs[BlobSet]): BlobSet =
## Return the union of `sets`.
# TODO: lazy-load set
@@ -589,16 +576,17 @@ proc union*(store: BlobStore; sets: varargs[BlobSet]): BlobSet =
func leafCount*(size: Natural): int = (size+blobLeafSize-1) div blobLeafSize
func compressTree*(leaves: var seq[BlobId]) =
func compressTree*(leaves: var openArray[BlobId]) =
var
ctx: Blake2b256
nodeOffset = 0
nodeDepth = 0
while leaves.len > 1:
len = leaves.len
while len > 1:
nodeOffset = 0
inc nodeDepth
var pos, next: int
while pos < leaves.len:
while pos < len:
ctx.init do (params: var Blake2bParams):
params.fanout = 2
params.depth = 255
@@ -608,14 +596,50 @@ func compressTree*(leaves: var seq[BlobId]) =
inc nodeOffset
ctx.update(leaves[pos].data)
inc pos
if pos < leaves.len:
if pos < len:
ctx.update(leaves[pos].data)
inc pos
leaves[next] = ctx.finish()
inc next
leaves.setLen(next)
len = next
# TODO: BLAKE2 tree finalization flags
proc blobHash*(s: string): BlobId =
doAssert(s.len <= blobLeafSize)
var
ctx: Blake2b256
leaves: array[1, BlobId]
ctx.init do (params: var Blake2bParams):
params.fanout = 2
params.depth = 255
params.leafLength = blobLeafSize
params.nodeOffset = 0
if s.len > 0:
ctx.update(unsafeAddr s[0], s.len)
leaves[0] = finish ctx
compressTree(leaves)
leaves[0]
proc commit*(store: BlobStore; bs: BlobSet): Future[BlobSet] {.async.} =
if bs.isCold: return bs
let tmp = BlobSet(kind: hotNode, bitmap: bs.bitmap, table: bs.table)
for e in tmp.table.mitems:
if e.isHot: e = await store.commit e
var buf = encode tmp.toCbor
#let
# localId = blobHash(buf)
#echo "check if store has ", localId.toHex
#let
# present = await store.contains(localId, metaBlob)
#echo "check returned ", present
#if not present:
block:
let stream = store.openIngestStream(size=buf.len, kind=metaBlob)
await stream.ingest(buf)
let (storeId, _) = await finish(stream)
#assert(localId == storeId)
return BlobSet(kind: coldNode, setId: storeId)
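
With these changes the mutating surface is uniformly store-threaded: insert, remove, search, and contains all take the BlobStore so cold nodes can be loaded on demand, and commit is async. A hedged round-trip sketch using only signatures visible in this diff, with store assumed to be any open BlobStore:

proc roundTrip(store: BlobStore) {.async.} =
  var bs = newBlobSet()
  let blob = blobHash("hello")                 # single-leaf tree hash
  bs = insert(store, bs, "greeting", blob, 5)
  doAssert contains(store, bs, "greeting")
  doAssert search(store, bs, "greeting") == blob
  bs = remove(store, bs, "greeting")
  let cold = await commit(store, bs)           # commits hot child nodes first
  echo "set id: ", cold.setId.toHex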
#
# Null Store implementation
#

View File

@@ -82,7 +82,7 @@ proc addBlobSets*(spry: Interpreter) =
let
env = BlobStoreNode(evalArgInfix(spry))
set = BlobSetSpryNode(evalArg(spry)).set
BlobSetSpryNode(set: env.store.commit(set))
BlobSetSpryNode(set: waitFor env.store.commit(set))
nimFunc "hex":
let id = BlobIdSpryNode(evalArg(spry)).id

View File

@@ -28,14 +28,13 @@ suite "Http store":
test "ingest":
(blob, size) = waitFor client.ingestFile("tests/test_http.nim")
const count = 64
var
setId: SetId
bs: BlobSet
suite "store":
var
setId: SetId
bs: BlobSet
rng = initRand(rand(int.high))
const count = 64
var rng = initRand(rand(int.high))
test "commit":
bs = newBlobSet()
@@ -44,8 +43,8 @@ suite "store":
name = $i
blob = waitFor client.ingest(newString(i))
echo "insert ", blob, " ", i
bs = insert(bs, name, blob, i)
setId = commit(client, bs).setId
bs = insert(client, bs, name, blob, i)
setId = (waitFor commit(client, bs)).setId
test "load":
bs = load(client, setId)
@@ -53,7 +52,7 @@ suite "store":
let
name = $i
blob = blobHash newString(i)
other = bs.search(name)
other = search(store, bs, name)
#doAssert(other == blob)
for i in 1..count:
let
@@ -71,14 +70,23 @@ suite "store":
let stream = store.openBlobStream(id, size, dataBlob)
close stream
test "applyAll":
bs = load(client, setId)
for i in 1..count:
var found = false
for key, id, size in items(store, bs, rng):
if i == size: found = true
if not found:
echo i, " not found"
apply(client, bs, $i) do (id: BlobId; size: BiggestInt):
echo "but ", i, " really is in the set"
raiseAssert($i)
test "stream":
proc findAll() {.async.} =
for i in 1..count:
let stream = newMemberStream()
asyncCheck stream.streamMembers(store, bs)
var
found = false
count = 0
while not found:
let (valid, val) = await stream.read()
if not valid: break
if i == val.size:
complete stream
found = true
inc count
doAssert(count > 0, "FutureStream yielded no members")
doAssert(found, $i & " not found in set")
echo "found ", i, " after ", count
waitFor findAll()
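
A note on the "stream" test: a consumer may end the traversal early by calling complete stream; the producer's loop guard (while (not stream.finished) and ... above) observes this and stops walking the trie. In sketch form, with wanted as a hypothetical search target:

proc findFirst(store: BlobStore; bs: BlobSet; wanted: BiggestInt) {.async.} =
  let stream = newMemberStream()
  asyncCheck stream.streamMembers(store, bs)
  while true:
    let (valid, m) = await stream.read()
    if not valid: break
    if m.size == wanted:
      complete stream  # producer sees stream.finished and stops
      break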

View File

@@ -5,13 +5,14 @@ import ../src/blobsets
suite "Blob set tests":
var
store = newNullStore()
randomCid = blobHash("")
# test "zero blob":
# doAssert(randomCid == zeroChunk)
proc randomize() =
randomCid = blobHash(randomCid.data.addr, randomCid.data.len)
randomCid = blobHash(randomCid.toHex)
proc testPath(s: BlobSet; root: string): BlobSet =
for path in walkDirRec(root):
@@ -20,21 +21,21 @@ suite "Blob set tests":
blob = randomCid
str = $randomCid
doAssert(str.toBlobid == randomCid)
result = insert(s, path, blob, 0)
let found = result.search(path)
result = insert(store, s, path, blob, 0)
let found = search(store, result, path)
doAssert(found == randomCid)
test "functional insert":
let
a = newBlobSet()
b = insert(a, "foo", randomCid, 0)
c = insert(b, "bar", randomCid, 0)
doAssert(contains(b, "foo"))
doAssert(contains(c, "foo"))
doAssert(contains(c, "bar"))
doAssert(not contains(a, "foo"))
doAssert(not contains(a, "bar"))
doAssert(not contains(b, "bar"))
b = insert(store, a, "foo", randomCid, 0)
c = insert(store, b, "bar", randomCid, 0)
doAssert(contains(store, b, "foo"))
doAssert(contains(store, c, "foo"))
doAssert(contains(store, c, "bar"))
doAssert(not contains(store, a, "foo"))
doAssert(not contains(store, a, "bar"))
doAssert(not contains(store, b, "bar"))
test "apply":
var bs = newBlobSet()
@@ -42,14 +43,26 @@ suite "Blob set tests":
let
name = $i
blob = blobHash name
bs = insert(bs, name, blob, 0)
bs = insert(store, bs, name, blob, 0)
for i in 1..1024:
let
name = $i
blob = blobHash name
other = bs.search(name)
other = search(store, bs, name)
doAssert(other == blob)
test "remove":
var bs = newBlobSet()
for i in 1..1024:
let
name = $i
blob = blobHash name
bs = insert(store, bs, name, blob, 0)
for i in 1..1024:
let name = $i
bs = remove(store, bs, name)
doAssert(not contains(store, bs, name))
test "sets":
var s = newBlobSet()
for kind, key, val in getopt():
@@ -57,4 +70,3 @@ suite "Blob set tests":
s = s.testPath(key)
if s.isEmpty:
s = s.testPath(".")
echo s.leafCount, " leaves in ", s.nodeCount, " nodes"