Fix REPL

parent a163f062d3
commit 02c6fddad6
@@ -0,0 +1,12 @@
# Package

version = "0.1.1"
author = "Emery Hemingway"
description = "A simple content addressed file-system"
license = "GPLv3"
srcDir = "src"

requires "nim >= 0.18.0", "base58", "cbor >= 0.2.0"

bin = @["dagfs_repl"]
skipFiles = @["dagfs_repl.nim"]

Binary file not shown.
@@ -0,0 +1,136 @@
when not defined(genode):
  {.error: "Genode only Dagfs client".}

import cbor, genode, std/tables, std/strutils

import dagfs, dagfs/stores, dagfs/genode/dagfs_session

const
  currentPath = currentSourcePath.rsplit("/", 1)[0]
  dagfsClientH = currentPath & "/dagfs_client.h"
{.passC: "-I" & currentPath & "/../../../genode/include".}

type
  DagfsClientBase {.importcpp, header: dagfsClientH.} = object
  DagfsClientCpp = Constructible[DagfsClientBase]

proc sigh_ack_avail(cpp: DagfsClientCpp; sig: SignalContextCapability) {.
  importcpp: "#->conn.channel().sigh_ack_avail(@)", tags: [RpcEffect].}

proc readyToSubmit(cpp: DagfsClientCpp): bool {.
  importcpp: "#->conn.source().ready_to_submit()".}

proc readyToAck(cpp: DagfsClientCpp): bool {.
  importcpp: "#->conn.source().ready_to_ack()".}

proc ackAvail(cpp: DagfsClientCpp): bool {.
  importcpp: "#->conn.source().ack_avail()".}

proc allocPacket(cpp: DagfsClientCpp; size = MaxPacketSize): DagfsPacket {.
  importcpp: "#->conn.source().alloc_packet(@)".}

proc packetContent(cpp: DagfsClientCpp; pkt: DagfsPacket): pointer {.
  importcpp: "#->conn.source().packet_content(@)".}

proc submitPacket(cpp: DagfsClientCpp; pkt: DagfsPacket; cid: cstring; op: DagfsOpcode) {.
  importcpp: "#->conn.source().submit_packet(Dagfs::Packet(#, (char const *)#, #))".}

proc getAckedPacket(cpp: DagfsClientCpp): DagfsPacket {.
  importcpp: "#->conn.source().get_acked_packet()".}

proc releasePacket(cpp: DagfsClientCpp; pkt: DagfsPacket) {.
  importcpp: "#->conn.source().release_packet(@)".}

type
  DagfsClient* = ref DagfsClientObj
  DagfsClientObj = object of DagfsStoreObj
    ## Dagfs session client.
    cpp: DagfsClientCpp

proc icClose(s: DagfsStore) =
  var ic = DagfsClient(s)
  destruct ic.cpp

proc icPut(s: DagfsStore; blk: string): Cid =
  ## Put a block to the Dagfs server; blocks for a two-packet round-trip.
  let ic = DagfsClient(s)
  var
    blk = blk
    pktCid = dagHash blk
  if pktCid == zeroBlock:
    return pktCid
  assert(ic.cpp.readyToSubmit, "Dagfs client packet queue congested")
  var pkt = ic.cpp.allocPacket(blk.len)
  let pktBuf = ic.cpp.packetContent pkt
  defer: ic.cpp.releasePacket pkt
  assert(not pktBuf.isNil, "allocated packet has nil content")
  assert(pkt.size >= blk.len)
  pkt.setLen blk.len
  copyMem(pktBuf, blk[0].addr, blk.len)
  assert(ic.cpp.readyToSubmit, "Dagfs client packet queue congested")
  ic.cpp.submitPacket(pkt, pktCid.toHex, PUT)
  let ack = ic.cpp.getAckedPacket()
  doAssert(ack.error == OK)
  result = ack.cid()
  assert(result.isValid, "server returned a packet with an invalid CID")

proc icGetBuffer(s: DagfsStore; cid: Cid; buf: pointer; len: Natural): int =
  ## Get from the Dagfs server; blocks for a packet round-trip.
  let ic = DagfsClient(s)
  assert(ic.cpp.readyToSubmit, "Dagfs client packet queue congested")
  let pkt = ic.cpp.allocPacket len
  ic.cpp.submitPacket(pkt, cid.toHex, GET)
  let ack = ic.cpp.getAckedPacket()
  doAssert(ack.cid == cid)
  if ack.error == OK:
    let pktBuf = ic.cpp.packetContent ack
    assert(not pktBuf.isNil, "ack packet has nil content")
    assert(ack.len <= len)
    assert(ack.len > 0)
    result = ack.len
    copyMem(buf, pktBuf, result)
  if pkt.size > 0:
    # free the original packet that was allocated
    ic.cpp.releasePacket pkt
  case ack.error:
  of OK: discard
  of MISSING:
    raise cid.newMissingObject
  else:
    raise newException(CatchableError, "Dagfs packet error " & $ack.error)

proc icGet(s: DagfsStore; cid: Cid; result: var string) =
  ## Get from the Dagfs server; blocks for a packet round-trip.
  let ic = DagfsClient(s)
  assert(ic.cpp.readyToSubmit, "Dagfs client packet queue congested")
  let pkt = ic.cpp.allocPacket()
  defer: ic.cpp.releasePacket pkt
  ic.cpp.submitPacket(pkt, cid.toHex, GET)
  let ack = ic.cpp.getAckedPacket()
  doAssert(ack.cid == cid)
  case ack.error:
  of OK:
    let ackBuf = ic.cpp.packetContent ack
    assert(not ackBuf.isNil)
    assert(ack.len > 0)
    result.setLen ack.len
    copyMem(result[0].addr, ackBuf, result.len)
    assert(cid.verify(result), "Dagfs client packet failed verification")
  of MISSING:
    raise cid.newMissingObject
  else:
    raise newException(CatchableError, "Dagfs packet error " & $ack.error)

const
  DefaultDagfsBufferSize* = 1 shl 20

proc newDagfsClient*(env: GenodeEnv; label = ""; bufferSize = DefaultDagfsBufferSize): DagfsClient =
  ## Blocks retrieved by `get` are not verified.
  proc construct(cpp: DagfsClientCpp; env: GenodeEnv; label: cstring; txBufSize: int) {.
    importcpp.}
  new result
  construct(result.cpp, env, label, bufferSize)
  result.closeImpl = icClose
  result.putImpl = icPut
  result.getBufferImpl = icGetBuffer
  result.getImpl = icGet
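A minimal usage sketch of the client above (not part of this commit; it assumes a surrounding Genode component that supplies `env`, and the session label and block content are illustrative):

    let store = env.newDagfsClient("cache")
    let cid = store.put("some block data")   # blocks for a PUT round-trip
    var blk = ""
    store.get(cid, blk)                      # blocks for a GET round-trip
    assert cid.verify(blk)
    close store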
@@ -0,0 +1,56 @@
/*
 * \brief  IPLD C++ session component
 * \author Emery Hemingway
 * \date   2017-11-07
 */

/*
 * Copyright (C) 2017 Genode Labs GmbH
 *
 * This file is part of the Genode OS framework, which is distributed
 * under the terms of the GNU Affero General Public License version 3.
 */

#ifndef _INCLUDE__NIM__IPLDSERVER_H_
#define _INCLUDE__NIM__IPLDSERVER_H_

#include <ipld_session/rpc_object.h>
#include <base/heap.h>
#include <base/attached_ram_dataspace.h>

struct Communication_buffer
{
	Genode::Attached_ram_dataspace _tx_ds;

	Communication_buffer(Genode::Pd_session &pd,
	                     Genode::Region_map &rm,
	                     Genode::size_t tx_buf_size)
	: _tx_ds(pd, rm, tx_buf_size) { }
};

struct IpldSessionComponentBase : Communication_buffer,
                                  Ipld::Session_rpc_object
{
	static Genode::size_t tx_buf_size(char const *args)
	{
		Genode::size_t const buf_size = Genode::Arg_string::find_arg(
			args, "tx_buf_size").aligned_size();
		if (!buf_size)
			throw Genode::Service_denied();
		return buf_size;
	}

	IpldSessionComponentBase(Genode::Env *env, char const *args)
	:
		Communication_buffer(env->pd(), env->rm(), tx_buf_size(args)),
		Session_rpc_object(env->rm(), env->ep().rpc_ep(), _tx_ds.cap())
	{ }

	void packetHandler(Genode::Signal_context_capability cap)
	{
		_tx.sigh_ready_to_ack(cap);
		_tx.sigh_packet_avail(cap);
	}
};

#endif /* _INCLUDE__NIM__IPLDSERVER_H_ */
@@ -0,0 +1,142 @@
#
# \brief  IPLD server factory
# \author Emery Hemingway
# \date   2017-11-11
#

#
# Copyright (C) 2017 Genode Labs GmbH
#
# This file is part of the Genode OS framework, which is distributed
# under the terms of the GNU Affero General Public License version 3.
#

import std/strtabs, std/tables, std/xmltree, std/strutils

import cbor, genode, genode/signals, genode/servers, ipld, ipld/store, ipldsession

const
  currentPath = currentSourcePath.rsplit("/", 1)[0]
  ipldserverH = currentPath & "/ipldserver.h"

type
  IpldSessionComponentBase {.importcpp, header: ipldserverH.} = object
  SessionCpp = Constructible[IpldSessionComponentBase]
  Session = ref object
    cpp: SessionCpp
    sig: SignalHandler
    store: IpldStore
    id: SessionId
    label: string

proc processPacket(session: Session; pkt: var IpldPacket) =
  proc packetContent(cpp: SessionCpp; pkt: IpldPacket): pointer {.
    importcpp: "#->sink().packet_content(@)".}
  let cid = pkt.cid
  case pkt.operation
  of PUT:
    try:
      var
        pktBuf = session.cpp.packetContent pkt
        heapBuf = newString pkt.len
      copyMem(heapBuf[0].addr, pktBuf, heapBuf.len)
      let putCid = session.store.put(heapBuf, cid.hash)
      assert(putCid.isValid, "server packet returned invalid CID from put")
      pkt.setCid putCid
    except:
      echo "unhandled PUT error ", getCurrentExceptionMsg()
      pkt.setError ERROR
  of GET:
    try:
      let
        pktBuf = session.cpp.packetContent pkt
        n = session.store.getBuffer(cid, pktBuf, pkt.size)
      pkt.setLen n
    except BufferTooSmall:
      pkt.setError OVERSIZE
    except MissingObject:
      pkt.setError MISSING
    except:
      echo "unhandled GET error ", getCurrentExceptionMsg()
      pkt.setError ERROR
  else:
    echo "invalid packet operation"
    pkt.setError ERROR

proc newSession(env: GenodeEnv; store: IpldStore; id: SessionId; label, args: string): Session =
  ## Create a new session and packet handling procedure.
  let session = new Session
  assert(not session.isNil)
  proc construct(cpp: SessionCpp; env: GenodeEnv; args: cstring) {.importcpp.}
  session.cpp.construct(env, args)
  session.store = store
  session.id = id
  session.label = label
  session.sig = env.ep.newSignalHandler do ():
    proc packetAvail(cpp: SessionCpp): bool {.
      importcpp: "#->sink().packet_avail()".}
    proc readyToAck(cpp: SessionCpp): bool {.
      importcpp: "#->sink().ready_to_ack()".}
    while session.cpp.packetAvail and session.cpp.readyToAck:
      proc getPacket(cpp: SessionCpp): IpldPacket {.
        importcpp: "#->sink().get_packet()".}
      var pkt = session.cpp.getPacket()
      session.processPacket pkt
      proc acknowledgePacket(cpp: SessionCpp; pkt: IpldPacket) {.
        importcpp: "#->sink().acknowledge_packet(@)".}
      session.cpp.acknowledgePacket(pkt)

  proc packetHandler(cpp: SessionCpp; cap: SignalContextCapability) {.
    importcpp: "#->packetHandler(@)".}
  session.cpp.packetHandler(session.sig.cap)
  result = session

proc manage(ep: Entrypoint; s: Session): IpldSessionCapability =
  ## Manage a session from the default entrypoint.
  proc manage(ep: Entrypoint; cpp: SessionCpp): IpldSessionCapability {.
    importcpp: "#.manage(*#)".}
  result = ep.manage(s.cpp)
  GC_ref s

proc dissolve(ep: Entrypoint; s: Session) =
  ## Dissolve a session from the entrypoint so that it can be freed.
  proc dissolve(ep: Entrypoint; cpp: SessionCpp) {.
    importcpp: "#.dissolve(*#)".}
  ep.dissolve(s.cpp)
  destruct(s.cpp)
  dissolve(s.sig)
  GC_unref s

type
  IpldServer* = ref object
    env: GenodeEnv
    store*: IpldStore
    sessions*: Table[SessionId, Session]

proc newIpldServer*(env: GenodeEnv; store: IpldStore): IpldServer =
  IpldServer(
    env: env, store: store,
    sessions: initTable[SessionId, Session]())

proc create*(server: IpldServer; id: SessionId; label, args: string) =
  if not server.sessions.contains id:
    try:
      let
        session = newSession(server.env, server.store, id, label, args)
        cap = server.env.ep.manage(session)
      server.sessions[id] = session
      proc deliverSession(env: GenodeEnv; id: SessionId; cap: IpldSessionCapability) {.
        importcpp: "#->parent().deliver_session_cap(Genode::Parent::Server::Id{#}, #)".}
      server.env.deliverSession(id, cap)
      echo "session opened for ", label
    except:
      echo "failed to create session for '", label, "', ", getCurrentExceptionMsg()
      server.env.sessionResponseDeny id

proc close*(server: IpldServer; id: SessionId) =
  ## Close a session at the IPLD server.
  if server.sessions.contains id:
    let session = server.sessions[id]
    server.env.ep.dissolve(session)
    server.sessions.del id
    server.env.sessionResponseClose id
@@ -0,0 +1,47 @@
#
# \brief  IPLD session definitions
# \author Emery Hemingway
# \date   2017-11-11
#

#
# Copyright (C) 2017 Genode Labs GmbH
#
# This file is part of the Genode OS framework, which is distributed
# under the terms of the GNU Affero General Public License version 3.
#

import ipld

const MaxPacketSize* = 1 shl 18

type
  IpldSessionCapability* {.final, pure,
    importcpp: "Ipld::Session_capability",
    header: "<ipld_session/capability.h>".} = object

  IpldPacket* {.
    importcpp: "Ipld::Packet",
    header: "<ipld_session/ipld_session.h>".} = object

  IpldOpcode* {.importcpp: "Ipld::Packet::Opcode".} = enum
    PUT, GET, INVALID

  IpldError* {.importcpp: "Ipld::Packet::Error".} = enum
    OK, MISSING, OVERSIZE, FULL, ERROR

proc size*(pkt: IpldPacket): csize {.importcpp.}
  ## Physical packet size.

proc cidStr(p: IpldPacket): cstring {.importcpp: "#.cid().string()".}
proc cid*(p: IpldPacket): Cid = parseCid $p.cidStr
proc setCid*(p: var IpldPacket; cid: cstring) {.importcpp: "#.cid(@)".}
proc setCid*(p: var IpldPacket; cid: Cid) = p.setCid(cid.toHex())

proc operation*(pkt: IpldPacket): IpldOpcode {.importcpp.}
proc len*(pkt: IpldPacket): csize {.importcpp: "length".}
  ## Logical packet length.
proc setLen*(pkt: var IpldPacket; len: int) {.importcpp: "length".}
  ## Set logical packet length.
proc error*(pkt: IpldPacket): IpldError {.importcpp.}
proc setError*(pkt: var IpldPacket; err: IpldError) {.importcpp: "error".}
@@ -0,0 +1,28 @@
/*
 * \brief  C++ base of the Dagfs client
 * \author Emery Hemingway
 * \date   2017-11-08
 */

/*
 * Copyright (C) 2017 Genode Labs GmbH
 *
 * This file is part of the Genode OS framework, which is distributed
 * under the terms of the GNU Affero General Public License version 3.
 */

/* Genode includes */
#include <dagfs_session/connection.h>
#include <base/heap.h>

struct DagfsClientBase
{
	Genode::Heap heap;
	Genode::Allocator_avl tx_packet_alloc { &heap };
	Dagfs::Connection conn;

	DagfsClientBase(Genode::Env *env, char const *label, Genode::size_t tx_buf_size)
	: heap(env->pd(), env->rm()),
	  conn(*env, tx_packet_alloc, label, tx_buf_size)
	{ }
};
@@ -1,56 +0,0 @@
#
# \brief  Server-side IPLD session interface
# \author Emery Hemingway
# \date   2017-11-04
#

#
# Copyright (C) 2017 Genode Labs GmbH
#
# This file is part of the Genode OS framework, which is distributed
# under the terms of the GNU Affero General Public License version 3.
#

import xmltree, strtabs, xmlparser, streams, tables,
  genode, genode/servers, genode/roms, ipld/genode/ipldserver, ipld/ipfsdaemon

proc newDaemonStore(env: GenodeEnv): IpfsStore =
  ## Open a connection to an IPFS daemon.
  try:
    let
      configRom = env.newRomClient("config")
      config = configRom.xml
    close configRom
    let daemonUrl = config.attrs["ipfs_url"]
    result = newIpfsStore(daemonUrl)
  except:
    let err = getCurrentException()
    quit("failed to connect to IPFS, " & err.msg)

componentConstructHook = proc (env: GenodeEnv) =
  let
    store = env.newDaemonStore()   # server backend
    server = env.newIpldServer(store)   # store server

  proc processSessions(rom: RomClient) =
    ## ROM signal handling procedure.
    ## Create and close 'Ipld' sessions from the
    ## 'session_requests' ROM.
    update rom
    var requests = initSessionRequestsParser(rom)

    for id in requests.close:
      server.close id

    for id, service, label in requests.create:
      if service == "Ipld":
        server.create id, label, requests.args

  let sessionsHandle = env.newRomHandler(
    "session_requests", processSessions)

  env.announce "Ipld"
  process sessionsHandle # process the request backlog and return to the entrypoint
@@ -0,0 +1,140 @@
import std/hashes, std/streams, std/strutils
import base58/bitcoin, cbor
import ./dagfs/priv/hex, ./dagfs/priv/blake2

const
  maxBlockSize* = 1 shl 18
    ## Maximum supported block size.
  digestLen* = 32
    ## Length of a block digest.

type Cid* = object
  ## Content IDentifier, used to identify blocks.
  digest*: array[digestLen, uint8]

proc initCid*(): Cid = Cid()
  ## Initialize an invalid CID.

proc isValid*(x: Cid): bool =
  ## Check that a CID has been properly initialized.
  for c in x.digest.items:
    if c != 0: return true

proc `==`*(x, y: Cid): bool =
  ## Compare two CIDs.
  for i in 0..<digestLen:
    if x.digest[i] != y.digest[i]:
      return false
  true

proc `==`*(cbor: CborNode; cid: Cid): bool =
  ## Compare a CBOR node with a CID.
  if cbor.kind == cborBytes:
    for i in 0..<digestLen:
      if cid.digest[i] != cbor.bytes[i].uint8:
        return false
    result = true

proc hash*(cid: Cid): Hash = hash cid.digest
  ## Reduce a CID into an integer for use in tables.

proc toCbor*(cid: Cid): CborNode = newCborBytes cid.digest
  ## Generate a CBOR representation of a CID.

proc toCid*(cbor: CborNode): Cid =
  ## Parse a CID from its CBOR representation.
  assert(cbor.bytes.len == digestLen)
  for i in 0..<digestLen:
    result.digest[i] = cbor.bytes[i].uint8

{.deprecated: [newCborBytes: toCbor].}

proc toHex*(cid: Cid): string = hex.encode(cid.digest)
  ## Return the CID encoded in hexadecimal.

proc writeUvarint*(s: Stream; n: SomeInteger) =
  ## Write an IPFS varint.
  var n = n
  while true:
    let c = int(n and 0x7f)
    n = n shr 7
    if n == 0:
      s.write c.char
      break
    else:
      s.write char(c or 0x80)

proc readUvarint*(s: Stream): BiggestInt =
  ## Read an IPFS varint.
  var shift: int
  while shift < (9*8):
    let c = s.readChar.BiggestInt
    result = result or ((c and 0x7f) shl shift)
    if (c and 0x80) == 0:
      break
    shift.inc 7

proc toIpfs*(cid: Cid): string =
  ## Return the CID in IPFS base58 multibase encoding.
  const
    multiRaw = 0x55
    multiBlake2b_256 = 0xb220
  let s = newStringStream()
  s.writeUvarint 1
  s.writeUvarint multiRaw
  s.writeUvarint multiBlake2b_256
  s.writeUvarint digestLen
  for e in cid.digest:
    s.write e
  s.setPosition 0
  result = 'z' & bitcoin.encode(s.readAll)
  close s

proc `$`*(cid: Cid): string = toHex cid
  ## Return the CID in hexadecimal, the default textual encoding.

proc parseCid*(s: string): Cid =
  ## Parse a CID from a hexadecimal string.
  var raw = parseHexStr s
  if raw.len != digestLen:
    raise newException(ValueError, "invalid CID length")
  for i in 0..<digestLen:
    result.digest[i] = raw[i].byte

const
  zeroBlock* = parseCid "8ddb61928ec76e4ee904cd79ed977ab6f5d9187f1102975060a6ba6ce10e5481"
    ## CID of the zero block of maximum size.

proc take*(cid: var Cid; buf: var string) =
  ## Take a raw digest from a string buffer.
  doAssert(buf.len == digestLen)
  copyMem(cid.digest[0].addr, buf[0].addr, digestLen)

proc dagHash*(data: string): Cid =
  ## Generate a CID for a string of data using the BLAKE2b hash algorithm.
  assert(data.len <= maxBlockSize)
  var b: Blake2b
  blake2b_init(b, digestLen, nil, 0)
  blake2b_update(b, data, data.len)
  var s = blake2b_final(b)
  copyMem(result.digest[0].addr, s[0].addr, digestLen)

proc verify*(cid: Cid; data: string): bool =
  ## Verify that a string of data corresponds to a CID.
  var b: Blake2b
  blake2b_init(b, digestLen, nil, 0)
  blake2b_update(b, data, data.len)
  let digest = blake2b_final(b)
  for i in 0..<digestLen:
    if cid.digest[i] != digest[i]:
      return false
  true

iterator simpleChunks*(s: Stream; size = maxBlockSize): string =
  ## Iterator that breaks a stream into simple chunks.
  doAssert(size <= maxBlockSize)
  var tmp = newString(size)
  while not s.atEnd:
    tmp.setLen(size)
    tmp.setLen(s.readData(tmp[0].addr, size))
    yield tmp
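A brief sketch of the CID interface above (not part of this commit; the module name `dagfs` is assumed from the imports used elsewhere in this change):

    import dagfs

    let
      blk = "some block data"
      cid = dagHash blk                  # BLAKE2b-256 digest wrapped in a Cid
    assert cid.isValid
    assert cid.verify(blk)               # re-hash and compare
    assert parseCid(cid.toHex) == cid    # hexadecimal round-trip
    echo cid.toIpfs                      # base58 multibase form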
@@ -0,0 +1,360 @@
import strutils, streams, tables, cbor, os, math

import ../dagfs, ./stores

type EntryKey = enum
  typeKey = 1,
  dataKey = 2,
  sizeKey = 3

type FsType* = enum
  ufsFile = 0,
  ufsDir = 1

type FsKind* = enum
  fileNode,
  dirNode,
  shallowDir,
  shallowFile

type
  FileLink* = object
    cid*: Cid
    size*: int

  FsNode* = ref object
    cid: Cid
    case kind*: FsKind
    of fileNode:
      links*: seq[FileLink]
    of dirNode:
      entries: OrderedTable[string, FsNode]
    of shallowFile, shallowDir:
      discard
    size: BiggestInt

proc isRaw*(file: FsNode): bool =
  file.links.len == 0

proc cid*(u: FsNode): Cid =
  assert u.cid.isValid
  u.cid

proc isFile*(u: FsNode): bool = u.kind in { fileNode, shallowFile }
proc isDir*(u: FsNode): bool = u.kind in { dirNode, shallowDir }

proc size*(u: FsNode): BiggestInt =
  if u.kind == dirNode: u.entries.len.BiggestInt
  else: u.size

proc newFsRoot*(): FsNode =
  FsNode(
    cid: initCid(),
    kind: dirNode,
    entries: initOrderedTable[string, FsNode](8))

proc newUnixfsFile*(): FsNode =
  FsNode(kind: fileNode, cid: initCid())

proc newUnixfsDir*(cid: Cid): FsNode =
  FsNode(cid: cid, kind: dirNode)

proc add*(root: var FsNode; name: string; node: FsNode) =
  root.entries[name] = node

proc addDir*(root: var FsNode; name: string; cid: Cid) {.deprecated.} =
  assert cid.isValid
  root.add name, FsNode(kind: dirNode, cid: cid)

proc addFile*(root: var FsNode; name: string; cid: Cid; size: BiggestInt) {.deprecated.} =
  assert cid.isValid
  root.add name, FsNode(kind: fileNode, cid: cid, size: size)

proc del*(dir: var FsNode; name: string) =
  dir.entries.del name

const
  DirTag* = 0xda3c80 ## CBOR tag for UnixFS directories
  FileTag* = 0xda3c81 ## CBOR tag for UnixFS files

proc isUnixfs*(bin: string): bool =
  ## Check if a string contains a UnixFS node
  ## in CBOR form.
  var
    s = newStringStream bin
    c: CborParser
  try:
    c.open s
    c.next
    if c.kind == CborEventKind.cborTag:
      result = c.tag == DirTag or c.tag == FileTag
  except ValueError: discard
  close s

proc toCbor*(u: FsNode): CborNode =
  case u.kind
  of fileNode:
    let array = newCborArray()
    array.seq.setLen u.links.len
    for i in 0..u.links.high:
      let L = newCborMap()
      # typeEntry is reserved but not in use
      L[dataKey.int] = u.links[i].cid.newCborBytes
      L[sizeKey.int] = u.links[i].size.newCborInt
      array.seq[i] = L
    result = newCborTag(FileTag, array)
  of dirNode:
    let map = newCborMap()
    for name, node in u.entries:
      var entry = newCborMap()
      case node.kind
      of fileNode, shallowFile:
        entry[typeKey.int] = ufsFile.int.newCborInt
        entry[dataKey.int] = node.cid.newCborBytes
        entry[sizeKey.int] = node.size.newCborInt
      of dirNode:
        entry[typeKey.int] = ufsDir.int.newCborInt
        entry[dataKey.int] = node.cid.newCborBytes
        entry[sizeKey.int] = node.entries.len.newCborInt
      of shallowDir:
        entry[typeKey.int] = ufsDir.int.newCborInt
        entry[dataKey.int] = node.cid.newCborBytes
        entry[sizeKey.int] = node.size.int.newCborInt
      map[name] = entry
    # TODO: the CBOR maps must be sorted
    result = newCborTag(DirTag, map)
  else:
    raiseAssert "shallow FsNodes can not be encoded"

template parseAssert(cond: bool; msg = "") =
  if not cond: raise newException(
    ValueError,
    if msg == "": "invalid UnixFS CBOR" else: "invalid UnixFS CBOR, " & msg)

proc parseFs*(raw: string; cid: Cid): FsNode =
  ## Parse a string containing CBOR data into an FsNode.
  new result
  result.cid = cid
  var
    c: CborParser
    buf = ""
  open(c, newStringStream(raw))
  next c
  parseAssert(c.kind == CborEventKind.cborTag, "data not tagged")
  let tag = c.tag
  if tag == FileTag:
    result.kind = fileNode
    next c
    parseAssert(c.kind == CborEventKind.cborArray, "file data not an array")
    let nLinks = c.arrayLen
    result.links = newSeq[FileLink](nLinks)
    for i in 0..<nLinks:
      next c
      parseAssert(c.kind == CborEventKind.cborMap, "file array does not contain maps")
      let nAttrs = c.mapLen
      for _ in 1..nAttrs:
        next c
        parseAssert(c.kind == CborEventKind.cborPositive, "link map key not an integer")
        let key = c.readInt.EntryKey
        next c
        case key
        of typeKey:
          parseAssert(false, "typed file links are not supported")
        of dataKey:
          parseAssert(c.kind == CborEventKind.cborBytes, "CID not encoded as bytes")
          c.readBytes buf
          result.links[i].cid.take buf
        of sizeKey:
          parseAssert(c.kind == CborEventKind.cborPositive, "link size not encoded properly")
          result.links[i].size = c.readInt
          result.size.inc result.links[i].size
  elif tag == DirTag:
    result.kind = dirNode
    next c
    parseAssert(c.kind == CborEventKind.cborMap)
    let dirLen = c.mapLen
    parseAssert(dirLen != -1, raw)
    result.entries = initOrderedTable[string, FsNode](dirLen.nextPowerOfTwo)
    for i in 1 .. dirLen:
      next c
      parseAssert(c.kind == CborEventKind.cborText, raw)
      c.readText buf
      parseAssert(not buf.contains({ '/', '\0' }), raw)
      next c
      parseAssert(c.kind == CborEventKind.cborMap)
      let nAttrs = c.mapLen
      parseAssert(nAttrs > 1, raw)
      let entry = new FsNode
      result.entries[buf] = entry
      for i in 1 .. nAttrs:
        next c
        parseAssert(c.kind == CborEventKind.cborPositive)
        case c.readInt.EntryKey
        of typeKey:
          next c
          case c.readInt.FsType
          of ufsFile: entry.kind = shallowFile
          of ufsDir: entry.kind = shallowDir
        of dataKey:
          next c
          c.readBytes buf
          entry.cid.take buf
        of sizeKey:
          next c
          entry.size = c.readInt
  else:
    parseAssert(false, raw)
  next c
  parseAssert(c.kind == cborEof, "trailing data")

proc toStream*(node: FsNode; s: Stream) =
  let c = node.toCbor()
  c.toStream s

iterator items*(dir: FsNode): (string, FsNode) =
  assert(dir.kind == dirNode)
  for k, v in dir.entries.pairs:
    yield (k, v)

proc containsFile*(dir: FsNode; name: string): bool =
  doAssert(dir.kind == dirNode)
  dir.entries.contains name

proc `[]`*(dir: FsNode; name: string): FsNode =
  if dir.kind == dirNode:
    result = dir.entries.getOrDefault name

proc `[]`*(dir: FsNode; index: int): (string, FsNode) =
  result[0] = ""
  if dir.kind == dirNode:
    var i = 0
    for name, node in dir.entries.pairs:
      if i == index:
        result = (name, node)
        break
      inc i

proc lookupFile*(dir: FsNode; name: string): tuple[cid: Cid, size: BiggestInt] =
  doAssert(dir.kind == dirNode)
  let f = dir.entries[name]
  if f.kind == fileNode:
    result.cid = f.cid
    result.size = f.size

proc addFile*(store: DagfsStore; path: string): FsNode =
  ## Add a file to the store and return an FsNode.
  let
    fStream = newFileStream(path, fmRead)
    u = newUnixfsFile()
  u.links = newSeqOfCap[FileLink](1)
  for chunk in fStream.simpleChunks:
    let cid = store.put(chunk)
    u.links.add FileLink(cid: cid, size: chunk.len)
    u.size.inc chunk.len
  if u.size == 0:
    # return the CID for a raw nothing
    u.cid = dagHash("")
  else:
    if u.links.len == 1:
      # take a shortcut and use the raw chunk CID
      u.cid = u.links[0].cid
    else:
      u.cid = store.putDag(u.toCbor)
  result = u
  close fStream

proc addDir*(store: DagfsStore; dirPath: string): FsNode =
  var dRoot = newFsRoot()
  for kind, path in walkDir dirPath:
    var child: FsNode
    case kind
    of pcFile:
      child = store.addFile path
    of pcDir:
      child = store.addDir(path)
    else: continue
    dRoot.add path.extractFilename, child
  let
    dag = dRoot.toCbor
    cid = store.putDag(dag)
  result = newUnixfsDir(cid)

proc open*(store: DagfsStore; cid: Cid): FsNode =
  assert cid.isValid
  let raw = store.get(cid)
  result = parseFs(raw, cid)

proc openDir*(store: DagfsStore; cid: Cid): FsNode =
  assert cid.isValid
  var raw = ""
  try: store.get(cid, raw)
  except MissingObject: raise cid.newMissingObject
    # this sucks
  result = parseFs(raw, cid)
  assert(result.kind == dirNode)

proc walk*(store: DagfsStore; dir: FsNode; path: string; cache = true): FsNode =
  ## Walk a path down from a root directory.
  assert(dir.kind == dirNode)
  result = dir
  var raw = ""
  for name in split(path, DirSep):
    if name == "": continue
    if result.kind == fileNode:
      result = nil
      break
    var next = result[name]
    if next.isNil:
      result = nil
      break
    if (next.kind in {shallowFile, shallowDir}):
      store.get(next.cid, raw)
      next = parseFs(raw, next.cid)
      if cache:
        result.entries[name] = next
    result = next

#[
iterator fileChunks*(store: DagfsStore; file: FsNode): string =
  ## Iterate over the links in a file and return futures for link data.
  if file.cid.isRaw:
    yield store.get(file.cid)
  else:
    var
      i = 0
      chunk = ""
    while i < file.links.len:
      store.get(file.links[i].cid, chunk)
      yield chunk
      inc i
]#

proc readBuffer*(store: DagfsStore; file: FsNode; pos: BiggestInt;
                 buf: pointer; size: int): int =
  ## Read a UnixFS file into a buffer. May return zero on any failure.
  assert(pos > -1)
  var
    filePos = 0
    chunk = ""
  if pos < file.size:
    #[
    if file.cid.isRaw:
      let pos = pos.int
      store.get(file.cid, chunk)
      if pos < chunk.high:
        copyMem(buf, chunk[pos].addr, min(chunk.len - pos, size))
      result = size
    else:
    ]#
    block:
      for i in 0..file.links.high:
        let linkSize = file.links[i].size
        if filePos <= pos and pos < filePos+linkSize:
          store.get(file.links[i].cid, chunk)
          let
            chunkPos = int(pos - filePos)
            n = min(chunk.len-chunkPos, size)
          copyMem(buf, chunk[chunkPos].addr, n)
          result = n
          break
        filePos.inc linkSize
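A sketch of how these UnixFS helpers compose (not part of this commit; `newFileStore` is defined later in this change and the paths are illustrative):

    let store = newFileStore("/tmp/dagfs-blocks")
    let root = store.addDir("/some/source/dir")
    echo "root directory CID: ", root.cid
    # Reopen the directory from the store and resolve a path within it.
    let dir = store.openDir(root.cid)
    let node = store.walk(dir, "subdir/file.txt")
    if not node.isNil and node.isFile:
      echo "file size: ", node.size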
@@ -0,0 +1,80 @@
import httpclient, json, base58/bitcoin, streams, cbor, tables

import ../dagfs, ./stores, ./fsnodes

type
  IpfsStore* = ref IpfsStoreObj
  IpfsStoreObj = object of DagfsStoreObj
    ## IPFS daemon client.
    http: HttpClient
    baseUrl: string

proc ipfsClose(s: DagfsStore) =
  var ipfs = IpfsStore(s)
  close ipfs.http

proc putBlock(ipfs: IpfsStore; data: string; format = "raw"): tuple[key: string, size: int] =
  # stuff in some MIME horseshit so it works
  ipfs.http.headers = newHttpHeaders({
    "Content-Type": "multipart/form-data; boundary=------------------------KILL_A_WEBDEV"})
  let
    trash = """

--------------------------KILL_A_WEBDEV
Content-Disposition: form-data; name="file"; filename="myfile"
Content-Type: application/octet-stream

""" & data & """

--------------------------KILL_A_WEBDEV--
"""
    resp = ipfs.http.post(ipfs.baseUrl & "/api/v0/block/put?format=" & format, body=trash)
    body = resp.body
    js = parseJson body
  # You can tell it's written in Go when the JSON keys had to be capitalized
  result = (js["Key"].getStr, js["Size"].getInt)

proc ipfsPut(s: DagfsStore; blk: string): Cid =
  var ipfs = IpfsStore(s)
  let
    isDag = blk.isUnixfs
    tag = 0x55
    format = "raw"
  result = dagHash blk
  discard ipfs.putBlock(blk, format)
  # IPFS returns a different hash. Whatever.

proc ipfsGetBuffer(s: DagfsStore; cid: Cid; buf: pointer; len: Natural): int =
  var ipfs = IpfsStore(s)
  let url = ipfs.baseUrl & "/api/v0/block/get?arg=" & $cid
  try:
    var body = ipfs.http.request(url).body
    if not verify(cid, body):
      raise newMissingObject cid
    if body.len > len:
      raise newException(BufferTooSmall, "")
    result = body.len
    copyMem(buf, body[0].addr, result)
  except:
    raise newMissingObject cid

proc ipfsGet(s: DagfsStore; cid: Cid; result: var string) =
  var ipfs = IpfsStore(s)
  let url = ipfs.baseUrl & "/api/v0/block/get?arg=" & $cid
  try:
    result = ipfs.http.request(url).body
    if not verify(cid, result):
      raise newMissingObject cid
  except:
    raise newMissingObject cid

proc newIpfsStore*(url = "http://127.0.0.1:5001"): IpfsStore =
  ## Allocate a new synchronous store interface to the IPFS daemon at `url`.
  ## Every block retrieved by `get` is hashed and verified.
  new result
  result.closeImpl = ipfsClose
  result.putImpl = ipfsPut
  result.getBufferImpl = ipfsGetBuffer
  result.getImpl = ipfsGet
  result.http = newHttpClient()
  result.baseUrl = url
@@ -0,0 +1,165 @@
type
  Blake2b* = object
    hash: array[8, uint64]
    offset: array[2, uint64]
    buffer: array[128, uint8]
    buffer_idx: uint8
    hash_size: uint8

const Blake2bIV =
  [ 0x6a09e667f3bcc908'u64, 0xbb67ae8584caa73b'u64,
    0x3c6ef372fe94f82b'u64, 0xa54ff53a5f1d36f1'u64,
    0x510e527fade682d1'u64, 0x9b05688c2b3e6c1f'u64,
    0x1f83d9abfb41bd6b'u64, 0x5be0cd19137e2179'u64 ]

const Sigma = [
  [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 ],
  [ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 ],
  [ 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 ],
  [ 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 ],
  [ 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 ],
  [ 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 ],
  [ 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 ],
  [ 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 ],
  [ 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 ],
  [ 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 ],
  [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 ],
  [ 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 ] ]

proc inc(a: var array[2, uint64], b: uint8) =
  a[0] = a[0] + b
  if (a[0] < b): inc(a[1])

proc padding(a: var array[128, uint8], b: uint8) =
  for i in b..127: a[i] = 0

proc ror64(x: uint64, n: int): uint64 {.inline.} =
  result = (x shr n) or (x shl (64 - n))

proc G(v: var array[16, uint64],
       a, b, c, d: int, x, y: uint64) {.inline.} =
  v[a] = v[a] + v[b] + x
  v[d] = ror64(v[d] xor v[a], 32)
  v[c] = v[c] + v[d]
  v[b] = ror64(v[b] xor v[c], 24)
  v[a] = v[a] + v[b] + y
  v[d] = ror64(v[d] xor v[a], 16)
  v[c] = v[c] + v[d]
  v[b] = ror64(v[b] xor v[c], 63)

proc compress(c: var Blake2b, last: int = 0) =
  var input, v: array[16, uint64]
  for i in 0..15:
    input[i] = cast[ptr uint64](addr(c.buffer[i*8]))[]
  for i in 0..7:
    v[i] = c.hash[i]
    v[i+8] = Blake2bIV[i]
  v[12] = v[12] xor c.offset[0]
  v[13] = v[13] xor c.offset[1]
  if (last == 1): v[14] = not(v[14])
  for i in 0..11:
    G(v, 0, 4, 8, 12, input[Sigma[i][0]], input[Sigma[i][1]])
    G(v, 1, 5, 9, 13, input[Sigma[i][2]], input[Sigma[i][3]])
    G(v, 2, 6, 10, 14, input[Sigma[i][4]], input[Sigma[i][5]])
    G(v, 3, 7, 11, 15, input[Sigma[i][6]], input[Sigma[i][7]])
    G(v, 0, 5, 10, 15, input[Sigma[i][8]], input[Sigma[i][9]])
    G(v, 1, 6, 11, 12, input[Sigma[i][10]], input[Sigma[i][11]])
    G(v, 2, 7, 8, 13, input[Sigma[i][12]], input[Sigma[i][13]])
    G(v, 3, 4, 9, 14, input[Sigma[i][14]], input[Sigma[i][15]])
  for i in 0..7:
    c.hash[i] = c.hash[i] xor v[i] xor v[i+8]
  c.buffer_idx = 0

proc blake2b_update*(c: var Blake2b, data: cstring|string|seq|uint8, data_size: int) =
  for i in 0..<data_size:
    if c.buffer_idx == 128:
      inc(c.offset, c.buffer_idx)
      compress(c)
    when data is cstring or data is string:
      c.buffer[c.buffer_idx] = data[i].uint8
    elif data is seq:
      c.buffer[c.buffer_idx] = data[i]
    else:
      c.buffer[c.buffer_idx] = data
    inc(c.buffer_idx)

proc blake2b_init*(c: var Blake2b, hash_size: uint8,
                   key: cstring = nil, key_size: int = 0) =
  assert(hash_size >= 1'u8 and hash_size <= 64'u8)
  assert(key_size >= 0 and key_size <= 64)
  c.hash = Blake2bIV
  c.hash[0] = c.hash[0] xor 0x01010000 xor cast[uint64](key_size shl 8) xor hash_size
  c.hash_size = hash_size
  if key_size > 0:
    blake2b_update(c, key, key_size)
    padding(c.buffer, c.buffer_idx)
    c.buffer_idx = 128

proc blake2b_final*(c: var Blake2b): seq[uint8] =
  result = newSeq[uint8](c.hash_size)
  inc(c.offset, c.buffer_idx)
  padding(c.buffer, c.buffer_idx)
  compress(c, 1)
  for i in 0'u8..<c.hash_size:
    result[i.int] = cast[uint8]((c.hash[i div 8] shr (8'u8 * (i and 7)) and 0xFF))
  zeroMem(addr(c), sizeof(c))

proc `$`*(d: seq[uint8]): string =
  const digits = "0123456789abcdef"
  result = ""
  for i in 0..high(d):
    add(result, digits[(d[i].int shr 4) and 0xF])
    add(result, digits[d[i].int and 0xF])

proc getBlake2b*(s: string, hash_size: uint8, key: string = ""): string =
  var b: Blake2b
  blake2b_init(b, hash_size, cstring(key), len(key))
  blake2b_update(b, s, len(s))
  result = $blake2b_final(b)

when isMainModule:
  import strutils, hex

  proc hex2str(s: string): string =
    hex.decode s

  assert(getBlake2b("abc", 4, "abc") == "b8f97209")
  assert(getBlake2b(nil, 4, "abc") == "8ef2d47e")
  assert(getBlake2b("abc", 4) == "63906248")
  assert(getBlake2b(nil, 4) == "1271cf25")

  var b1, b2: Blake2b
  blake2b_init(b1, 4)
  blake2b_init(b2, 4)
  blake2b_update(b1, 97'u8, 1)
  blake2b_update(b1, 98'u8, 1)
  blake2b_update(b1, 99'u8, 1)
  blake2b_update(b2, @[97'u8, 98'u8, 99'u8], 3)
  assert($blake2b_final(b1) == $blake2b_final(b2))

  let f = open("blake2b-kat.txt", fmRead)
  var
    data, key, hash, r: string
    b: Blake2b
  while true:
    try:
      data = f.readLine()
      data = hex2str(data[4..data.high])
      key = f.readLine()
      key = hex2str(key[5..key.high])
      hash = f.readLine()
      hash = hash[6..hash.high]
      r = getBlake2b(data, 64, key)
      assert(r == hash)

      blake2b_init(b, 64, key, 64)
      for i in 0..high(data):
        blake2b_update(b, ($data[i]).cstring, 1)
      assert($blake2b_final(b) == hash)

      discard f.readLine()
    except IOError: break
  close(f)
  echo "ok"
@@ -0,0 +1,74 @@
#[
  The MIT License (MIT)

  Copyright (c) 2014 Eric S. Bullington

  Permission is hereby granted, free of charge, to any person obtaining a copy
  of this software and associated documentation files (the "Software"), to deal
  in the Software without restriction, including without limitation the rights
  to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  copies of the Software, and to permit persons to whom the Software is
  furnished to do so, subject to the following conditions:

  The above copyright notice and this permission notice shall be included in all
  copies or substantial portions of the Software.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  SOFTWARE.
]#

proc nibbleFromChar(c: char): int =
  case c
  of '0'..'9': result = (ord(c) - ord('0'))
  of 'a'..'f': result = (ord(c) - ord('a') + 10)
  of 'A'..'F': result = (ord(c) - ord('A') + 10)
  else:
    raise newException(ValueError, "invalid hexadecimal encoding")

proc decode*[T: char|int8|uint8](str: string; result: var openArray[T]) =
  assert(result.len == str.len div 2)
  for i in 0..<result.len:
    result[i] = T((nibbleFromChar(str[2 * i]) shl 4) or nibbleFromChar(str[2 * i + 1]))

proc decode*(str: string): string =
  result = newString(len(str) div 2)
  decode(str, result)

proc nibbleToChar(nibble: int): char =
  const byteMap = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f']
  const byteMapLen = len(byteMap)
  if nibble < byteMapLen:
    return byteMap[nibble]

template encodeTmpl(str: untyped): typed =
  let length = (len(str))
  result = newString(length * 2)
  for i in str.low..str.high:
    let a = ord(str[i]) shr 4
    let b = ord(str[i]) and ord(0x0f)
    result[i * 2] = nibbleToChar(a)
    result[i * 2 + 1] = nibbleToChar(b)

proc encode*(bin: string): string =
  encodeTmpl(bin)

proc encode*(bin: openarray[char|int8|uint8]): string =
  encodeTmpl(bin)

when isMainModule:
  assert encode("The sun so bright it leaves no shadows") == "5468652073756e20736f20627269676874206974206c6561766573206e6f20736861646f7773"
  const longText = """Man is distinguished, not only by his reason, but by this
    singular passion from other animals, which is a lust of the mind,
    that by a perseverance of delight in the continued and indefatigable
    generation of knowledge, exceeds the short vehemence of any carnal
    pleasure."""
  assert encode(longText) == "4d616e2069732064697374696e677569736865642c206e6f74206f6e6c792062792068697320726561736f6e2c2062757420627920746869730a2020202073696e67756c61722070617373696f6e2066726f6d206f7468657220616e696d616c732c2077686963682069732061206c757374206f6620746865206d696e642c0a20202020746861742062792061207065727365766572616e6365206f662064656c6967687420696e2074686520636f6e74696e75656420616e6420696e6465666174696761626c650a2020202067656e65726174696f6e206f66206b6e6f776c656467652c2065786365656473207468652073686f727420766568656d656e6365206f6620616e79206361726e616c0a20202020706c6561737572652e"
  const tests = ["", "abc", "xyz", "man", "leisure.", "sure.", "erasure.",
                 "asure.", longText]
  for t in items(tests):
    assert decode(encode(t)) == t
@@ -0,0 +1,54 @@
import std/streams, std/strutils, std/os, cbor
import ../dagfs, ./stores

type
  DagfsReplicator* = ref DagfsReplicatorObj
  DagfsReplicatorObj* = object of DagfsStoreObj
    toStore, fromStore: DagfsStore
    cache: string
    cacheCid: Cid

proc replicatedPut(s: DagfsStore; blk: string): Cid =
  var r = DagfsReplicator(s)
  r.toStore.put blk

proc replicatedGetBuffer(s: DagfsStore; cid: Cid; buf: pointer; len: Natural): int =
  var r = DagfsReplicator(s)
  if r.cacheCid == cid:
    assert(cid.verify(r.cache), "cached block is invalid from previous get")
    if r.cache.len > len:
      raise newException(BufferTooSmall, "")
    result = r.cache.len
    copyMem(buf, r.cache[0].addr, result)
  else:
    try:
      result = r.toStore.getBuffer(cid, buf, len)
      r.cacheCid = cid
      r.cache.setLen result
      copyMem(r.cache[0].addr, buf, result)
      assert(cid.verify(r.cache), "cached block is invalid after copy from the to-store")
    except MissingObject:
      result = r.fromStore.getBuffer(cid, buf, len)
      r.cacheCid = cid
      r.cache.setLen result
      copyMem(r.cache[0].addr, buf, result)
      assert(cid.verify(r.cache), "replicator cache is invalid after copy from the from-store")
      discard r.toStore.put r.cache

proc replicatedGet(s: DagfsStore; cid: Cid; result: var string) =
  var r = DagfsReplicator(s)
  try: r.toStore.get(cid, result)
  except MissingObject:
    r.fromStore.get(cid, result)
    discard r.toStore.put result

proc newDagfsReplicator*(toStore, fromStore: DagfsStore): DagfsReplicator =
  ## Blocks retrieved by `get` are not verified.
  DagfsReplicator(
    putImpl: replicatedPut,
    getBufferImpl: replicatedGetBuffer,
    getImpl: replicatedGet,
    toStore: toStore,
    fromStore: fromStore,
    cache: "",
    cacheCid: initCid())
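A sketch of composing two stores with the replicator (not part of this commit; the constructors come from other modules in this change, the host and path are illustrative, and `someCid` is a placeholder for a known block CID):

    let
      local = newFileStore("/var/cache/dagfs")      # blocks are replicated into this store
      remote = newTcpClient("replica.example.net")  # ...as they are fetched from this one
      store = newDagfsReplicator(local, remote)
    var blk = ""
    store.get(someCid, blk)  # a miss in `local` falls through to `remote`, then replicates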
@@ -0,0 +1,130 @@
import std/streams, std/strutils, std/os
import cbor
import ../dagfs, ./priv/hex

type
  MissingObject* = ref object of CatchableError
    cid*: Cid ## Missing object identifier

  BufferTooSmall* = object of CatchableError

proc newMissingObject*(cid: Cid): MissingObject =
  MissingObject(msg: "object missing from store", cid: cid)

type
  DagfsStore* = ref DagfsStoreObj
  DagfsStoreObj* = object of RootObj
    closeImpl*: proc (s: DagfsStore) {.nimcall, gcsafe.}
    putImpl*: proc (s: DagfsStore; blk: string): Cid {.nimcall, gcsafe.}
    getBufferImpl*: proc (s: DagfsStore; cid: Cid; buf: pointer; len: Natural): int {.nimcall, gcsafe.}
    getImpl*: proc (s: DagfsStore; cid: Cid; result: var string) {.nimcall, gcsafe.}

proc close*(s: DagfsStore) =
  ## Close active store resources.
  if not s.closeImpl.isNil: s.closeImpl(s)

proc put*(s: DagfsStore; blk: string): Cid =
  ## Place a raw block into the store. The hash algorithm
  ## is chosen by the store implementation.
  assert(not s.putImpl.isNil)
  assert(blk.len > 0)
  s.putImpl(s, blk)

proc getBuffer*(s: DagfsStore; cid: Cid; buf: pointer; len: Natural): int =
  ## Copy a raw block from the store into a buffer.
  assert cid.isValid
  assert(not s.getBufferImpl.isNil)
  result = s.getBufferImpl(s, cid, buf, len)
  assert(result > 0)

proc get*(s: DagfsStore; cid: Cid; result: var string) =
  ## Retrieve a raw block from the store.
  assert(not s.getImpl.isNil)
  assert cid.isValid
  s.getImpl(s, cid, result)
  assert(result.len > 0)

proc get*(s: DagfsStore; cid: Cid): string =
  ## Retrieve a raw block from the store.
  result = ""
  s.get(cid, result)

proc putDag*(s: DagfsStore; dag: CborNode): Cid =
  ## Place a Dagfs node in the store.
  var raw = encode dag
  s.put raw

proc getDag*(s: DagfsStore; cid: Cid): CborNode =
  ## Retrieve a CBOR DAG from the store.
  let stream = newStringStream(s.get(cid))
  result = parseCbor stream
  close stream

type
  FileStore* = ref FileStoreObj
    ## A store that writes nodes and leaves as files.
  FileStoreObj = object of DagfsStoreObj
    root: string

proc parentAndFile(fs: FileStore; cid: Cid): (string, string) =
  ## Generate the parent path and file path of a CID within the store.
  let digest = hex.encode(cid.digest)
  result[0] = fs.root / digest[0..1]
  result[1] = result[0] / digest[2..digest.high]

proc fsPut(s: DagfsStore; blk: string): Cid =
  var fs = FileStore(s)
  result = dagHash blk
  if result != zeroBlock:
    let (dir, path) = fs.parentAndFile(result)
    if not existsDir dir:
      createDir dir
    if not existsFile path:
      let tmp = fs.root / "tmp"
      writeFile(tmp, blk)
      moveFile(tmp, path)

proc fsGetBuffer(s: DagfsStore; cid: Cid; buf: pointer; len: Natural): int =
  var fs = FileStore(s)
  let (_, path) = fs.parentAndFile cid
  if existsFile path:
    let fSize = path.getFileSize
    if fSize > maxBlockSize:
      discard tryRemoveFile path
      raise cid.newMissingObject
    if fSize > len.int64:
      raise newException(BufferTooSmall, "")
    let file = open(path, fmRead)
    result = file.readBuffer(buf, len)
    close file
  if result == 0:
    raise cid.newMissingObject

proc fsGet(s: DagfsStore; cid: Cid; result: var string) =
  var fs = FileStore(s)
  let (_, path) = fs.parentAndFile cid
  if existsFile path:
    let fSize = path.getFileSize
    if fSize > maxBlockSize:
      discard tryRemoveFile path
      raise cid.newMissingObject
    result.setLen fSize.int
    let
      file = open(path, fmRead)
      n = file.readChars(result, 0, result.len)
    close file
    doAssert(n == result.len)
  else:
    raise cid.newMissingObject

proc newFileStore*(root: string): FileStore =
  ## Blocks retrieved by `get` are not hashed and verified.
  if not existsDir(root):
    createDir root
  new result
  result.putImpl = fsPut
  result.getBufferImpl = fsGetBuffer
  result.getImpl = fsGet
  result.root = root
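A minimal round-trip through the FileStore (not part of this commit; the directory path is illustrative):

    let store = newFileStore("/tmp/dagfs-blocks")  # created if it does not exist
    let cid = store.put("some block data")
    assert store.get(cid) == "some block data"
    # FileStore does not verify on get, but the CID can be checked explicitly:
    assert cid.verify(store.get(cid))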
@ -0,0 +1,185 @@
|
|||
import std/asyncnet, std/asyncdispatch, std/streams, cbor
|
||||
import ../dagfs, ./stores
|
||||
|
||||
proc toInt(chars: openArray[char]): BiggestInt =
|
||||
for c in chars.items:
|
||||
result = (result shl 8) or c.BiggestInt
|
||||
|
||||
const
|
||||
defaultPort = Port(1024)
|
||||
errTag = toInt "err"
|
||||
getTag = toInt "get"
|
||||
putTag = toInt "put"
|
||||
|
||||
type
|
||||
TcpServer* = ref TcpServerObj
|
||||
TcpServerObj = object
|
||||
sock: AsyncSocket
|
||||
store: DagfsStore
|
||||
|
||||
proc newTcpServer*(store: DagfsStore; port = defaultPort): TcpServer =
|
||||
## Create a new TCP server that serves `store`.
|
||||
result = TcpServer(sock: newAsyncSocket(buffered=false), store: store)
|
||||
result.sock.bindAddr(port, "127.0.0.1")
|
||||
result.sock.setSockOpt(OptReuseAddr, true)
|
||||
# some braindead unix cruft
|
||||
|
||||
proc process(server: TcpServer; client: AsyncSocket) {.async.} =
|
||||
## Process messages from a TCP client.
|
||||
var
|
||||
tmpBuf = ""
|
||||
blkBuf = ""
|
||||
block loop:
|
||||
while not client.isClosed:
|
||||
block:
|
||||
tmpBuf.setLen(256)
|
||||
let n = await client.recvInto(addr tmpBuf[0], tmpBuf.len)
|
||||
if n < 40: break loop
|
||||
tmpBuf.setLen n
|
||||
let
|
||||
tmpStream = newStringStream(tmpBuf)
|
||||
cmd = parseCbor tmpStream
|
||||
when defined(tcpDebug):
|
||||
echo "C: ", cmd
|
||||
if cmd.kind != cborArray or cmd.seq.len < 3: break loop
|
||||
case cmd[0].getInt
|
||||
of errTag:
|
||||
break loop
|
||||
of getTag:
|
||||
let
|
||||
cid = cmd[1].toCid
|
||||
resp = newCborArray()
|
||||
try:
|
||||
server.store.get(cid, blkBuf)
|
||||
resp.add(putTag)
|
||||
resp.add(cmd[1])
|
||||
resp.add(blkBuf.len)
|
||||
when defined(tcpDebug):
|
||||
echo "S: ", resp
|
||||
await client.send(encode resp)
|
||||
await client.send(blkBuf)
|
||||
except:
|
||||
resp.add(errTag)
|
||||
resp.add(cmd[1])
|
||||
resp.add(getCurrentExceptionMsg())
|
||||
when defined(tcpDebug):
|
||||
echo "S: ", resp
|
||||
await client.send(encode resp)
|
||||
of putTag:
|
||||
# TODO: check if the block is already in the store
|
||||
let resp = newCborArray()
|
||||
resp.add(newCborInt getTag)
|
||||
resp.add(cmd[1])
|
||||
resp.add(cmd[2])
|
||||
when defined(tcpDebug):
|
||||
echo "S: ", resp
|
||||
await client.send(encode resp)
|
||||
doAssert(cmd[2].getInt <= maxBlockSize)
|
||||
tmpBuf.setLen cmd[2].getInt
|
||||
blkBuf.setLen 0
|
||||
while blkBuf.len < cmd[2].getInt:
|
||||
let n = await client.recvInto(tmpBuf[0].addr, tmpBuf.len)
|
||||
if n == 0: break loop
|
||||
tmpBuf.setLen n
|
||||
blkBuf.add tmpBuf
|
||||
let cid = server.store.put(blkBuf)
|
||||
doAssert(cid == cmd[1].toCid)
|
||||
else: break loop
|
||||
close client
|
||||
|
||||
proc serve*(server: TcpServer) {.async.} =
|
||||
## Service client connections to server.
|
||||
listen server.sock
|
||||
while not server.sock.isClosed:
|
||||
let (host, sock) = await server.sock.acceptAddr()
|
||||
asyncCheck server.process(sock)
|
||||
|
||||
proc close*(server: TcpServer) =
|
||||
## Close a TCP server.
|
||||
close server.sock
|
||||
|
||||
type
|
||||
TcpClient* = ref TcpClientObj
|
||||
TcpClientObj = object of DagfsStoreObj
|
||||
sock: AsyncSocket
|
||||
buf: string
|
||||
|
||||
proc tcpClientPut(s: DagfsStore; blk: string): Cid =
  var client = TcpClient(s)
  result = dagHash blk
  if result != zeroBlock:
    block put:
      let cmd = newCborArray()
      cmd.add(newCborInt putTag)
      cmd.add(toCbor result)
      cmd.add(newCborInt blk.len)
      when defined(tcpDebug):
        echo "C: ", cmd
      waitFor client.sock.send(encode cmd)
    block get:
      let
        respBuf = waitFor client.sock.recv(256)
        s = newStringStream(respBuf)
        resp = parseCbor s
      when defined(tcpDebug):
        echo "S: ", resp
      case resp[0].getInt
      of getTag:
        if resp[1] == result:
          waitFor client.sock.send(blk)
        else:
          close client.sock
          raiseAssert "server sent out-of-order \"get\" message"
      of errTag:
        raiseAssert resp[2].getText
      else:
        raiseAssert "invalid server message"

proc tcpClientGetBuffer(s: DagfsStore; cid: Cid; buf: pointer; len: Natural): int =
  assert(getTag != 0)
  var client = TcpClient(s)
  block get:
    let cmd = newCborArray()
    cmd.add(newCborInt getTag)
    cmd.add(toCbor cid)
    cmd.add(newCborInt len)
    when defined(tcpDebug):
      echo "C: ", cmd
    waitFor client.sock.send(encode cmd)
  block put:
    let
      # peek at the reply so the CBOR header can be measured
      # without consuming the block data that follows it
      respBuf = waitFor client.sock.recv(256, {Peek})
      s = newStringStream(respBuf)
      resp = parseCbor s
      skip = s.getPosition
    when defined(tcpDebug):
      echo "S: ", resp
    case resp[0].getInt
    of putTag:
      doAssert(resp[1] == cid)
      result = resp[2].getInt.int
      doAssert(skip <= len and result <= len)
      # consume the header bytes, then read the block into `buf`
      discard waitFor client.sock.recvInto(buf, skip)
      result = waitFor client.sock.recvInto(buf, result)
    of errTag:
      raise MissingObject(msg: resp[2].getText, cid: cid)
    else:
      raise cid.newMissingObject

proc tcpClientGet(s: DagfsStore; cid: Cid; result: var string) =
  result.setLen maxBlockSize
  let n = s.getBuffer(cid, result[0].addr, result.len)
  result.setLen n
  assert(result.dagHash == cid)

proc newTcpClient*(host: string; port = defaultPort): TcpClient =
  new result
  result.sock = waitFor asyncnet.dial(host, port, buffered=false)
  result.buf = ""
  result.putImpl = tcpClientPut
  result.getBufferImpl = tcpClientGetBuffer
  result.getImpl = tcpClientGet

proc close*(client: TcpClient) =
  ## Close a TCP client connection.
  close client.sock

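when isMainModule:
  # Round-trip sketch against a server created with `newTcpServer`
  # above; assumes one is already listening on the default port.
  let
    client = newTcpClient("127.0.0.1")
    cid = client.put("hello block")  # hash, announce, and send a block
  var blk = ""
  client.get(cid, blk)               # fetch it back; `get` verifies the hash
  assert blk == "hello block"
  close client
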
@ -0,0 +1,593 @@
import nre, os, strutils, tables, parseopt, streams, cbor

import ./dagfs, ./dagfs/stores, ./dagfs/fsnodes

type
  EvalError = object of CatchableError

type
  Env = ref EnvObj

  AtomKind = enum
    atomPath
    atomCid
    atomString
    atomSymbol
    atomError

  Atom = object
    case kind: AtomKind
    of atomPath:
      path: string
    of atomCid:
      cid: Cid
    of atomString:
      str: string
    of atomSymbol:
      sym: string
    of atomError:
      err: string

  Func = proc(env: Env; arg: NodeObj): NodeRef

  NodeKind = enum
    nodeError
    nodeList
    nodeAtom
    nodeFunc

  NodeRef = ref NodeObj
    ## NodeRef is used to chain nodes into lists.
  NodeObj = object
    ## NodeObj is used to mutate nodes without side-effects.
    case kind: NodeKind
    of nodeList:
      headRef, tailRef: NodeRef
    of nodeAtom:
      atom: Atom
    of nodeFunc:
      fun: Func
      name: string
    of nodeError:
      errMsg: string
      errNode: NodeRef
    nextRef: NodeRef

  EnvObj = object
    store: DagfsStore
    bindings: Table[string, NodeObj]
    paths: Table[string, FsNode]
    cids: Table[Cid, FsNode]

proc print(a: Atom; s: Stream)
proc print(ast: NodeRef; s: Stream)

proc newAtom(c: Cid): Atom =
  Atom(kind: atomCid, cid: c)

proc newAtomError(msg: string): Atom =
  Atom(kind: atomError, err: msg)

proc newAtomPath(s: string): Atom =
  try:
    let path = expandFilename s
    Atom(kind: atomPath, path: path)
  except OSError:
    newAtomError("invalid path '$1'" % s)

proc newAtomString(s: string): Atom =
  Atom(kind: atomString, str: s)

proc newNodeError(msg: string; n: NodeObj): NodeRef =
  var p = new NodeRef
  p[] = n
  NodeRef(kind: nodeError, errMsg: msg, errNode: p)

proc newNode(a: Atom): NodeRef =
  NodeRef(kind: nodeAtom, atom: a)

proc newNodeList(): NodeRef =
  NodeRef(kind: nodeList)

proc next(n: NodeObj | NodeRef): NodeObj =
  ## Return a copy of the list element that follows Node n.
  assert(not n.nextRef.isNil, "next element is nil")
  result = n.nextRef[]

proc head(list: NodeObj | NodeRef): NodeObj =
  ## Return the start element of a list Node.
  list.headRef[]

proc `next=`(n, p: NodeRef) =
  ## Link node `p` as the successor of `n`.
  assert(n.nextRef.isNil, "append to node that is not at the end of a list")
  n.nextRef = p

iterator list(n: NodeObj): NodeObj =
  ## Iterate over members of a list node.
  var n = n.headRef
  while not n.isNil:
    yield n[]
    n = n.nextRef

iterator walk(n: NodeObj): NodeObj =
  ## Walk down the singly linked list starting from a member node.
  var n = n
  while not n.nextRef.isNil:
    yield n
    n = n.nextRef[]
  yield n

proc append(list, n: NodeRef) =
  ## Append a node to the end of a list node.
  if list.headRef.isNil:
    list.headRef = n
    list.tailRef = n
  else:
    list.tailRef.next = n
    while not list.tailRef.nextRef.isNil:
      assert(list.tailRef != list.tailRef.nextRef)
      list.tailRef = list.tailRef.nextRef

proc append(list: NodeRef; n: NodeObj) =
  let p = new NodeRef
  p[] = n
  list.append p

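when defined(nodeListExample):
  # Compiled only with the hypothetical -d:nodeListExample flag; shows
  # how the plumbing above chains nodes: headRef points at the first
  # element, each nextRef at its successor, tailRef at the last.
  let
    ex = newNodeList()
    es = newFileStream stdout
  ex.append newNode(Atom(kind: atomSymbol, sym: "foo"))
  ex.append newNode(newAtomString "bar")
  ex.print es   # prints: ( foo "bar") on a fresh line
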
proc getFile(env: Env; path: string): FsNode =
  ## Add a file to the store, memoized by path.
  result = env.paths.getOrDefault path
  if result.isNil:
    result = env.store.addFile(path)
    assert(not result.isNil)
    env.paths[path] = result

proc getDir(env: Env; path: string): FsNode =
  ## Add a directory to the store, memoized by path.
  result = env.paths.getOrDefault path
  if result.isNil:
    result = env.store.addDir(path)
    assert(not result.isNil)
    env.paths[path] = result

proc getUnixfs(env: Env; cid: Cid): FsNode =
  ## Fetch and parse a UnixFS node, memoized by CID.
  assert cid.isValid
  result = env.cids.getOrDefault cid
  if result.isNil:
    var raw = ""
    env.store.get(cid, raw)
    result = parseFs(raw, cid)
    env.cids[cid] = result

type
  Tokens = seq[string]

  Reader = ref object
    buffer: string
    tokens: Tokens
    pos: int

proc newReader(): Reader =
  Reader(buffer: "", tokens: newSeq[string]())

proc next(r: Reader): string =
  assert(r.pos < r.tokens.len, $r.tokens)
  result = r.tokens[r.pos]
  inc r.pos

proc peek(r: Reader): string =
  assert(r.pos < r.tokens.len, $r.tokens)
  r.tokens[r.pos]

proc print(a: Atom; s: Stream) =
  case a.kind
  of atomPath:
    s.write a.path
  of atomCid:
    s.write $a.cid
  of atomString:
    s.write '"'
    s.write a.str
    s.write '"'
  #[
  of atomData:
    let fut = newFutureStream[string]()
    asyncCheck env.store.fileStream(a.fileCid, fut)
    while true:
      let (valid, chunk) = fut.read()
      if not valid: break
      f.write chunk
  ]#
  of atomSymbol:
    s.write a.sym
  of atomError:
    s.write "«"
    s.write a.err
    s.write "»"

proc print(ast: NodeObj; s: Stream) =
  case ast.kind:
  of nodeAtom:
    ast.atom.print(s)
  of nodeList:
    s.write "\n("
    for n in ast.list:
      s.write " "
      n.print(s)
    s.write ")"
  of nodeFunc:
    s.write "#<procedure "
    s.write ast.name
    s.write ">"
  of nodeError:
    s.write "«"
    s.write ast.errMsg
    s.write ": "
    ast.errNode.print s
    s.write "»"

proc print(ast: NodeRef; s: Stream) =
  if ast.isNil:
    s.write "«nil»"
  else:
    ast[].print s

proc readAtom(r: Reader): Atom =
  let token = r.next
  block:
    if token[token.low] == '"':
      if token[token.high] != '"':
        newAtomError("invalid string '$1'" % token)
      else:
        newAtomString(token[1..token.len-2])
    elif token.contains DirSep:
      # TODO: memoize this, store a table of paths to atoms
      newAtomPath token
    elif token.len == 46 or token.len > 48:
      # length heuristic for a textual CID, otherwise parseCid fails
      Atom(kind: atomCid, cid: token.parseCid)
    else:
      Atom(kind: atomSymbol, sym: token.normalize)
  #except:
  #  newAtomError(getCurrentExceptionMsg())

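# How `readAtom` classifies tokens (illustrative examples):
#   "hi"                      -> atomString hi
#   src/dagfs.nim             -> atomPath, expanded to an absolute path
#   46 or >48 char CID text   -> atomCid, by the length heuristic above
#   ingest                    -> atomSymbol ingest
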
proc readForm(r: Reader): NodeRef

proc readList(r: Reader): NodeRef =
  result = newNodeList()
  while true:
    if (r.pos == r.tokens.len):
      return nil
    let p = r.peek
    case p[p.high]
    of ')':
      discard r.next
      break
    else:
      result.append r.readForm

proc readForm(r: Reader): NodeRef =
  case r.peek[0]
  of '(':
    discard r.next
    r.readList
  else:
    r.readAtom.newNode

proc tokenizer(s: string): Tokens =
  # TODO: this sucks
  let tokens = s.findAll(re"""[\s,]*(~@|[\[\]{}()'`~^@]|"(?:\\.|[^\\"])*"|;.*|[^\s\[\]{}('"`,;)]*)""")
  result = newSeqOfCap[string] tokens.len
  for s in tokens:
    let t = s.strip
    if t.len > 0:
      result.add t

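when defined(tokenizerExample):
  # Compiled only with the hypothetical -d:tokenizerExample flag;
  # what the regex above yields for a small form:
  doAssert tokenizer("""(ls "foo" ./src)""") ==
    @["(", "ls", "\"foo\"", "./src", ")"]
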
proc read(r: Reader; line: string): NodeRef =
  r.pos = 0
  if r.buffer.len > 0:
    r.buffer.add " "
    r.buffer.add line
    r.tokens = r.buffer.tokenizer
  else:
    r.tokens = line.tokenizer
  result = r.readForm
  if result.isNil:
    # the form is incomplete, keep the input for the next line;
    # a non-empty buffer already holds everything read so far
    if r.buffer.len == 0:
      r.buffer = line
  else:
    r.buffer.setLen 0

proc assertArgCount(args: NodeObj; len: int) =
  ## Assert that the argument list contains exactly `len` nodes.
  var arg = args
  for _ in 2..len:
    doAssert(not arg.nextRef.isNil)
    arg = arg.next
  doAssert(arg.nextRef.isNil)

##
# Builtin functions
#

proc applyFunc(env: Env; args: NodeObj): NodeRef =
  assertArgCount(args, 2)
  let
    fn = args
    ln = fn.next
  fn.fun(env, ln.head)

proc cborFunc(env: Env; arg: NodeObj): NodeRef =
  assertArgCount(arg, 1)
  let
    a = arg.atom
    ufsNode = env.getUnixfs a.cid
    diag = $ufsNode.toCbor
  diag.newAtomString.newNode

proc copyFunc(env: Env; args: NodeObj): NodeRef =
  ## Copy the entry named `y` in directory `x` to the name `z`,
  ## returning the CID of the new directory.
  assertArgCount(args, 3)
  let
    x = args
    y = x.next
    z = y.next
  var root = newFsRoot()
  let dir = env.getUnixfs x.atom.cid
  for name, node in dir.items:
    root.add(name, node)
  root.add(z.atom.str, dir[y.atom.str])
  let cid = env.store.putDag(root.toCbor)
  cid.newAtom.newNode

proc consFunc(env: Env; args: NodeObj): NodeRef =
  assertArgCount(args, 2)
  result = newNodeList()
  let
    car = args
    cdr = args.next
  result.append car
  result.append cdr.head

proc defineFunc(env: Env; args: NodeObj): NodeRef =
  assertArgCount(args, 2)
  let
    symN = args
    val = args.next
  env.bindings[symN.atom.sym] = val
  new result
  result[] = val

proc globFunc(env: Env; args: NodeObj): NodeRef =
  result = newNodeList()
  for n in args.walk:
    let a = n.atom
    case a.kind
    of atomPath:
      result.append n
    of atomString:
      for match in walkPattern a.str:
        result.append match.newAtomPath.newNode
    else:
      # stop at the first bad argument, nothing can be
      # appended to an error node
      return newNodeError("invalid glob argument", n)

proc ingestFunc(env: Env; args: NodeObj): NodeRef =
  var root = newFsRoot()
  for n in args.walk:
    let
      a = n.atom
      name = a.path.extractFilename
      info = a.path.getFileInfo
    case info.kind
    of pcFile, pcLinkToFile:
      let file = env.getFile a.path
      root.add(name, file)
    of pcDir, pcLinkToDir:
      let dir = env.getDir a.path
      root.add(name, dir)
  let cid = env.store.putDag(root.toCbor)
  cid.newAtom.newNode

proc listFunc(env: Env; args: NodeObj): NodeRef =
  ## Standard Lisp 'list' function.
  result = newNodeList()
  new result.headRef
  result.headRef[] = args
  result.tailRef = result.headRef
  while not result.tailRef.nextRef.isNil:
    result.tailRef = result.tailRef.nextRef

proc lsFunc(env: Env; args: NodeObj): NodeRef =
  result = newNodeList()
  for n in args.walk:
    let
      a = n.atom
      ufsNode = env.getUnixfs a.cid
    if ufsNode.isDir:
      for name, u in ufsNode.items:
        let e = newNodeList()
        e.append u.cid.newAtom.newNode
        e.append name.newAtomString.newNode
        result.append e

proc mapFunc(env: Env; args: NodeObj): NodeRef =
  assertArgCount(args, 2)
  result = newNodeList()
  let f = args.fun
  for v in args.next.list:
    result.append f(env, v)

proc mergeFunc(env: Env; args: NodeObj): NodeRef =
  var root = newFsRoot()
  for n in args.walk:
    let
      a = n.atom
      dir = env.getUnixfs a.cid
    for name, node in dir.items:
      root.add(name, node)
  let cid = env.store.putDag(root.toCbor)
  cid.newAtom.newNode

proc pathFunc(env: Env; arg: NodeObj): NodeRef =
  result = arg.atom.str.newAtomPath.newNode

proc rootFunc(env: Env; args: NodeObj): NodeRef =
  var root = newFsRoot()
  let
    name = args.atom.str
    cid = args.next.atom.cid
    ufs = env.getUnixfs cid
  root.add(name, ufs)
  let rootCid = env.store.putDag(root.toCbor)
  rootCid.newAtom.newNode

proc walkFunc(env: Env; args: NodeObj): NodeRef =
  assert args.atom.cid.isValid
  let
    rootCid = args.atom.cid
    walkPath = args.next.atom.str
    root = env.getUnixfs rootCid
    final = env.store.walk(root, walkPath)
  if final.isNil:
    result = newNodeError("no walk to '$1'" % walkPath, args)
  else:
    result = final.cid.newAtom.newNode

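# An illustrative session with the builtins above; CIDs are abbreviated
# and the exact output depends on the ingested content:
#
#   > (define docs (apply ingest (glob "doc/*")))
#   zb2rhe...
#   > (ls docs)
#   ( ( zb2rhj... "index.md" ) )
#   > (walk docs "index.md")
#   zb2rhj...
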
##
# Environment
#

proc bindEnv(env: Env; name: string; fun: Func) =
  assert(not env.bindings.contains name)
  env.bindings[name] = NodeObj(kind: nodeFunc, fun: fun, name: name)

proc newEnv(store: DagfsStore): Env =
  result = Env(
    store: store,
    bindings: initTable[string, NodeObj](),
    paths: initTable[string, FsNode](),
    cids: initTable[Cid, FsNode]())
  result.bindEnv "apply", applyFunc
  result.bindEnv "cbor", cborFunc
  result.bindEnv "cons", consFunc
  result.bindEnv "copy", copyFunc
  result.bindEnv "define", defineFunc
  result.bindEnv "glob", globFunc
  result.bindEnv "ingest", ingestFunc
  result.bindEnv "list", listFunc
  result.bindEnv "ls", lsFunc
  result.bindEnv "map", mapFunc
  result.bindEnv "merge", mergeFunc
  result.bindEnv "path", pathFunc
  result.bindEnv "root", rootFunc
  result.bindEnv "walk", walkFunc

proc eval(ast: NodeRef; env: Env): NodeRef

proc eval_ast(ast: NodeRef; env: Env): NodeRef =
  result = ast
  case ast.kind
  of nodeList:
    result = newNodeList()
    while not ast.headRef.isNil:
      # cut out the head of the list and evaluate
      let n = ast.headRef
      ast.headRef = n.nextRef
      n.nextRef = nil
      let x = n.eval(env)
      result.append x
  of nodeAtom:
    if ast.atom.kind == atomSymbol:
      if env.bindings.contains ast.atom.sym:
        result = new NodeRef
        result[] = env.bindings[ast.atom.sym]
  else: discard

proc eval(ast: NodeRef; env: Env): NodeRef =
  var input = ast[]
  try:
    if ast.kind == nodeList:
      if ast.headRef.isNil:
        newNodeList()
      else:
        let
          ast = eval_ast(ast, env)
          head = ast.headRef
        if head.kind == nodeFunc:
          if not head.nextRef.isNil:
            input = head.next
            head.fun(env, input)
          else:
            input = NodeObj(kind: nodeList)
            head.fun(env, input)
        else:
          input = head[]
          newNodeError("not a function", input)
    else:
      eval_ast(ast, env)
  except EvalError:
    newNodeError(getCurrentExceptionMsg(), input)
  except FieldError:
    newNodeError("invalid argument", input)
  except MissingObject:
    newNodeError("object not in store", input)
  except OSError:
    newNodeError(getCurrentExceptionMsg(), input)

var scripted = false

when defined(genode):
  import dagfsclient
  proc openStore(): DagfsStore =
    result = newDagfsClient("repl")
    scripted = true # do not use linenoise for the moment
    #[
    for kind, key, value in getopt():
      if kind == cmdShortOption and key == "s":
        scripted = true
      else:
        quit "unhandled argument " & key
    ]#
else:
  import ./dagfs/tcp
  proc openStore(): DagfsStore =
    var host = ""
    for kind, key, value in getopt():
      case kind
      of cmdShortOption:
        if key == "s":
          scripted = true
        else:
          quit "unhandled argument " & key
      of cmdArgument:
        if host != "":
          quit "only a single store host argument is accepted"
        host = key
      else:
        quit "unhandled argument " & key
    if host == "": host = "127.0.0.1"
    try: result = newTcpClient(host)
    except:
      quit("failed to connect to store at $1 ($2)" % [host, getCurrentExceptionMsg()])

import rdstdin

proc readLineSimple(prompt: string; line: var TaintedString): bool =
  ## Fallback line reader for scripted use, the prompt is not printed.
  stdin.readLine(line)

proc main() =
  let
    store = openStore()
    env = newEnv(store)
    outStream = stdout.newFileStream
    readLine = if scripted: readLineSimple else: readLineFromStdin

  var
    reader = newReader()
    line = newStringOfCap 128
  while readLine("> ", line):
    if line.len > 0:
      let ast = reader.read(line)
      if not ast.isNil:
        ast.eval(env).print(outStream)
        outStream.write "\n"
        flush outStream

main()
quit 0 # Genode doesn't implicitly quit