diff --git a/README.md b/README.md index 7b9115c5fa3..00aa55d349b 100644 --- a/README.md +++ b/README.md @@ -372,7 +372,7 @@ charlie.go("charlie", server); ## Syntax -See [here](design/Syntax.md). +See [here](design/Syntax.html). ## Semantics @@ -382,4 +382,4 @@ TODO... ## Implementation -See [here](design/Implementation.md) +See [here](design/Implementation.html) diff --git a/default.nix b/default.nix index 65d37486fd0..2dfe0e4f1d0 100644 --- a/default.nix +++ b/default.nix @@ -79,6 +79,15 @@ let "stdlib/.*.as" "stdlib/examples/" "stdlib/examples/.*.as" + "stdlib/examples/produce-exchange/" + "stdlib/examples/produce-exchange/.*.as" + "stdlib/examples/produce-exchange/test/" + "stdlib/examples/produce-exchange/test/.*.as" + ]; + stdlib_doc_files = [ + "stdlib/.*\.py" + "stdlib/README.md" + "stdlib/examples/produce-exchange/README.md" ]; in @@ -120,8 +129,7 @@ rec { src = sourceByRegex ./. ( test_files ++ - samples_files ++ - stdlib_files + samples_files ); buildInputs = @@ -137,7 +145,6 @@ rec { buildPhase = '' patchShebangs . asc --version - make -C stdlib ASC=asc all make -C samples ASC=asc all '' + (if test-dvm @@ -226,8 +233,57 @@ rec { [ { name = "bin/FileCheck"; path = "${nixpkgs.llvm}/bin/FileCheck";} ]; wabt = nixpkgs.wabt; + stdlib-reference = stdenv.mkDerivation { + name = "stdlib-reference"; + + src = sourceByRegex ./. ( + stdlib_files ++ + stdlib_doc_files + ) + "/stdlib"; + + buildInputs = with nixpkgs; + [ pandoc bash python ]; + + buildPhase = '' + patchShebangs . + make alldoc + ''; + + installPhase = '' + mkdir -p $out + mv doc $out/ + mkdir -p $out/nix-support + echo "report docs $out/doc README.html" >> $out/nix-support/hydra-build-products + ''; + + forceShare = ["man"]; + }; + + produce-exchange = stdenv.mkDerivation { + name = "produce-exchange"; + src = sourceByRegex ./. 
( + stdlib_files + ); + + buildInputs = [ + native + ]; + + doCheck = true; + buildPhase = '' + make -C stdlib ASC=asc OUTDIR=_out _out/ProduceExchange.wasm + ''; + checkPhase = '' + make -C stdlib ASC=asc OUTDIR=_out _out/ProduceExchange.out + ''; + installPhase = '' + mkdir -p $out + cp stdlib/_out/ProduceExchange.wasm $out + ''; + }; + all-systems-go = nixpkgs.releaseTools.aggregate { name = "all-systems-go"; - constituents = [ native js native_test coverage-report ]; + constituents = [ native js native_test coverage-report stdlib-reference produce-exchange ]; }; } diff --git a/src/prelude.ml b/src/prelude.ml index 698702f2e3f..59ee4bab48a 100644 --- a/src/prelude.ml +++ b/src/prelude.ml @@ -40,7 +40,6 @@ func print(x : Text) { (prim "print" : Text -> ()) x }; // Hashing func hashInt(n : Int) : Word32 = (prim "Int~hash" : Int -> Word32) n; - // Conversions func natToWord8(n : Nat) : Word8 = (prim "Nat->Word8" : Nat -> Word8) n; func word8ToNat(n : Word8) : Nat = (prim "Word8->Nat" : Word8 -> Nat) n; diff --git a/stdlib/.gitignore b/stdlib/.gitignore index c1d18d8a8b2..9fbb0098fec 100644 --- a/stdlib/.gitignore +++ b/stdlib/.gitignore @@ -1 +1,2 @@ _out +doc diff --git a/stdlib/Makefile b/stdlib/Makefile index 3cbf4962e62..d34611c7efb 100644 --- a/stdlib/Makefile +++ b/stdlib/Makefile @@ -1,117 +1,154 @@ ASC=../src/asc OUTDIR=_out - -## VT100 stuff -HRULE="\x1b[2;34m----------------------------------------------------------------\x1b[0m" -MODULE_NAME="\x1b[1;32mModule:\x1b[1;34m" -BEGIN="\x1b[0;1mBegin...\x1b[0m" -DONE="\x1b[1mDone.\n"$(HRULE) -MODULE_NAME_COLOR="\x1b[0;1;34m" -NO_COLOR="\x1b[0m" +DOCDIR=doc +MDofAS=./markdown-of-actorscript.py +MDofMD=./markdown-of-markdown.py +PANDOC=pandoc # Add new module targets here: MODULES=\ + Hash \ List \ ListTest \ AssocList \ Trie \ + DocTable \ Set \ SetDb \ SetDbTest \ ProduceExchange \ +WASM=\ + ProduceExchange -OUTFILES=$(addsuffix .out, $(MODULES)) +OUTFILES=$(addsuffix .out, $(MODULES)) $(addsuffix .wasm, 
$(WASM)) OUTPATHS=$(addprefix $(OUTDIR)/, $(OUTFILES)) -.PHONY: default all clean startmsg +.PHONY: default all clean alltests alldoc docMd docHtml default: all -startmsg: - @echo Begin build: $(MODULE_NAME_COLOR)$(MODULES)$(NO_COLOR)... +docmsg: + @echo Begin building documentation in \`$(DOCDIR)\`... @echo $(HRULE) -all: $(OUTDIR) startmsg $(OUTPATHS) - @echo Build done : $(MODULE_NAME_COLOR)$(MODULES)$(NO_COLOR) +alltests: $(OUTDIR) $(OUTPATHS) + +all: alltests alldoc + +alldoc: docMd docHtml + +# Markdown documentation, extracted from the source directory +docMd: \ + $(DOCDIR)/README.md \ + $(DOCDIR)/prelude.md \ + $(DOCDIR)/hash.md \ + $(DOCDIR)/list.md \ + $(DOCDIR)/assocList.md \ + $(DOCDIR)/trie.md \ + $(DOCDIR)/docTable.md \ + $(DOCDIR)/set.md \ + $(DOCDIR)/setDb.md \ + $(DOCDIR)/setDbTest.md \ + $(DOCDIR)/examples/produce-exchange/README.md \ + $(DOCDIR)/examples/produce-exchange/serverTypes.md \ + $(DOCDIR)/examples/produce-exchange/serverActor.md \ + $(DOCDIR)/examples/produce-exchange/serverModelTypes.md \ + $(DOCDIR)/examples/produce-exchange/serverModel.md \ + | \ + $(DOCDIR)/ \ + $(DOCDIR)/examples/produce-exchange/ + +# HTML documentation, extracted from the source directory +docHtml: \ + $(DOCDIR)/README.html \ + $(DOCDIR)/prelude.html \ + $(DOCDIR)/hash.html \ + $(DOCDIR)/list.html \ + $(DOCDIR)/assocList.html \ + $(DOCDIR)/trie.html \ + $(DOCDIR)/docTable.html \ + $(DOCDIR)/set.html \ + $(DOCDIR)/setDb.html \ + $(DOCDIR)/setDbTest.html \ + $(DOCDIR)/examples/produce-exchange/README.html \ + $(DOCDIR)/examples/produce-exchange/serverTypes.html \ + $(DOCDIR)/examples/produce-exchange/serverActor.html \ + $(DOCDIR)/examples/produce-exchange/serverModelTypes.html \ + $(DOCDIR)/examples/produce-exchange/serverModel.html \ + | \ + $(DOCDIR)/ \ + $(DOCDIR)/examples/produce-exchange/ clean: - rm -rf $(OUTDIR) + rm -rf $(OUTDIR) $(DOCDIR) $(OUTDIR): - @mkdir $(OUTDIR) + @mkdir $@ +$(DOCDIR): + mkdir $@ -$(OUTDIR)/List.out: $(OUTDIR) list.as - @echo 
$(MODULE_NAME) $(basename $(notdir $@)) - @echo $(BEGIN) - $(ASC) -r $(filter-out $(OUTDIR), $^) > $@ - @echo $(DONE) +$(DOCDIR)/README.md: README.md | $(DOCDIR) + @echo "" > $@ + @echo "" >> $@ + $(MDofMD) $< >> $@ -$(OUTDIR)/ListTest.out: $(OUTDIR) list.as listTest.as - @echo $(MODULE_NAME) $(basename $(notdir $@)) - @echo $(BEGIN) - $(ASC) -r $(filter-out $(OUTDIR), $^) > $@ - @echo $(DONE) +$(DOCDIR)/examples/produce-exchange/: README.md + @mkdir -p $@ -$(OUTDIR)/AssocList.out: $(OUTDIR) list.as assocList.as - @echo $(MODULE_NAME) $(basename $(notdir $@)) - @echo $(BEGIN) - $(ASC) -r $(filter-out $(OUTDIR), $^) > $@ - @echo $(DONE) +$(DOCDIR)/examples/produce-exchange/README.md: examples/produce-exchange/README.md | $(DOCDIR)/examples/produce-exchange/ + @echo "" > $@ + @echo "" >> $@ + $(MDofMD) $< >> $@ -$(OUTDIR)/Trie.out: $(OUTDIR) list.as assocList.as trie.as - @echo $(MODULE_NAME) $(basename $(notdir $@)) - @echo $(BEGIN) +$(OUTDIR)/Hash.out: prelude.as hash.as | $(OUTDIR) $(ASC) -r $(filter-out $(OUTDIR), $^) > $@ - @echo $(DONE) -$(OUTDIR)/Set.out: $(OUTDIR) list.as assocList.as trie.as set.as - @echo $(MODULE_NAME) $(basename $(notdir $@)) - @echo $(BEGIN) +$(OUTDIR)/List.out: prelude.as list.as | $(OUTDIR) $(ASC) -r $(filter-out $(OUTDIR), $^) > $@ - @echo $(DONE) -$(OUTDIR)/SetDb.out: $(OUTDIR) list.as assocList.as trie.as set.as setDb.as - @echo $(MODULE_NAME) $(basename $(notdir $@)) - @echo $(BEGIN) +$(OUTDIR)/ListTest.out: prelude.as list.as listTest.as | $(OUTDIR) $(ASC) -r $(filter-out $(OUTDIR), $^) > $@ - @echo $(DONE) -$(OUTDIR)/SetDbTest.out: $(OUTDIR) list.as assocList.as trie.as set.as setDb.as setDbTest.as - @echo $(MODULE_NAME) $(basename $(notdir $@)) - @echo $(BEGIN) +$(OUTDIR)/AssocList.out: prelude.as list.as assocList.as | $(OUTDIR) $(ASC) -r $(filter-out $(OUTDIR), $^) > $@ - @echo $(DONE) -$(OUTDIR)/ProduceExchange.out: $(OUTDIR) list.as assocList.as trie.as examples/produceExchange.as - @echo $(MODULE_NAME) $(basename $(notdir 
$@)) - @echo $(BEGIN) +$(OUTDIR)/Trie.out: prelude.as hash.as list.as assocList.as trie.as | $(OUTDIR) $(ASC) -r $(filter-out $(OUTDIR), $^) > $@ - @echo $(DONE) - -######################################################################################### -# TODO(Matthew): Figure out why this "compressed" version of the rules doesn't work. +$(OUTDIR)/DocTable.out: prelude.as hash.as list.as assocList.as trie.as docTable.as | $(OUTDIR) + $(ASC) -r $(filter-out $(OUTDIR), $^) > $@ -# $(OUTDIR)/List.out: $(OUTDIR) list.as ; @$(doModule) +$(OUTDIR)/Set.out: prelude.as hash.as list.as assocList.as trie.as set.as | $(OUTDIR) + $(ASC) -r $(filter-out $(OUTDIR), $^) > $@ -# $(OUTDIR)/ListTest.out: $(OUTDIR) list.as listTest.as ; @$(doModule) +$(OUTDIR)/SetDb.out: prelude.as hash.as list.as assocList.as trie.as set.as setDb.as | $(OUTDIR) + $(ASC) -r $(filter-out $(OUTDIR), $^) > $@ -# $(OUTDIR)/Trie.out: $(OUTDIR) list.as trie.as ; @$(doModule) +$(OUTDIR)/SetDbTest.out: prelude.as hash.as list.as assocList.as trie.as set.as setDb.as setDbTest.as | $(OUTDIR) + $(ASC) -r $(filter-out $(OUTDIR), $^) > $@ -# $(OUTDIR)/Set.out: $(OUTDIR) list.as trie.as set.as ; @$(doModule) +PRODUCE_EXCHANGE_SRC=\ + prelude.as hash.as list.as assocList.as trie.as docTable.as \ + examples/produce-exchange/serverTypes.as \ + examples/produce-exchange/serverModelTypes.as \ + examples/produce-exchange/serverModel.as \ + examples/produce-exchange/serverActor.as \ -# $(OUTDIR)/SetDb.out: $(OUTDIR) list.as trie.as set.as setDb.as ; @$(doModule) +$(OUTDIR)/ProduceExchange.out: $(PRODUCE_EXCHANGE_SRC) \ + examples/produce-exchange/test/simpleSetupAndQuery.as | $(OUTDIR) + $(ASC) -r $(filter-out $(OUTDIR), $^) > $@ -# $(OUTDIR)/SetDbTest.out: $(OUTDIR) list.as trie.as set.as setDb.as setDbTest.as ; @$(doModule) +$(OUTDIR)/ProduceExchange.wasm: $(PRODUCE_EXCHANGE_SRC) | $(OUTDIR) + $(ASC) -c --dfinity -o $@ $(filter-out $(OUTDIR), $^) -# $(OUTDIR)/ProduceExchange.out: $(OUTDIR) list.as trie.as 
produceExchange.as ; @$(doModule) +$(DOCDIR)/%.md: %.as $(MDofAS) | $(DOCDIR) + @echo "" > $@ + @echo "" >> $@ + @echo "" >> $@ + $(MDofAS) $< >> $@ -# define doModule = -# @echo $(MODULE_NAME) $(basename $(notdir $@)) -# @echo $(BEGIN) -# $(ASC) -r $(filter-out $(OUTDIR), $^) > $@ -# @echo $(DONE) -# endef +$(DOCDIR)/%.html: $(DOCDIR)/%.md + $(PANDOC) -f gfm $^ > $@ diff --git a/stdlib/README.md b/stdlib/README.md index 1be6257d30c..2e81fabd39d 100644 --- a/stdlib/README.md +++ b/stdlib/README.md @@ -1,20 +1,321 @@ -[See #127](https://github.com/dfinity-lab/actorscript/issues/127) +ActorScript Standard Library +============================== + +Prior Documentation +------------------------- + +- [(Jira Story)](https://dfinity.atlassian.net/browse/AST-31): +- Older [See #127](https://github.com/dfinity-lab/actorscript/issues/127) + +-------------------------------------------------------------- + +Produce Exchange +================= + +We focus on explaining the role of these collections from perspective +of the [produce exchange example](https://github.com/dfinity-lab/actorscript/tree/stdlib-examples/stdlib/examples/produce-exchange). + +See also, the [library modules by priority](#library-modules-by-priority). + +[`DocTable`]($DOCURL/docTable.html) +---------- + +A _document table_ abstracts over a **mutable collection of _documents_**, +each _assigned a unique id_ when added to the table. + +The table is essentially a mutable mapping, augmented with: + +- a mechanism to generate distinct ids and assign them to new documents, and +- an associated shallow projection, from each document to its +associated shallower _information_ type, for use in server-to-client +messages. + +See also + [`Index`](#index), + [`Map`](#map), + [`Trie`](#trie) +and [`AssocList`](#assoclist). + +[`Trie`]($DOCURL/trie.html) +---------- + +Represent a finite map with a _canonical binary tree_, based on hashing each key. 
+ +[`AssocList`]($DOCURL/assocList.html) +------------ + +Represent a finite map with an _association list_ a list of key-value pairs. + +[`List`]($DOCURL/list.html) +----------- + +Linked lists. + +------------------------------------------------------------------------------------- + +To do +========= + +`Trie2D` +---------- + +A Trie2D is a trie that maps each key to a _secondary trie_, representing a _second dimension_. + +A Trie2D represents a 2D mapping, but in a kind of _curried form_, +that permits efficient, eagerly-applied partial application of the +first dimension's keys. + +To do --- separate from existing `Trie` module + + +`Index` +---------- + +An _index_ abstracts over a **mutable finite map**_ with fixed key and +value types, perhaps implemented by a purely-functional data +structure. + +To do --- create as a simplified form of existing `DocTable` class + +See also + [`DocTable`](#doctable), + [`Map`](#index), + [`Trie`](#trie) +and [`AssocList`](#assoclist). + +`Map` +------- + +An abstraction for functional finite maps with fixed key and value +types. + + +To do --- create as a simplified form of existing `DocTable` class + +See also + [`Trie`](#trie) + [`AssocList`](#assoclist), + [`Index`](#index), +and [`DocTable`](#doctable). + + +`Hash` +----------- + +To do + +Convert iterable bit strings into compact hashes. + +Convert each compact hash into its canonical iterable bit string (but +not reversing the hash, of course!). + + +`Array` +----------- + +To do + +Concatenate arrays; query outputs are arrays of records, represented +either directly, or with a cursor. + + +`TrieCursor` +--------------- + +Provide navigational controls for a trie, e.g., holding query results. 
+ +To do + + +`Bits` +--------- + +Bit strings + +Convert "everything", e.g., each kind of entity in a DFINITY canister +application, into its canonical iterable bit string, for the purposes of: + + - hashing it, e.g., for use as complex keys in databases or caches + - serializing it and de-serializing it + - representing in IDL / system messages / wire format + +To do + +**Open questions** + +- If, how and when will the compiler/language just support this stuff "for free"? +- versus, how much should we write as ActorScript-based abstractions? + + +--------------------------------------------------------------- + +Library Modules, by priority +============================== + +Primary modules +----------------------- + +See also, the [produce exchange](#produce-exchange) use case. -Critical modules -================== - [x] **List**: See [`List` module from SML Basis library](http://sml-family.org/Basis/list.html). - [x] **Hashtrie**: Persistent maps, as functional hash tries. - [x] **Set**: Persistent sets, based directly on persistent maps. - [ ] **Hashtable**: Mutable maps, as imperative hash tables. Secondary modules -================== +------------------------ + These modules _may_ be useful in the collections library: - [ ] **Stream**: Type def done; most operations are pending... Other modules -================== +----------------- + These modules are merely exercises (toys/examples), and _not_ essential to the collections library: - [ ] **Thunk**: Type def and some operations are done. +---------------------------------------------------------------- + + +Performance considerations +==================================================================== + +After having a functioning specification, we will employ the thoughts +below toward getting better performance. + +The main thrust of the work on this canister is currently focused on +creating an executable prototype. 
+ +At some point (near the end of our test suite components), we will +want to consider both the **asymptotic** and **constant-factor** performance +properties of our implementation. + +In particular, this performance is phrased in terms of **workloads**, +executing update & query behavior over the PESS server definition +implementation. + +We shall vary workloads in kind and size, and measure space and time +usage of the Wasm VM running this implementation, in terms of the +standard library of collections implemented here. + +We shall compare the performance on fixed workloads across varying +representations of the `Map` data structure that we use. + +Notably, the one and only collection type used in this implementation +is the `Map` type. With two implementations: + + - [Association lists]() + - [Hash tries]() + +We use purely-functional data structures for `Map` since their design +permits `O(1)`-time/space for sharing, and their immutability makes +them suitable for mathematical reasoning. + +As explained below, the hash trie representation is asymptotically +efficient for large sizes; while association lists are not, they are +suitable for small sub-cases, including those where hash collisions +occur in the trie structure. + +These mathematical properties are practically important for affording +a reasonably-efficient executable specification, but they also suggest +even more optimized representations, with the same mathematical +properties. First, + +Hash tries +--------------------------------- + +Before considering other variations, we review the basic properties of +the hash trie representation. 
+ +Crucially, the hash trie implementation of `Map` uses a _functional +representation_, with expected times as follows (expected time +analysis, because of hashing): + +``` + Trie.copy                      : O(1) + Trie.find                      : O(log n) + Trie.replace, .insert, .remove : O(log n) + Trie.merge, .split             : O(log n) + Trie.union                     : O(n) + Trie.intersect                 : O(n) +``` + +Alternative representations +---------------------------- + +We consider variations of how to represent a `Map`, both as variations +of the hash trie, and as other representations as well (see below). + +First, we might consider validating the following claim: + +> **Claim:** The asymptotic properties of the hash trie are ideal +> for a practical (infinitely-scalable) implementation of PESS. + +Before considering other representations, we should evaluate this +claim on randomly-generated use-cases of varying size, to simulate +realistic (but synthetic) workloads, and measure time and space usage +by the Wasm VM. + +Once we can generate performance plots, we should consider comparing +different representations for `Map` that still use a hash trie. + +Chunks +------- + +A simple variation of the hash trie uses **"chunks"** at the leaves, +to represent sub-maps of the threshold size where the pointers +involved in the per-hash-bit branching no longer pay off. + +So, we may first consider additional implementations by varying the +details of these chunks: + + - when the basecase of the hash trie occurs, and + - how the basecase of the hash trie is represented + +We consider several simple, but practical representations of chunks below. + + +Association array representation: +--------------------------------------- + +Association arrays are optimized for cache locality. They each store +a key-value mapping as two arrays: one of keys, and one of values. To +find a key-value pair, do a linear-scan in the array of keys to +find the corresponding position of the value, in that array. 
Regrow +the two arrays by doubling, or some other scheme. + +``` + Aa.copy : O(n) + Aa.find : O(n) + Aa.replace, .insert, .remove : O(n) + Aa.merge, .split : O(n) +?? Aa.union : O(n) +?? Aa.intersect : O(n) +``` + +Hashtable representation: +--------------------------------------- + +A traditional hash table uses an array as a table, indexed by hashes. +It handles hash collisions somehow, perhaps by doing a simple linear +scan. It regrows the table by doubling, or some other scheme. It may +or may not shrink the table. + +``` + Htbl.copy : O(n) + Htbl.find : O(1) + Htbl.replace, .insert, .remove : O(1) + Htbl.merge, .split : O(n) +?? Htbl.union : O(n) +?? Htbl.intersect : O(n) +``` + +### Swiss tables + +Swiss tables are special implementations of imperative hash tables that try to be extra efficient, compared to other options: + + - [Swiss tables blog post](https://abseil.io/blog/20180927-swisstables) + - [Swiss tables in Rust](https://github.com/Amanieu/hashbrown) + + diff --git a/stdlib/assocList.as b/stdlib/assocList.as index 67d2b40bf81..6a4194cdec2 100644 --- a/stdlib/assocList.as +++ b/stdlib/assocList.as @@ -1,14 +1,24 @@ -/* - * Association Lists, a la functional programming, in ActorScript. - */ +/** +Association Lists +================== + +Association Lists, a la functional programming, in ActorScript. + +Implements the same interface as `Trie`, but as a linked-list of key-value pairs. + +*/ // polymorphic association linked lists between keys and values type AssocList = List<(K,V)>; let AssocList = new { - // find the value associated with a given key, or null if absent. + /** + `find` + -------- + find the value associated with a given key, or null if absent. + */ func find(al : AssocList, k:K, k_eq:(K,K)->Bool) @@ -28,8 +38,12 @@ let AssocList = new { rec(al) }; - // replace the value associated with a given key, or add it, if missing. - // returns old value, or null, if no prior value existed. 
+ /** + `replace` + --------- + replace the value associated with a given key, or add it, if missing. + returns old value, or null, if no prior value existed. + */ func replace(al : AssocList, k:K, k_eq:(K,K)->Bool, @@ -61,9 +75,13 @@ let AssocList = new { rec(al) }; - // The key-value pairs of the final list consist of those pairs of - // the left list whose keys are not present in the right list; the - // values of the right list are irrelevant. + /** + `diff` + --------- + The key-value pairs of the final list consist of those pairs of + the left list whose keys are not present in the right list; the + values of the right list are irrelevant. + */ func diff(al1: AssocList, al2: AssocList, keq: (K,K)->Bool) @@ -83,15 +101,19 @@ let AssocList = new { rec(al1) }; - // This operation generalizes the notion of "set union" to finite maps. - // Produces a "disjunctive image" of the two lists, where the values of - // matching keys are combined with the given binary operator. - // - // For unmatched key-value pairs, the operator is still applied to - // create the value in the image. To accomodate these various - // situations, the operator accepts optional values, but is never - // applied to (null, null). - // + /** + `disj` + -------- + This operation generalizes the notion of "set union" to finite maps. + Produces a "disjunctive image" of the two lists, where the values of + matching keys are combined with the given binary operator. + + For unmatched key-value pairs, the operator is still applied to + create the value in the image. To accomodate these various + situations, the operator accepts optional values, but is never + applied to (null, null). + + */ func disj(al1:AssocList, al2:AssocList, keq:(K,K)->Bool, @@ -125,12 +147,16 @@ let AssocList = new { rec1(al1) }; - // This operation generalizes the notion of "set intersection" to - // finite maps. 
Produces a "conjuctive image" of the two lists, where - // the values of matching keys are combined with the given binary - // operator, and unmatched key-value pairs are not present in the output. - // - func conj(al1 : AssocList, + /** + `join` + --------- + This operation generalizes the notion of "set intersection" to + finite maps. Produces a "conjuctive image" of the two lists, where + the values of matching keys are combined with the given binary + operator, and unmatched key-value pairs are not present in the output. + + */ + func join(al1 : AssocList, al2:AssocList, keq:(K,K)->Bool, vbin:(V,W)->X) @@ -151,6 +177,10 @@ let AssocList = new { }; + /** + `fold` + --------- + */ func fold(al:AssocList, nil:X, cons:(K,V,X)->X) diff --git a/stdlib/docTable.as b/stdlib/docTable.as new file mode 100644 index 00000000000..d66eb189f27 --- /dev/null +++ b/stdlib/docTable.as @@ -0,0 +1,346 @@ + +/** + +Document Table +=============== + +This table abstracts over a set of _documents_, each with a distinct +id assigned by this abstraction. + +Documents potentially contain _deep nested structure_, e.g., other +document collections, etc. + +Each document has a shallow, lossy projection to its _document +information_; this information may contain more than a unique id, but +is sufficiently concise to transmit in a server-to-client message. +Likewise, document information seeds a new document, e.g., in a +client-to-server message with this _initial document information_. + +See the [client interface](#client-interface) below for detailed +type information. + +*/ + +/** + Representation + ================ + A table is a finite map (currently a [Trie]($DOCURL/trie.html)) mapping ids to documents. + + See also: [private state](#private-state). 
+ + notes on representation + ------------------------- + + The ActorScript standard library provides several purely-functional finite map representations: + + - as association lists (via modules `List` and `AssocList`) + - and as hash tries (via (module `Trie`), whose representation uses those lists, for its + "buckets". + + These map representations could change and expand in the future, so we + introduce the name `Table` here to abstract over the representation + choice between (for now) using tries (and internally, association lists). + + */ +type Table = Trie; +let Table = Trie; + +/** + + Aside: Eventually, we'll likely have a more optimized trie that uses + small arrays in its leaf nodes. The current representation is simple, + uses lots of pointers, and is likely not the optimal candidate for + efficient Wasm. However, its asymptotic behavior is good, and it thus + provides a good approximation of the eventual design that we want. + +*/ + +/** + Client interface + =============================== + + When the client provides the [parameters below](#client-parameters), +this module [implements the public interface given further +below](#public-interface). + + */ + +/** + Client parameters + ================== + + The document table abstracts over the following client choices: + + - types `Id`, `Doc` and `Info`. + - `idFirst,` -- the first id to use in the generation of distinct ids. + - `idIncr` -- increment function for ids. + - `idIsEq` -- equality function for ids. + - `idHash` -- hash function for ids. + - `infoOfDoc` -- project the document information from a document. + - `docOfInfo` -- seed and validate client-provided document information. + + See the types below for details. 
+ + */ +class DocTable( + idFirst:Id, + idIncr:Id->Id, + idIsEq:(Id,Id)->Bool, + idHash:Id->Hash, + infoOfDoc:Doc->Info, + docOfInfo:Info->?Doc +) = this { + +/** + Public interface + =============================== +*/ + + /** + `empty` + --------- + + See also [`Table.empty`]($DOCURL/trie.md#empty) + + */ + + empty() : Table { + Table.empty() + }; + + /** + `getTable` + --------- + + See also [`Table.copy`]($DOCURL/trie.md#copy) + + */ + + getTable() : Table { + Table.copy(table) + }; + + /** + `addDoc` + --------- + + See also [`Table.insertFresh`]($DOCURL/trie.md#insertfresh) + + */ + + addDoc(doc:Id -> Doc) : (Id, Doc) { + let id = idNext; + idNext := idIncr(idNext); + let d = doc(id); + table := Table.insertFresh + (table, keyOfId(id), idIsEq, d); + (id, d) + }; + + /** + `updateDoc` + --------- + + See also [`Table.replace`]($DOCURL/trie.md#insertfresh) + + */ + + updateDoc(id:Id, doc:Doc) : ?Doc { + let (updatedTable, oldDoc) = Table.replace + (table, keyOfId(id), idIsEq, ?doc); + table := updatedTable; + oldDoc + }; + + /** + `addInfoAs` + --------- + + See also [`addInfo`](#addinfo). + + This variant of `addInfo` permits the caller to choose the id, but still insists that it be fresh (not currently in use). + + */ + addInfoAs(idChoice:?Id, info:Id -> Info) : ?(Id, Doc) { + switch idChoice { + // subcase: No pre-chosen Id, so mint a new fresh one: + case null { + let id = idNext; + let doc = docOfInfo(info(id)); + switch doc { + case null { null }; + case (?doc) { + idNext := idIncr(idNext); + table := Table.insertFresh + (table, keyOfId(id), idIsEq, doc); + ?(id, doc) + } + } + }; + // subcase: Have a pre-chosen Id, so use that; still must be fresh. 
+ case (?idChoice_) { + let doc = docOfInfo(info(idChoice_)); + switch doc { + case null { null }; + case (?doc) { + table := Table.insertFresh + (table, keyOfId(idChoice_), idIsEq, doc); + ?(idChoice_, doc) + } + } + }; + } + }; + + /** + `addInfo` + --------- + + See also [`Table.insertFresh`]($DOCURL/trie.md#insertfresh) + + */ + addInfo(info:Id -> Info) : ?(Id, Doc) { + addInfoAs(null, info) + }; + + addInfoGetId(info:Id -> Info) : ?Id { + switch (addInfo(info)) { + case null { null }; + case (?(id, doc)) { ?id } + } + }; + + /** + `rem` + --------- + + See also [`Table.removeThen`]($DOCURL/trie.md#removeThen) + + */ + + rem(id:Id) : ?Doc { + Table.removeThen( + table, keyOfId(id), idIsEq, + func (t:Table, d:Doc) : ?Doc { + table := t; + ?d + }, + func ():?Doc = null + ) + }; + + + remGetId(id:Id) : ?Id { + Table.removeThen( + table, keyOfId(id), idIsEq, + func (t:Table, d:Doc) : ?Id { + table := t; + ?id + }, + func ():?Id = null + ) + }; + + remGetUnit(id:Id) : ?() { + Table.removeThen( + table, keyOfId(id), idIsEq, + func (t:Table, d:Doc) : ?() { + table := t; + ?() + }, + func ():?() = null + ) + }; + + /** + `getDoc` + --------- + + See also [`Table.find`]($DOCURL/trie.md#find) + + */ + + getDoc(id:Id) : ?Doc { + Table.find(table, keyOfId(id), idIsEq) + }; + + /** + `getInfo` + --------- + */ + + getInfo(id:Id) : ?Info { + switch (getDoc(id)) { + case null null; + case (?doc) { ?infoOfDoc(doc) }; + } + }; + + /** + `count` + --------- + + See also [`Table.count`]($DOCURL/trie.md#count) + */ + + count() : Nat { + Table.count(table) + }; + + /** + `allDoc` + --------- + + See also [`Table.toArray`]($DOCURL/trie.md#toarray) + */ + + allDoc() : [Doc] { + Table.toArray + (table, func (id:Id, doc:Doc):[Doc] = [doc] ) + }; + + /** + `allInfo` + --------- + + See also [`Table.toArray`]($DOCURL/trie.md#toarray) + */ + + allInfo() : [Info] { + Table.toArray + (table, func (id:Id, doc:Doc):[Info] = [infoOfDoc(doc)] ) + }; + + +/** + Public helpers + =============== + 
*/ + + keyOfId(x:Id) : Key = new { key = x ; hash = idHash(x) }; + + getIdIsEq() :(Id,Id)->Bool = idIsEq; + getIdHash() : Id->Hash = idHash; + + getInfoOfDoc() : Doc->Info = infoOfDoc; + getDocOfInfo() : Info->?Doc = docOfInfo; + + +/** + Private state + =============== + */ + + private var idNext:Id = idFirst; + + private var table : Table = null; + +/** + Helpers + =============== + */ + + +/** The end */ + +} diff --git a/stdlib/examples/produce-exchange/README.md b/stdlib/examples/produce-exchange/README.md new file mode 100644 index 00000000000..ac1e5882ba8 --- /dev/null +++ b/stdlib/examples/produce-exchange/README.md @@ -0,0 +1,557 @@ +Produce Exchange Canister Component +===================================== + +We give an example of ActorScript by implementing +the **Produce Exchange Canister** in ActorScript. + +The **produce exchange** gives a realistic marketplace-like +application, and serves as a canonical example DFINITY Dapp. + +We use it here to illustrate ActorScript the language, the standard +library, and the associated the DFINITY Dapp design process. + +Prior documentation +------------------- + +The design of the Produce Exchange example Dapp began as a [two page +Google drive +document](https://docs.google.com/document/d/1AxpcuFH-x_0ZSa32DfM_BCYnGxCS37ETPNWE4BXDNdo/edit), +giving the Dapp's functional specifications. + +Current design documentation: +----------------------------- + +The documentation of this design now evolves in two places: + + 1. The SDK and ActorScript teams' documentation: + i. Current [design document, under the SDK + space](https://dfinity.atlassian.net/wiki/x/MwD2Bg). + ii. Current [requirements document for the MVP + Design](https://dfinity.atlassian.net/wiki/spaces/DE/pages/116654198/Produce+Exchange+MVP+Product+Requirements). + iii. Early, older [documentation under the ActorScript space](https://dfinity.atlassian.net/wiki/spaces/AST/pages/104401122/Example+Dapp+Produce+Exchange). + + 2. 
[**This documentation and associated source + code**](https://github.com/dfinity-lab/actorscript/tree/stdlib-examples/stdlib/examples/produce-exchange) + + This code is implementing the **Produce Exchange Canister component**, + as a way to push the development of the ActorScript language, its + standard library, and elsewhere, the ambient DFINITY system that + runs ActorScript canisters. + +-------------------------------------------------------------- + +Produce Exchange Standards Specification +================================================== + +The Produce Exchange is a DFINITY canister whose implementation +defines a set of _standards_ whose **formal specification** we refer to collectively as +the _"Produce Exchange Standards Specification"_. + + +Organizational overview +---------------------------- + +We break the standards definition into several files, described below in detail as +[**server components**](#server-components). + +**Server message formats** + +As ActorScript-based documentation, the embedded source code for these +components makes the standards definition into a **formal definition**, to +the same degree that ActorScript has a formal semantics of its own, in +terms of DFINITY's semantics, etc: + +- The [server types](#server-types) define `shared` data types for client and server messages. + +- The [server actor](#server-actor) defines the server message interface for all clients. + +**Server message behavior** + +The _behavior_ of this server defines the _semantic_ aspects of the +standards definition. + +The _implementation details_ of this behavior are not included in the +standards definition. We include a prototype specification of this behavior, +which is subject to change: + +- The [server model types](#server-model-types) define the internal data model used by the server to support its behavior. + +- The [server model implementation](#server-model-implementation) defines the server behavior for all clients. 
+ + +Organizational design +--------------------------- + +To determine how each component evolves, we employ the following design philosophy. + +**Server message formats**: + +The server actor defines an interface boundary that only uses types +from the server types component (no model types, no collection types +from the standard library). + +**Design principle for interface design**: + +Whenever possible, we will push the implementation of **"business logic"** +into the **server _model_ components**, with the aspiration of the server +component itself being a minimal wrapper over model definitions, and +little to no logic of its own. + +These models are based closely on the ActorScript **standard library**, +and basic functional programming design patterns, which we +demonstrate through this example. + +The standard library provides programming abstractions for +_executable_ functional specifications that run on the DFINITY system. + +Whenever possible, we push reusable patterns and structures from the +model components into the standard library, with the aspiration of the +model components themselves being minimal wrappers over the standard +library. The latter gives the former a simple **mathematical +vocabulary**, based on **pure functional programming**, for the +specified Canister behavior. + + +Server components +========================== + +We decompose the _Canister_ for the **Produce Exchange example Dapp** +into an _ActorScript-based_ implementation of a "**Server**" with the +following definitional pieces, listed below. + +**Server types** +----------------- + +Basic types used in messages, and published/stored internally in the server actor's state. + +See [`serverTypes.md`]($DOCURL/examples/produce-exchange/serverTypes.html) for authoritative documentation. + +See [`serverTypes.as`](https://github.com/dfinity-lab/actorscript/blob/stdlib-examples/stdlib/examples/produce-exchange/serverTypes.as) for the source code. 
+ +**Server actor** +---------------------- + +Defined by the server actor's public signature, which specifies the messages and message formats for each participant. + +See [`serverActor.md`]($DOCURL/examples/produce-exchange/serverActor.html) for authoritative documentation. + +See [`serverActor.as`](https://github.com/dfinity-lab/actorscript/blob/stdlib-examples/stdlib/examples/produce-exchange/serverActor.as) for the source code. + + +**Server model types** +------------------------ + +This component defines structures that the next component uses to implement the server actor; neither component is exposed by the actor's public-facing interface. + +See [`serverModelTypes.md`]($DOCURL/examples/produce-exchange/serverModelTypes.html) for authoritative documentation. + +See [`serverModelTypes.as`](https://github.com/dfinity-lab/actorscript/blob/stdlib-examples/stdlib/examples/produce-exchange/serverModelTypes.as) for the source code. + +**Standard library** +-------------------------- + +See [the standard library](https://github.com/dfinity-lab/actorscript/tree/stdlib-examples/stdlib/#produce-exchange) +for collection abstractions, +including + the [`DocTable` class]($DOCURL/docTable.html) +and the [`Trie` type]($DOCURL/trie.html). + +**Server model implementation** +-------------------------------- + +The model implementation formally defines the _behavioral +(input-output-based) semantics_ of [each message type](#server-actor), +by implementing the server's interface in terms of the [_server model +types_](#server-model-types). + +See [`serverModel.md`]($DOCURL/examples/produce-exchange/serverModel.html) for authoritative documentation. + +See [`serverModel.as`](https://github.com/dfinity-lab/actorscript/blob/stdlib-examples/stdlib/examples/produce-exchange/serverModel.as) for the source code. 
+ +Note that while we define the **behavior for the server**, the +_implementation details of this component and [server model +types](#server-model-types) themselves are not in definition, and are +subject to change independently of this definition. + +**Aside:** This model implementation is highly formulaic. In the +future, we could likely _derive_ such implementations (auto-generate +them) from a higher-level property and relation markup language +defined over, and targeting, the existing actorscript type system and +associated standard library patterns. + + +Test suite components +========================= + +We decompose the **test suite** for the Produce Exchange into the following milestones and associated test components: + + 5. **Server actor example uses**: + Use the system on small examples, whose output can be shown in a demo, presentation, slide deck, etc. + + To do + + 6. **Automated regression tests**: + Generate (online or offline) random example uses, and record the current output; check future revisions against this output. + + To do + + 7. **Performance models**: + Generate (online or offline) random example uses, and record time and space usage of the Wasm VM across different work loads; plot this data, and generate human-readable reports about it. + + To do + +See below for [more thoughts about performance testing]($DOCURL/stdlib/examples/produce-exchange#performance-considerations). 
+ + + +---------------------------------------------------------------------------- + + +[Produce Exchange Canister: MVP Requirements](https://dfinity.atlassian.net/wiki/spaces/DE/pages/116654198/Produce+Exchange+MVP+Product+Requirements) +============================================= + +**User interaction and design** + +The only thing established are five views for the MVP: + +- Sign up/Login/Logout page +- Producer view - if user authenticated is a producer +- Transporter view - if user authenticated is a transporter +- Retailer view - if user authenticated is a retailer +- Developer view - if user authenticated is a Developer + +ALL USERS +--------------- + +> **Sign up** User can add their name and role and receive a unique ID + +See these, provided by the [**registrar** role]($DOCURL/examples/produce-exchange/serverActor.md#registrar-based-ingress-messages): +- [**`registrarAddProducer`**]($DOCURL/examples/produce-exchange/serverActor.md#registraraddproducer) +- [**`registrarAddTransporter`**]($DOCURL/examples/produce-exchange/serverActor.md#registraraddtransporter) +- [**`registrarAddRetailer`**]($DOCURL/examples/produce-exchange/serverActor.md#registraraddretailer) + +> **Log in** A user can identify themselves from a dropdown and "log in" + +??? no op + +> **Log out** User can log out of the app + +??? no op + +PRODUCER +------------- +> **Add/update inventory** Producer updates the goods, prices in the inventory available on the exchange + +See [**`producerAddInventory`**]($DOCURL/examples/produce-exchange/serverActor.md#produceraddinventory) +and [**`producerRemInventory`**]($DOCURL/examples/produce-exchange/serverActor.md#producerreminventory) + +> **View inventory** Producer can see their inventory + +See [**`producerAllInventoryInfo`**]($DOCURL/examples/produce-exchange/serverActor.md#producerallinventoryinfo) + +> **View past sales orders** Producer can see sales orders they fulfilled in the past + +Note: We call them "reservations". 
+ +See [**`producerReservations`**]($DOCURL/examples/produce-exchange/serverActor.md#producerreservations) + +> **View "market price"** Producer can see the last sales price for any good within any geographic area + +See [**`produceMarketInfo`**]($DOCURL/examples/produce-exchange/serverActor.md#producemarketinfo) + +TRANSPORTER +--------------------- +> **Add/update routes** Transporter updates the routes available on the exchange. Transporter can see their routes. Each route is composed of an origin zone, destination zone, pickup date, delivery date, cost. + +See [**`transporterAddRoute`**]($DOCURL/examples/produce-exchange/serverActor.md#transporteraddroute) +and [**`transporterRemRoute`**]($DOCURL/examples/produce-exchange/serverActor.md#transporterremroute) + +> **View routes** Transporter can see their routes. Each route is composed of an origin zone, destination zone, pickup date, delivery date, cost. + +See [**`transporterAllRouteInfo`**]($DOCURL/examples/produce-exchange/serverActor.md#transporterallrouteinfo) + +> **View past sales orders** Transporter can see routes which were utilized in the past + +Note: We call them "reservations". + +See [**`transporterAllReservationInfo`**]($DOCURL/examples/produce-exchange/serverActor.md#transporterallreservationinfo) + + +RETAILER +------------------- +> **Query inventory** Retailer can query a good with a delivery date. The Exchange will return a list of goods (and prices) that can be delivered to that retailer's geography within that date. + +See [**`retailerQueryDates`**]($DOCURL/examples/produce-exchange/serverActor.md#retailerquerydates) +and +[**`retailerQueryAll`**]($DOCURL/examples/produce-exchange/serverActor.md#retailerqueryall) + + +> **Place a sales order** Retailer can place order for one or more of options presented by any query. + +Note: We call them "reservations". 
+ +See [**`retailerReserve`**]($DOCURL/examples/produce-exchange/serverActor.md#retailerreserve) +and +[**`retailerReserveCheapest`**]($DOCURL/examples/produce-exchange/serverActor.md#retailerreservecheapest) + +> **View past sales orders** Retailer can see sales orders they placed in the past + +Note: We call them "reservations". + +See [**`retailerReservations`**]($DOCURL/examples/produce-exchange/serverActor.md#retailerreservations) + + + +EXCHANGE DAPP DEVELOPER +--------------------------- + +> **View GMV** Developer can see aggregate sum of how many sales have been processed + +See [**`devViewGMV`**]($DOCURL/examples/produce-exchange/serverActor.md#devviewgmv). + +> **View queries** Developer can see how many aggregate queries have been made by all retailers + +See [**`devViewQueries`**]($DOCURL/examples/produce-exchange/serverActor.md#devviewqueries). + +> **View sales orders** Developer can see how many aggregate sales orders have been made by all retailers + +See [**`devViewReservations`**]($DOCURL/examples/produce-exchange/serverActor.md#devviewreservations). + +> **View producers** Developer can see how many producers in the system and how many goods each has + +See [**`devViewProducers`**]($DOCURL/examples/produce-exchange/serverActor.md#devviewproducers). + +> **View transporters** Developer can see how many transporters in the system and how many routes each has + +See [**`devViewTransporters`**]($DOCURL/examples/produce-exchange/serverActor.md#devviewtransporters). + +> **View retailers** Developer can see how many retailers in the system and how many queries and how many sales orders + +See [**`devViewRetailers`**]($DOCURL/examples/produce-exchange/serverActor.md#devviewretailers). 
+ + + +--------------------------------------------------------------------------------- + +Define "Done" +================================ + +See also: [Exit criteria](#exit-criteria) +---------------------------------------- + +Merge to `master` requirements: +-------------------------------- + +This example is a work in progress. It will be finished (and merged +to `master`) when the following are in a stable state, and working +locally, as locally-running ActorScript. + +**Requirements list**: + 1. **the exchange’s interface definition**, as an ActorScript actor. + + 2. **the behavior of the exchange**, as that actor’s prototype + implementation. + + 3. **the internal data model representation**, based on the + ActorScript language and standard library, used internally to the + actor, to implement the specification behavior. + + 4. **test suite** for all of the above, using local execution and + ActorScript-based test cases. + +**Canister component**: The requirements listed above also correspond with +those of the *Canister component* in Milestone 2.0 of the [MVP +Requirements Spec, 1.ii +above](https://dfinity.atlassian.net/wiki/spaces/DE/pages/116654198/Produce+Exchange+MVP+Product+Requirements). + + +--------------------------------------------------------------------------------- + +Exit Criteria +===================== + +Background definitions +--------------- + +Mack (via email correspondence): + +**Exit criteria**: + +> What objective tests will tell us that we have successfully built +> the right thing? as a part of the functional specification - can be +> implicit as you get good at it, better to start out explicit with a +> separate list at first. + +Exit criteria and process: + +> Before transitioning from functional specification to design, +> clearly document assumptions (max 1000 concurrent users, prices +> never exceed X, alpha not needed before Aug1, do not have to handle +> [most important - non-goals] ) and dependencies. 
Review these and +> get broad buy-in before starting design in earnest. + + + + +["Exit criteria" on wikipedia](https://en.wikipedia.org/wiki/Exit_criteria): + +> understanding goals clearly; using language (and data) carefully +> when talking about (or measuring) methods for getting things done; +> and taking a scientific approach towards evaluating and improving +> the methods that are used. + +> set of test specifications are created to test this new product to +> ensure that it meets minimum acceptable operational +> specifications. This test specification will state the minimum +> criteria necessary for the testing process to be considered complete +> and the product is ready for release IE: Exit the testing phase of +> the program + +[Rice Consulting Services](https://www.riceconsulting.com/public_pdf/Ins%20and%20Outs%20of%20Entry%20and%20Exit%20Criteria%20-%20ASTQB%20Webinar%20v2.pdf) + +Using DEFECT METRICS AS ENTRY AND EXIT CRITERIA: +- _NUMBER OF DEFECTS OUTSTANDING BY STATUS_ +- _PERCENTAGE OF TESTS THAT EVENTUALLY PASS_ +- _NUMBER OF TESTS THAT CONTINUE TO FAIL_ + +Exit criteria examples +-------------------------- + +Concrete examples at DFINITY: + + - [M1 Exit Criteria](https://dfinity.atlassian.net/wiki/spaces/M1/pages/491712/Basenet+M1+Release+Exit+Criteria) + + +Exit criteria for this Canister +-------------------------------- + + +**Dates and people** + +- [MVP features](#produce-exchange-canister-mvp-requirements) done on or before March 26 ActorScript team meeting + +- Full exit criteria met asap, before ActorScript and/or SDK launch (?) + +- Most items below are tasked to @Matthewhammer; some require support + from other teams at DFINITY, including the ActorScript and SDK + teams. + +**Feature-based metrics** + +- Performs all features defined by MVP design doc, in the sense that + every use case has one or more server messages that facilitate the + use case. 
+ +- Front-end interacts with this Canister via DFINITY; see operational + metrics below for more. + +**Test-based metrics** + +- Hand-scripted, automated tests on small numbers of entities. + +- Automatically generate some test data to variable sizes, including + **hundreds or thousands** each of the following: regions, retailers, + transporters, producers, inventory, routes. Produce and truck types + will still be hand-coded. + +- Run automated tests that populate the system (with 100s or 1000s of + entities of each type), then simulate the retailers making random + queries and reservations over this random exchange data. + +- Record machine-readable logs of these simulations, e.g., for + future data analysis, demo visualizations, and regression tests. + +**Operational metrics** + +- We can run this Canister on a single Wasm VM, within a single + DFINITY node, running on its own. + +- No hard number requirements yet + +**Performance metrics** + +- We can gather performance metrics about the Wasm VM, whatever they + may be. + +- No hard number requirements yet; any initial numbers are okay here; the point is to have the + ability to gather numbers, not perform at a certain operational + level (yet). + + +**Extras 1: Maybe not included** + +- Standard library used by exchange is fully documented + +- Standard library used by exchange has unit tests of its own + + +**Extras 2: definitely not included** + +- Standard library collections used by exchange have high-performance representations + +- Grow automated test system into a random test-simulation + system. Perform simple goal-directed simulations; e.g., run a + marketplace simulation in time, wherein entities interact through the + exchange, in simple goal-directed ways (e.g., every retailer tries + to buy all the produce that they can, but get each kind of produce + at the cheapest costs; the producers and transporters use + marketplace information or other constraints to choose prices, + routes and inventory). 
+ + +---------------------------------------------------------------------------- + +Open Questions: +================ + +We have the following questions: + + 1. Massive result messages: How do we represent and send these? + + - lazy lists? (seems "easy" from AS programmer perspective, but + requires non-first-order data in the IDL) + + - list iterators? (almost as good as lazy lists, but requires + references in the IDL, and complicates the GC story). + + - arrays? (expensive to build and send; can become way *too big*). + + **2019.03.12 -- TODO-Cursors:** Introduce the idea of "cursors", with + allocation, de-allocation and movement messages, as per discussion in + the 2019.03.12 ActorScript Team meeting. + + 2. For now, can we assume that the canister is maintained by the + central authority? + + 3. "Conditional updates" -- important to Mack + + 4. How complex can the queries become? Can we do truck-sharing-based + queries, where we query as a retailer that expects to share trucks + across several producer orders (e.g., in the same origin region). + Alternatively, can we order multiple items from a single producer to + ship on a single truck route? Presumably, we can? + + 5. Define a query language? + --- Not until ActorScript implements variant types. + + 6. [Canister upgrades](#canisterupgrades) + +-------------------------------------------------------------------------------- + + +Canister upgrades +==================== + +The standards evolve according to the "central authority" (cf PE spec +document), who we identify as the github repo and open source +developer community that surrounds this implementation. + +Updating the types in the standards requires changing the file `serverTypes.as` +mentioned above, and performing a canister upgrade on the running +system. Similarly, to evolve the behavioral definition of standards, the +implementation of this actor will change (in `serverActor.as` and +`serverModel.as`), and will also require a canister upgrade. 
+ + +--------------------------------------------------------------------------------------- + diff --git a/stdlib/examples/produce-exchange/serverActor.as b/stdlib/examples/produce-exchange/serverActor.as new file mode 100644 index 00000000000..988d941523b --- /dev/null +++ b/stdlib/examples/produce-exchange/serverActor.as @@ -0,0 +1,889 @@ +/** + + [Background]($DOCURL/examples/produce-exchange#Produce-Exchange-Standards-Specification) + -------------------- +*/ + +actor server = { + +/** + Server Actor + ======================================= + + The `Server` actor defines an interface for messages sent + by all participants, and the responses received in return. + + See also: + + - [client-server types]($DOCURL/examples/produce-exchange/serverTypes.md#server-types). + - the **[server `Model` class]($DOCURL/examples/produce-exchange/serverModel.html)**. + + + Registrar-based ingress messages + ================================================ + + The registrar provides functions to add and to remove entities from + the following (mostly-static) tables: + + - **Static resource information:** truck types, produce types and region information. + - **Participant information:** producers, retailers and transporters. + - **Dynamic resource information:** inventory, routes and reservations. + + For each of the entities listed above, we have an add (`Add`) + and remove (`Rem`) function below, prefixed by `registrar`-, and + suffixed by one of the entities in the following list: + + - `User`, + - `TruckType`, + - `Region`, + - `Produce`, + - `Producer`, + - `Retailer`, or + - `Transporter`. + + + `User` + ========= + Messages about users. + + + `registrarAddUser` + ---------------------- + Register a new user, who may play several roles in the exchange. + + The given `user_name` must be unique to the exchange; the operation fails otherwise. 
+ + */ + + registrarAddUser( + user_name: Text, + public_key: Text, + description: Text, + region: RegionId, + isDeveloper: Bool, + isProducer: Bool, + isRetailer: Bool, + isTransporter: Bool + ) : async ?UserId { + getModel().addUser( + user_name, + public_key, + description, + region, + isDeveloper, + isProducer, + isRetailer, + isTransporter + ) + }; + + /** + `allUserInfo` + ------------- + Get info for all users. + */ + allUserInfo() : async [UserInfo] { + getModel().userTable.allInfo() + }; + + /** + `getUserInfo` + --------------------------- + Get the information associated with a user, based on its id. + */ + getUserInfo(id:UserId) : async ?UserInfo { + getModel() + .userTable.getInfo(id) + }; + + /** + `TruckType` + ============== + Messages about truck types. + */ + + + /** + `registrarAddTruckType` + ------------------------ + + */ + + registrarAddTruckType( + short_name_: Text, + description_: Text, + capacity_ : Weight, + isFridge_ : Bool, + isFreezer_ : Bool, + ) : async ?TruckTypeId { + getModel() + .truckTypeTable.addInfoGetId( + func (id_:TruckTypeId) : TruckTypeInfo = + + // xxx: AS should have more concise syntax for this pattern, below: + // two problems I see, that are separate: + // 1: repeating the label/variable name, which is the same in each case, twice. + // 2: explicit type annotations, because of "type error, cannot infer type of forward variable ..." + // but two other sources exist for each type: the type of `insert` is known, and hence, this record has a known type, + // and, the type of each of these `variables` is known, as well. 
+ + shared { + id=id_ :TruckTypeId; + short_name=short_name_:Text; + description=description_:Text; + capacity=capacity_:Weight; + isFridge=isFridge_:Bool; + isFreezer=isFreezer_:Bool; + }) + }; + + /** + `registrarRemTruckType` + --------------------- + */ + + registrarRemTruckType( + id: TruckTypeId + ) : async ?() { + getModel().truckTypeTable.remGetUnit(id) + }; + + /** + `getTruckTypeInfo` + --------------------- + */ + + getTruckTypeInfo( + id: TruckTypeId + ) : async ?TruckTypeInfo { + getModel().truckTypeTable.getInfo(id) + }; + + /** + `allTruckTypeInfo` + --------------------- + */ + + allTruckTypeInfo() : async [TruckTypeInfo] { + getModel().truckTypeTable.allInfo() + }; + + + /** + `Region` + ============== + Messages about regions. + + */ + + /** + `registrarAddRegion` + --------------------- + adds the region to the system; fails if the given information is + invalid in any way. + */ + + registrarAddRegion( + short_name_: Text, + description_: Text, + ) : async ?RegionId { + getModel().regionTable.addInfoGetId( + func (id_:RegionId) : RegionInfo = + shared { + id = id_:RegionId; + short_name=short_name_:Text; + description=description_:Text + }) + }; + + /** + `registrarRemRegion` + --------------------- + + returns `?()` on success, and `null` on failure. + */ + + registrarRemRegion( + id: RegionId + ) : async ?() { + getModel().regionTable.remGetUnit(id) + }; + + /** + `getRegionInfo` + --------------------- + + See also: [server type `RegionInfo`]($DOCURL/examples/produce-exchange/serverTypes.md#regioninfo). + + */ + + getRegionInfo( + id: RegionId + ) : async ?RegionInfo { + getModel().regionTable.getInfo(id) + }; + + + /** + `allRegionInfo` + --------------------- + + See also: [server type `RegionInfo`]($DOCURL/examples/produce-exchange/serverTypes.md#regioninfo). 
+ + */ + + allRegionInfo() : async [RegionInfo] { + getModel().regionTable.allInfo() + }; + + + /** + `Produce` + ================= + Messages about produce + + */ + + /** + `registrarAddProduce` + --------------------- + + adds the produce to the system; fails if the given information is invalid in any way. + */ + + registrarAddProduce( + short_name_: Text, + description_: Text, + grade_: Grade, + ) : async ?ProduceId { + getModel().produceTable.addInfoGetId( + func (id_:ProduceId) : ProduceInfo = + shared { + id = id_:ProduceId; + short_name=short_name_:Text; + description=description_:Text; + grade=grade_:Grade + }) + }; + + /** + `registrarRemProduce` + --------------------- + + returns `?()` on success, and `null` on failure. + */ + + registrarRemProduce( + id: ProduceId + ) : async ?() { + getModel().produceTable.remGetUnit(id) + }; + + + /** + `getProduceInfo` + --------------------- + */ + + getProduceInfo( + id: ProduceId + ) : async ?ProduceInfo { + getModel().produceTable.getInfo(id) + }; + + /** + `allProduceInfo` + --------------------- + */ + + allProduceInfo() : async [ProduceInfo] { + getModel().produceTable.allInfo() + }; + + /** + `Producer` + =============== + Messages about producers. + + */ + + /** + `registrarAddProducer` + --------------------- + + adds the producer to the system; fails if the given region is non-existent. + */ + + registrarAddProducer( + short_name_: Text, + description_: Text, + region_: RegionId, + ) : async ?ProducerId { + getModel().producerTable.addInfoGetId( + func(id_:ProducerId):ProducerInfo { + shared { + id=id_:ProducerId; + short_name=short_name_:Text; + description=description_:Text; + region=region_:RegionId; + inventory=[]; + reserved=[]; + } + }) + }; + + /** + `registrarRemProducer` + --------------------- + + returns `?()` on success, and `null` on failure. 
+ */ + + registrarRemProducer( + id: ProducerId + ) : async ?() { + getModel().producerTable.remGetUnit(id) + }; + + + /** + `getProduceInfo` + --------------------- + */ + + getProducerInfo( + id: ProducerId + ) : async ?ProducerInfo { + getModel().producerTable.getInfo(id) + }; + + /** + `allProducerInfo` + --------------------- + */ + + allProducerInfo() : async [ProducerInfo] { + getModel().producerTable.allInfo() + }; + + + + /** + `Retailer` + ============ + Messages to `Add`, `Rem` and `Inspect` retailers. + */ + + /** + `registrarAddRetailer` + --------------------- + + adds the producer to the system; fails if the given region is non-existent. + */ + + registrarAddRetailer( + short_name_: Text, + description_: Text, + region_: RegionId, + ) : async ?RetailerId { + getModel().retailerTable.addInfoGetId( + func(id_:RetailerId):RetailerInfo { + shared { + id=id_:RetailerId; + short_name=short_name_:Text; + description=description_:Text; + region=region_:RegionId + } + }) + }; + + /** + `registrarRemRetailer` + --------------------- + + returns `?()` on success, and `null` on failure. + */ + + registrarRemRetailer( + id: RetailerId + ) : async ?() { + getModel().retailerTable.remGetUnit(id) + }; + + /** + `getRetailerInfo` + --------------------- + */ + + getRetailerInfo( + id: RetailerId + ) : async ?RetailerInfo { + getModel().retailerTable.getInfo(id) + }; + + /** + `allRetailerInfo` + --------------------- + */ + + allRetailerInfo() : async [RetailerInfo] { + getModel().retailerTable.allInfo() + }; + + + /** + `Transporter` + ================ + Messages to `Add`, `Rem` and `Inspect` transporters. 
+ */ + + /** + `registrarAddTransporter` + --------------------- + + */ + registrarAddTransporter( + short_name_: Text, + description_: Text, + ) : async ?TransporterId { + getModel().transporterTable.addInfoGetId( + func(id_:TransporterId):TransporterInfo { + shared { + id=id_:TransporterId; + short_name=short_name_:Text; + description=description_:Text; + routes=[]; + reserved=[]; + } + }) + + }; + + /** + `registrarRemTransporter` + --------------------- + + */ + + registrarRemTransporter( + id: TransporterId + ) : async ?() { + getModel().transporterTable.remGetUnit(id) + }; + + /** + `getTransporterInfo` + --------------------- + */ + + getTransporterInfo( + id: TransporterId + ) : async ?TransporterInfo { + getModel().transporterTable.getInfo(id) + }; + + + /** + `allTransporterInfo` + --------------------- + */ + + allTransporterInfo() : async [TransporterInfo] { + getModel().transporterTable.allInfo() + }; + + + /** + `Producer`-based ingress messages: + ========================================== + */ + + /** + `producerAddInventory` + ------------------------------------------ + + See also [Model.producerAddInventory]($DOCURL/stdlib/examples/produce-exchange/serverModel.md#produceraddinventory) + */ + producerAddInventory( + id: ProducerId, + prod: ProduceId, + quant:Quantity, + weight:Weight, + ppu: PricePerUnit, + begin:Date, + end: Date, + comments: Text, + ) : async ?InventoryId { + getModel(). + producerAddInventory( + null, id, prod, quant, weight, ppu, begin, end, comments) + }; + + /** + `producerUpdateInventory` + ------------------------------------------ + + */ + producerUpdateInventory( + iid: InventoryId, + id: ProducerId, + prod: ProduceId, + quant:Quantity, + weight:Weight, + ppu: PricePerUnit, + begin:Date, + end: Date, + comments: Text, + ) : async ?() { + getModel(). 
+ producerUpdateInventory( + iid, id, prod, quant, weight, ppu, begin, end, comments) + }; + + /** + `producerRemInventory` + --------------------------- + */ + producerRemInventory(id:InventoryId) : async ?() { + getModel() + .producerRemInventory(id) + }; + + /** + `producerAllInventoryInfo` + --------------------------- + */ + producerAllInventoryInfo(id:ProducerId) : async ?[InventoryInfo] { + getModel() + .producerAllInventoryInfo(id) + }; + + /** + `producerReservations` + --------------------------- + */ + producerReservations(id:ProducerId) : async ?[ReservedInventoryInfo] { + getModel() + .producerReservations(id) + }; + + + /** + Inventory and produce information + ====================================== + Messages about produce and inventory + + */ + + /** + `produceMarketInfo` + --------------------------- + The last sales price for produce within a given geographic area; null region id means "all areas." + */ + produceMarketInfo(id:ProduceId, reg:?RegionId) : async ?[ProduceMarketInfo] { + getModel() + .produceMarketInfo(id, reg) + }; + + + /** + `allInventoryInfo` + --------------------------- + Get the information for all known inventory. + */ + allInventoryInfo() : async [InventoryInfo] { + getModel() + .inventoryTable.allInfo() + }; + + /** + `getInventoryInfo` + --------------------------- + Get the information associated with inventory, based on its id. 
+ */ + getInventoryInfo(id:InventoryId) : async ?InventoryInfo { + getModel() + .inventoryTable.getInfo(id) + }; + + + /** + `Transporter`-based ingress messages: + =========================================== + */ + + /** + `transporterAddRoute` + --------------------------- + */ + transporterAddRoute( + trans: TransporterId, + rstart: RegionId, + rend: RegionId, + start: Date, + end: Date, + cost: Price, + ttid: TruckTypeId + ) : async ?RouteId { + getModel().transporterAddRoute(trans, rstart, rend, start, end, cost, ttid) + }; + + /** + `transporterRemRoute` + --------------------------- + */ + transporterRemRoute(id:RouteId) : async ?() { + getModel() + .transporterRemRoute(id) + }; + + /** + `transporterAllRouteInfo` + --------------------------- + */ + transporterAllRouteInfo(id:TransporterId) : async ?[RouteInfo] { + getModel() + .transporterAllRouteInfo(id) + }; + + /** + `transporterAllReservationInfo` + --------------------------- + */ + transporterAllReservationInfo(id:TransporterId) : async ?[ReservedRouteInfo] { + getModel() + .transporterAllReservationInfo(id) + }; + + /** + `allRouteInfo` + --------------------------- + Get the information for all known routes. + */ + allRouteInfo() : async [RouteInfo] { + getModel() + .routeTable.allInfo() + }; + + /** + `Retailer`-based ingress messages: + ====================================== + + `retailerQueryAll` + --------------------------- + + TODO-Cursors (see above). + + */ + retailerQueryAll(id:RetailerId) : async ?QueryAllResults { + getModel(). + retailerQueryAll(id) + }; + + /** + `retailerQueryDates` + --------------------------- + + Retailer queries available produce by delivery date range; returns + a list of inventory items that can be delivered to that retailer's + geography within that date. + + */ + retailerQueryDates( + id:RetailerId, + begin:Date, + end:Date + ) : async ?[InventoryInfo] + { + getModel(). 
+ retailerQueryDates(id, begin, end) + }; + + /** + `retailerReserve` + --------------------------- + */ + retailerReserve( + id:RetailerId, + inventory:InventoryId, + route:RouteId) : async ?(ReservedInventoryId, ReservedRouteId) + { + getModel(). + retailerReserve(id, inventory, route) + }; + + /** + `retailerReservations` + --------------------------- + + TODO-Cursors (see above). + + */ + retailerReservations(id:RetailerId) : + async ?[(ReservedInventoryInfo, + ReservedRouteInfo)] + { + getModel(). + retailerAllReservationInfo(id) + }; + + + + /** + + Developer-based ingress messages: + ======================================================== + + The following messages may originate from developers + + */ + + /** + `getCounts` + ---------- + */ + + getCounts() : async ProduceExchangeCounts { + let m = getModel(); + shared { + truck_type_count = m.truckTypeTable.count(); + region_count = m.regionTable.count(); + produce_count = m.produceTable.count(); + inventory_count = m.inventoryTable.count(); + reserved_inventory_count = m.reservedInventoryTable.count(); + producer_count = m.producerTable.count(); + retailer_count = m.retailerTable.count(); + transporter_count = m.transporterTable.count(); + route_count = m.routeTable.count(); + reserved_route_count = m.reservedRouteTable.count(); + + retailer_query_count = m.retailerQueryCount; + retailer_query_cost = m.retailerQueryCost; + retailer_join_count = m.retailerJoinCount; + } + }; + + /** + `devViewGMV` + ------------- + + MVP: + + > Developer can see the GMV, the aggregate sum of how many sales have +been processed +*/ + + devViewGMV() : async ?Nat { + nyi() + }; + + /** + `devViewQueries` + ---------------- + + MVP: + + > Developer can see how many aggregate queries have been made by all retailers + + */ + + devViewQueries() : async ?Nat { + ?getModel().retailerQueryCount; + }; + + + /** + `devViewReservations` + ---------------------- + + MVP: + + > Developer can see how many aggregate sales orders have been 
made by all retailers + + */ + + devViewReservations() : async Nat { + getModel().reservedInventoryTable.count() + }; + + /** + `devViewProducers` + ------------------- + + MVP: + + > Developer can see how many producers in the system and how many goods each has + + See also [`producerInfo`](#producerinfo). + + */ + + devViewProducers() : async [ProducerInfo] { + getModel().producerTable.allInfo() + }; + + + /** + `devViewTransporters` + ------------------- + + MVP: + + > Developer can see how many transporters in the system and how many routes each has + + See also [`transporterInfo`](#transporterinfo). + + */ + + devViewTransporters() : async [TransporterInfo] { + getModel().transporterTable.allInfo() + }; + + /** + `devViewRetailers` + ------------------- + + MVP: + + > Developer can see how many retailers in the system and how many queries and how many sales orders + + See also [`retailerInfo`](#retailerinfo). + + */ + + devViewRetailers() : async [RetailerInfo] { + getModel().retailerTable.allInfo() + }; + + + /////////////////////////////////////////////////////////////////////////// + // @Omit: + + // See `serverModel.as` for the Model class's implementation + + // Matthew-Says: + // There are two initialization options for the model field: + // 1. Call Model() directly; using this option now. + // 2. Call Model() later, when we try to access the model field. + + private var model : Model = Model(); // OPTION 2: null; + + private getModel() : Model { + model + // OPTION 2: + // switch model { + // case (null) { + // let m = Model(); + // model := ?m; m + // }; + // case (?m) m; + // } + // + }; + +/** + End of interface definition +----------------------------------- + With the following closing brace, the interface of the `Server` is thusly defined. 
+ */
+};// end: actor class `Server`
diff --git a/stdlib/examples/produce-exchange/serverModel.as b/stdlib/examples/produce-exchange/serverModel.as
new file mode 100644
index 00000000000..1086b319adf
--- /dev/null
+++ b/stdlib/examples/produce-exchange/serverModel.as
@@ -0,0 +1,1369 @@
+/**

+[Background]($DOCURL/examples/produce-exchange#Produce-Exchange-Standards-Specification)
+--------------------
+
+Server Model
+===============================
+
+**[`Server` actor class]($DOCURL/examples/produce-exchange/serverActor.html)**
+defines an interface for messages sent by all participants, and the responses received in return.
+
+
+Here, we depart from defining messages and their data types, and
+instead turn our attention to the _internal representation_ of the
+server actor's state, defined by the **[server model
+types]($DOCURL/examples/produce-exchange/serverModelTypes.html)**,
+and the _outer behavior_ of this `Server` actor. The latter behavior
+is part of the standards definition, and the internal type definitions that it
+uses are not. 
+ +*/ + +type RouteInventoryMap = Trie<(RouteId, InventoryId), (RouteDoc, InventoryDoc)>; + +class Model() { + + + + + /** + Misc helpers + ================== + */ + + private debug (t:Text) { print t }; + private debugInt (i:Int) { printInt i }; + + private debugOff (t:Text) { }; + private debugIntOff (i:Int) { }; + + private unwrap(ox:?T) : T { + switch ox { + case (null) { assert false ; unwrap(ox) }; + case (?x) x; + } + }; + + private idIsEq(x:Nat,y:Nat):Bool { x == y }; + + private textIsEq(x:Text,y:Text):Bool { x == y }; + + private idPairIsEq(x:(Nat,Nat),y:(Nat,Nat)):Bool { x.0 == y.0 and x.1 == y.1 }; + + private idHash(x:Nat):Hash { Hash.hashOfInt(x) }; + + private idPairHash(x:(Nat,Nat)):Hash { Hash.hashOfIntAcc(Hash.hashOfInt(x.0), x.1) }; + + private keyOf(x:Nat):Key { + new { key = x ; hash = idHash(x) } + }; + + private keyOfIdPair(x:Nat, y:Nat):Key<(Nat,Nat)> { + new { key = (x,y) ; hash = idPairHash(x,y) } + }; + + private keyOfText(x:Text):Key { + new { key = x ; hash = Hash.hashOfText(x) } + }; + + /** + Misc counters + ================== + */ + + var joinCount = 0; + + +/** + +Representation +================= + +We use several public-facing **tables**, implemented as document tables. + + +CRUD operations via [document tables]($DOCURL/docTable.html) +---------------------------------------------------- + +This server model provides [document table]($DOCURL/docTable.html) objects to hold the +following kinds of entities in the exchange: + +- **Static resource information:** truck types, produce types and region information. +- **Participant information:** producers, retailers and transporters. +- **Dynamic resource information:** inventory, routes and reservations. + +For each of the entity kinds listed above, we have a corresponding +`DocTable` defined below that affords ordinary CRUD +(create-read-update-delete) operations. + + +Secondary maps +---------------------- + +See also [indexing by `RegionId`](#indexing-by-regionid). 
+ +The secondary maps and intra-document maps enable faster query +performance. + +When we update the primary tables, we update any associated +secondary indices maps and intra-document maps as well, to keep them +all in sync. + +**To do:** We initialize the primary tables with callbacks that +refer to the secondary maps; the callbacks intercept add/remove +operations to maintain consistency between the primary tables and the +secondary maps. + +*/ + + /** + `userTable` + ----------------- + */ + + var userTable : UserTable = + DocTable( + 0, + func(x:UserId):UserId{x+1}, + func(x:UserId,y:UserId):Bool{x==y}, + idHash, + func(doc:UserDoc):UserInfo = shared { + id=doc.id; + user_name=doc.user_name; + public_key=doc.public_key; + description=doc.description; + region=doc.region; + producerId=doc.producerId; + transporterId=doc.transporterId; + retailerId=doc.retailerId; + isDeveloper=doc.isDeveloper; + }, + func(info:UserInfo):?UserDoc = ?(new { + id=info.id; + user_name=info.user_name; + public_key=info.public_key; + description=info.description; + region=info.region; + producerId=info.producerId; + transporterId=info.transporterId; + retailerId=info.retailerId; + isDeveloper=info.isDeveloper; + }), + ); + + + /** + `truckTypeTable` + ----------------- + */ + + var truckTypeTable : TruckTypeTable = + DocTable( + 0, + func(x:TruckTypeId):TruckTypeId{x+1}, + func(x:TruckTypeId,y:TruckTypeId):Bool{x==y}, + idHash, + func(doc:TruckTypeDoc):TruckTypeInfo = shared { + id=doc.id; + short_name=doc.short_name; + description=doc.description; + capacity=doc.capacity; + isFridge=doc.isFridge; + isFreezer=doc.isFreezer; + }, + func(info:TruckTypeInfo):?TruckTypeDoc = ?(new { + id=info.id; + short_name=info.short_name; + description=info.description; + capacity=info.capacity; + isFridge=info.isFridge; + isFreezer=info.isFreezer; + }), + ); + + /** + `regionTable` + ----------------- + */ + + var regionTable : RegionTable = + DocTable( + 0, + func(x:RegionId):RegionId{x+1}, + 
func(x:RegionId,y:RegionId):Bool{x==y}, + idHash, + func(doc:RegionDoc):RegionInfo = shared { + id=doc.id; + short_name=doc.short_name; + description=doc.description; + }, + func(info:RegionInfo):?RegionDoc = ?(new { + id=info.id; + short_name=info.short_name; + description=info.description; + }), + ); + + /** + `produceTable` + ----------------- + */ + + var produceTable : ProduceTable = + DocTable( + 0, + func(x:ProduceId):ProduceId{x+1}, + func(x:ProduceId,y:ProduceId):Bool{x==y}, + idHash, + func(doc:ProduceDoc):ProduceInfo = shared { + id=doc.id; + short_name=doc.short_name; + description=doc.description; + grade=doc.grade; + }, + func(info:ProduceInfo):?ProduceDoc = ?(new { + id=info.id; + short_name=info.short_name; + description=info.description; + grade=info.grade; + }), + ); + + /** + `producerTable` + ----------------- + */ + + var producerTable : ProducerTable = + DocTable( + 0, + func(x:ProducerId):ProducerId{x+1}, + func(x:ProducerId,y:ProducerId):Bool{x==y}, + idHash, + func(doc:ProducerDoc):ProducerInfo = shared { + id=doc.id; + short_name=doc.short_name; + description=doc.description; + region=doc.region.id; + inventory=[]; + reserved=[]; + }, + func(info:ProducerInfo):?ProducerDoc = + switch (regionTable.getDoc(info.region)) { + case (?regionDoc) { + ?(new { + id=info.id; + short_name=info.short_name; + description=info.description; + region=regionDoc; + inventory=Table.empty(); + reserved=Table.empty(); + } + )}; + case (null) { + null + }; + } + ); + + + /** + `inventoryTable` + --------------- + */ + + var inventoryTable : InventoryTable = + DocTable( + 0, + func(x:InventoryId):InventoryId{x+1}, + func(x:InventoryId,y:InventoryId):Bool{x==y}, + idHash, + func(doc:InventoryDoc):InventoryInfo = shared { + id=doc.id; + produce=doc.produce.id; + producer=doc.producer; + quantity=doc.quantity; + weight=doc.weight; + ppu=doc.ppu; + start_date=doc.start_date; + end_date=doc.end_date; + comments=doc.comments; + }, + 
func(info:InventoryInfo):?InventoryDoc = { + // validate the info's producer and produce ids + switch (producerTable.getDoc(info.producer), + produceTable.getDoc(info.produce)) { + case (?producerDoc, ?produceDoc) { + ?(new { + id=info.id; + produce=produceDoc; + producer=producerDoc.id; + quantity=info.quantity; + weight=info.weight; + ppu=info.ppu; + start_date=info.start_date; + end_date=info.end_date; + comments=info.comments; + }) + }; + case _ { + null + } + }} + ); + + + /** + `transporterTable` + ----------------- + */ + + var transporterTable : TransporterTable = + DocTable ( + 0, + func(x:TransporterId):TransporterId{x+1}, + func(x:TransporterId,y:TransporterId):Bool{x==y}, + idHash, + func(doc:TransporterDoc):TransporterInfo = shared { + id=doc.id; + short_name=doc.short_name; + description=doc.description; + routes=[]; + reserved=[]; + }, + func(info:TransporterInfo):?TransporterDoc = + ?(new { + id=info.id; + short_name=info.short_name; + description=info.description; + routes=Table.empty(); + reserved=Table.empty(); + }) + ); + + /** + `retailerTable` + ----------------- + */ + + var retailerTable : RetailerTable = + DocTable( + 0, + func(x:RetailerId):RetailerId{x+1}, + func(x:RetailerId,y:RetailerId):Bool{x==y}, + idHash, + func(doc:RetailerDoc):RetailerInfo = shared { + id=doc.id; + short_name=doc.short_name; + description=doc.description; + region=doc.region.id; + reserved_routes=[]; + reserved_items=[]; + }, + func(info:RetailerInfo):?RetailerDoc { + switch (regionTable.getDoc(info.region)) + { + case (?regionDoc) { + ?(new { + id=info.id; + short_name=info.short_name; + description=info.description; + region=regionDoc; + reserved=null; + } + )}; + case (null) { null }; + }} + ); + + var retailerQueryCount : Nat = 0; + var retailerQueryCost : Nat = 0; + var retailerJoinCount : Nat = 0; + + /** + `routeTable` + ---------------- + */ + + var routeTable : RouteTable = + DocTable ( + 0, + func(x:RouteId):RouteId{x+1}, + 
func(x:RouteId,y:RouteId):Bool{x==y}, + idHash, + func(doc:RouteDoc):RouteInfo = shared { + id=doc.id; + transporter=doc.transporter; + truck_type=(truckTypeTable.getInfoOfDoc())(doc.truck_type); + start_region=doc.start_region.id; + end_region=doc.end_region.id; + start_date=doc.start_date; + end_date=doc.end_date; + cost=doc.cost; + }, + func(info:RouteInfo):?RouteDoc { + switch (transporterTable.getDoc(info.transporter), + truckTypeTable.getDoc(info.truck_type.id), + regionTable.getDoc(info.start_region), + regionTable.getDoc(info.end_region)) + { + case (?_, ?truckType, ?startRegion, ?endRegion) { + ?(new { + id=info.id; + transporter=info.transporter; + truck_type=truckType; + start_region=startRegion; + end_region=endRegion; + start_date=info.start_date; + end_date=info.end_date; + cost=info.cost; + }) + }; + case _ { null } + }} + ); + + + /** + `reservedInventoryTable` + --------------------------- + */ + + var reservedInventoryTable : ReservedInventoryTable = + DocTable( + 0, + func(x:ReservedInventoryId):ReservedInventoryId{x+1}, + func(x:ReservedInventoryId,y:ReservedInventoryId):Bool{x==y}, + idHash, + func(doc:ReservedInventoryDoc):ReservedInventoryInfo = shared { + id=doc.id; + item=doc.item.id; + retailer=doc.retailer + }, + func(info:ReservedInventoryInfo):?ReservedInventoryDoc = { + // validate the info's item id + switch (inventoryTable.getDoc(info.id), + retailerTable.getDoc(info.retailer)) { + case (?item_, ?_) { + ?(new { + id=info.id; + item=item_:InventoryDoc; + retailer=info.retailer; + }) + }; + case _ { + null + } + }} + ); + + /** + `reservedRouteTable` + ---------------- + */ + + var reservedRouteTable : ReservedRouteTable = + DocTable( + 0, + func(x:ReservedRouteId):ReservedRouteId{x+1}, + func(x:ReservedRouteId,y:ReservedRouteId):Bool{x==y}, + idHash, + func(doc:ReservedRouteDoc):ReservedRouteInfo = shared { + id=doc.id; + route=doc.route.id; + retailer=doc.retailer + }, + func(info:ReservedRouteInfo):?ReservedRouteDoc = { + // 
validate the info's item id + switch (routeTable.getDoc(info.id), + retailerTable.getDoc(info.retailer)) { + case (?route_, ?_) { + ?(new { + id=info.id; + route=route_:RouteDoc; + retailer=info.retailer; + }) + }; + case _ { + null + } + }} + ); + + /** + Indexing by `UserName` + ===================================== + */ + + private var usersByUserName + : UserNameMap = null; + + /** + + Indexing by `RegionId` + ===================================== + + For efficient joins, we need some extra indexing. + + Regions as keys in special global maps + --------------------------------------- + - inventory (across all producers) keyed by producer region + - routes (across all transporters) keyed by source region + - routes (across all transporters) keyed by destination region + + Routes by region + ---------------------------- + + the actor maintains a possibly-sparse 3D table mapping each + region-region-routeid triple to zero or one routes. First index + is destination region, second index is source region; this 2D + spatial coordinate gives all routes that go to that destination + from that source, keyed by their unique route ID, the third + coordinate of the mapping. + + */ + + private var routesByDstSrcRegions : ByRegionPairRouteMap = null; + + /** + Inventory by region + ---------------------------- + + the actor maintains a possibly-sparse 3D table mapping each + sourceregion-producerid-inventoryid triple to zero or one + inventory items. The 1D coordinate sourceregion gives all of the + inventory items, by producer id, for this source region. 
+ + */ + + private var inventoryByRegion : ByRegionInventoryMap = null; + + /** + + Reserved inventory by produce and region + -------------------------------------------- + + The `produceMarketInfo` query asks the server for market info: + + > the last sales price for produce within a given geographic area + + To answer this query more efficiently under a system with many +reservations across region and produce kind, the following mapping +maintains a 3D table of reservations organized by +region-produce-reservationid coordinates. + + There need only be one reservationid for any given region produce +pair: without affecting the ability to answer the query above, we can +drop reservations for a given region-produce that are older than newer +reservations for the same sub-space. + + Alternatively, we need not drop these older records, and +instead, we could do a weighted average of the entire reservation +history to answer market info queries more accurately; or, to save +space, we could eventually maintain a running average, rather than +merely forget the older prices. Doing either of these is more complex +than the MVP goals, however. + + */ + private var reservationsByProduceByRegion + : ByProduceByRegionInventoryReservationMap = null; + + + /** + + Future work: Indexing by time + -------------------------------- + For now, we won't try to index based on days. + + If and when we want to do so, we would like to have a spatial + data structure that knows about each object's "interval" in a + single shared dimension (in time): + + - inventory, by availability window (start day, end day) + - routes, by transport window (departure day, arrival day) + + */ + + /** + + Message-response specifications + ====================================================== + + As explained in the `README.md` file, this actor also gives a + behavioral spec of the exchange's semantics, by giving a prototype + implementation of this behavior (and wrapped trivially by `Server`). 
+ + The functional behavior of this interface, but not implementation + details, are part of the formal spec. + + */ + + /** + + `User`-oriented operations + ========================================== + + */ + + + /** + + `addUser` + --------- + + The given `user_name` must be unique to the exchange; the operation fails otherwise. + + */ + addUser( + user_name_: Text, + public_key_: Text, + description_: Text, + region_: RegionId, + isDeveloper_: Bool, + isProducer: Bool, + isRetailer: Bool, + isTransporter: Bool + ) : ?UserId { + + /**- Fail immediately if the user name is already taken: */ + switch (Trie.find(usersByUserName, keyOfText(user_name_), textIsEq)) { + case null {}; + case (?_) { return null }; + }; + + /**- Fail immediately if the region Id is invalid: */ + switch (regionTable.getDoc(region_)) { + case null { return null }; + case (?_) {}; + }; + + /** Input is valid: All subsequent operations will succeed: */ + + /**- Create a producer role for the user: */ + let prId = if isProducer { producerTable.addInfoGetId( + func(id_:ProducerId):ProducerInfo { + shared { + id=id_:ProducerId; + short_name=user_name_; + description=description_; + region=region_; + inventory=[]; + reserved=[]; + } + }) } else null; + + /**- Create a transporter role for the user: */ + let trId = if isTransporter { transporterTable.addInfoGetId( + func(id_:TransporterId):TransporterInfo { + shared { + id=id_:TransporterId; + short_name=user_name_; + description=description_; + routes=[]; + reserved=[]; + } + }) } else null; + + /**- Create a retailer role for the user: */ + let rrId = if isRetailer { retailerTable.addInfoGetId( + func(id_:RetailerId):RetailerInfo { + shared { + id=id_; + short_name=user_name_; + description=description_; + region=region_:RegionId; + } + }) } else null; + + /**- Record the user information: */ + let id = userTable.addInfoGetId( + func (id_: UserId) : UserInfo = + shared { + id = id_; + user_name = user_name_; + public_key = public_key_; + 
description = description_; + region = region_; + producerId = prId; + transporterId = trId; + retailerId = rrId; + isDeveloper = isDeveloper_; + }); + + /**- Record the mapping from user-chosen name to exchange-chosen id: */ + usersByUserName := + Trie.insertFresh( + usersByUserName, + keyOfText(user_name_), textIsEq, + unwrap(id) + ); + + /**- return the id */ + id + }; + + + /** + + `Produce`-oriented operations + ========================================== + + */ + + + /** + `produceMarketInfo` + --------------------------- + The last sales price for produce within a given geographic area; null region id means "all areas." + */ + produceMarketInfo(produce_id:ProduceId, region_oid:?RegionId) : ?[ProduceMarketInfo] { + // switch (Map.find>>( + // reservationsByProduceByRegion, + // produce_id, idIsEq)) { + // case null { return null }; + null + }; + + /** + + `Producer`-facing operations + ========================================== + + */ + + + /** + // `producerAllInventoryInfo` + // --------------------------- + */ + producerAllInventoryInfo(id:ProducerId) : ?[InventoryInfo] { + let doc = switch (producerTable.getDoc(id)) { + case null { return null }; + case (?doc) { doc }; + }; + ?Map.toArray( + doc.inventory, + func (_:InventoryId,doc:InventoryDoc):[InventoryInfo] = + [inventoryTable.getInfoOfDoc()(doc)] + ) + }; + + /** + `producerAddInventory` + --------------------------- + + */ + producerAddInventory( + iid_ : ?InventoryId, + id_ : ProducerId, + produce_id : ProduceId, + quantity_ : Quantity, + weight_ : Weight, + ppu_ : Price, + start_date_: Date, + end_date_ : Date, + comments_ : Text, + ) : ?InventoryId + { + /** The model adds inventory and maintains secondary indicies as follows: */ + + /**- Validate these ids; fail fast if not defined: */ + let oproducer : ?ProducerDoc = producerTable.getDoc(id_); + let oproduce : ?ProduceDoc = produceTable.getDoc(produce_id); + let (producer_, produce_) = { + switch (oproducer, oproduce) { + case (?producer, 
?produce) (producer, produce); + case _ { return null }; + }}; + + /**- Create the inventory item document: */ + let (_, item) = { + switch (inventoryTable.addInfoAs(iid_, + func(iid:InventoryId):InventoryInfo{ + shared { + id = iid :InventoryId; + produce = produce_id:ProduceId; + producer = id_ :ProducerId; + quantity = quantity_ :Quantity; + weight = weight_ :Weight; + ppu = ppu_ :Price; + start_date=start_date_:Date; + end_date =end_date_ :Date; + comments =comments_ :Text; + }; + })) { + case (?item) { item }; + case (null) { unreachable() }; + } + }; + + /**- Update the producer's inventory collection to hold the new inventory document: */ + let updatedInventory = + Map.insertFresh( + producer_.inventory, + keyOf(item.id), + idIsEq, + item + ); + + /**- Update the producer document; xxx more concise syntax for functional record updates would be nice: */ + let _ = producerTable.updateDoc( + producer_.id, + new { + id = producer_.id; + short_name = producer_.short_name; + description = producer_.description; + region = producer_.region; + reserved = producer_.reserved; + inventory = updatedInventory; + }); + + /**- Update inventoryByRegion mapping: */ + inventoryByRegion := + Map.insert2D( + inventoryByRegion, + keyOf(producer_.region.id), idIsEq, + keyOf(producer_.id), idIsEq, + updatedInventory, + ); + + ?item.id + }; + + /** + `producerUpdateInventory` + --------------------------- + + */ + producerUpdateInventory( + iid_ : InventoryId, + id_ : ProducerId, + produce_id : ProduceId, + quantity_ : Quantity, + weight_ : Weight, + ppu_ : Price, + start_date_: Date, + end_date_ : Date, + comments_ : Text, + ) : ?() + { + /**- Validate these ids; fail here if anything is invalid: */ + let oinventory : ?InventoryDoc = inventoryTable.getDoc(iid_); + let oproducer : ?ProducerDoc = producerTable.getDoc(id_); + let oproduce : ?ProduceDoc = produceTable.getDoc(produce_id); + let (inventory_, oproducer_, produce_) = { + switch (oinventory, oproducer, oproduce) { + case 
(?inventory, ?producer, ?produce) { + // it's an error if the producer is not fixed across the + // update. i.e., producer A cannot update the inventory + // of producer B, only her own. + if ( inventory.producer == producer.id ) { + (inventory, producer, produce) + } else { + return null + } + }; + case _ { return null }; + }}; + + /**- remove the inventory item; given the validation above, this cannot fail. */ + assertSome<()>( producerRemInventory(iid_) ); + + /**- add the (updated) inventory item; given the validation above, this cannot fail. */ + assertSome( + producerAddInventory( + ?iid_, id_, + produce_id, + quantity_, weight_, ppu_, start_date_, end_date_, comments_ ) + ); + + /**- Success! */ + ?() + }; + + /** + `producerRemInventory` + --------------------------- + + Remove the given inventory item from the exchange. + + */ + producerRemInventory(id:InventoryId) : ?() { + + /**- validate the `id` */ + /// xxx macro for this pattern? + let doc = switch (inventoryTable.getDoc(id)) { + case null { return null }; + case (?doc) { doc }; + }; + + /**- remove document from `inventoryTable` */ + assertSome( + inventoryTable.rem( id ) + ); + + /**- remove document from `producerTable`, in several steps: */ + /// xxx macro for this pattern? + let producer = switch (producerTable.getDoc(doc.producer)) { + case null { unreachable() }; + case (?x) { x }; + }; + + /// xxx an abstraction to hide these type arguments? + let (updatedInventory, _) = + Trie.remove( + producer.inventory, keyOf(id), idIsEq); + + /// xxx syntax for functional record updates? 
+ let updatedProducer = new { + id = producer.id ; + short_name = producer.short_name ; + description = producer.description ; + region = producer.region ; + inventory = updatedInventory ; + reserved = producer.reserved ; + }; + + assertSome( + producerTable.updateDoc( producer.id, updatedProducer ) + ); + + /**- remove document from table `inventoryByRegion`: */ + /// xxx an abstraction to hide this tuple projection, assignment, and type args? + inventoryByRegion := { + let (t, d) = Trie.remove3D( + inventoryByRegion, + keyOf(producer.region.id), idIsEq, + keyOf(producer.id), idIsEq, + keyOf(id), idIsEq + ); + assertSome(d); + t + }; + + ?() + }; + + /** + `producerReservations` + --------------------------- + + */ + producerReservations(id:ProducerId) : ?[ReservedInventoryInfo] { + let doc = switch (producerTable.getDoc(id)) { + case null { return null }; + case (?doc) { doc }; + }; + ?Map.toArray( + doc.reserved, + func (_:ReservedInventoryId, + doc:ReservedInventoryDoc): + [ReservedInventoryInfo] + = + [reservedInventoryTable.getInfoOfDoc()(doc)] + ) + }; + + + /** + `Transporter`-facing operations + ================= + */ + + + /** + `transporterAddRoute` + --------------------------- + */ + transporterAddRoute( + id_: TransporterId, + start_region_id: RegionId, + end_region_id: RegionId, + start_date_: Date, + end_date_: Date, + cost_: Price, + trucktype_id: TruckTypeId + ) : ?RouteId { + /** The model adds inventory and maintains secondary indicies as follows: */ + + /**- Validate these ids; fail fast if not defined: */ + let otransporter : ?TransporterDoc = transporterTable.getDoc(id_); + let orstart : ?RegionDoc = regionTable.getDoc(start_region_id); + let orend : ?RegionDoc = regionTable.getDoc(end_region_id); + let otrucktype : ?TruckTypeDoc = truckTypeTable.getDoc(trucktype_id); + let (transporter, start_region_, end_region_, truck_type_) = { + switch (otransporter, orstart, orend, otrucktype) { + case (?x1, ?x2, ?x3, ?x4) (x1, x2, x3, x4); + case _ { 
return null }; + }}; + + /**- Create the route item document: */ + let (_, route) = routeTable.addDoc( + func(routeId:RouteId):RouteDoc{ + new { + id= routeId; + transporter=id_; + truck_type=truck_type_; + start_date=start_date_; + end_date=end_date_; + start_region=start_region_; + end_region=end_region_; + cost=cost_; + }; + }); + + /**- Update the transporter's routes collection to hold the new route document: */ + let updatedRoutes = + Map.insertFresh( + transporter.routes, + keyOf(route.id), + idIsEq, + route + ); + + /**- Update the transporter document; xxx more concise syntax for functional record updates would be nice: */ + let _ = transporterTable.updateDoc( + transporter.id, + new { + id = transporter.id; + short_name = transporter.short_name; + description = transporter.description; + reserved = transporter.reserved; + routes = updatedRoutes; + }); + + /**- Update the [`routesByDstSrcRegions` mapping](#routes-by-region) using the route's regions and id */ + routesByDstSrcRegions := + Map.insert3D( + routesByDstSrcRegions, + keyOf(end_region_.id), idIsEq, + keyOf(start_region_.id), idIsEq, + keyOf(route.id), idIsEq, + route + ); + + ?route.id + }; + + /** + `transporterRemRoute` + --------------------------- + + + **Implementation summary:** + + - remove from the inventory in inventory table; use `Trie.removeThen` + - if successful, look up the producer ID; should not fail; `Trie.find` + - update the transporter, removing this inventory; use `Trie.{replace,remove}` + - finally, use route info to update the routesByRegion table, + removing this inventory item; use `Trie.remove2D`. 
+ */ + transporterRemRoute(id:RouteId) : ?() { + nyi() + }; + + /** + `transporterAllRouteInfo` + --------------------------- + */ + transporterAllRouteInfo(id:RouteId) : ?[RouteInfo] { + let doc = switch (transporterTable.getDoc(id)) { + case null { return null }; + case (?doc) { doc }; + }; + ?Map.toArray( + doc.routes, + func (_:RouteId, + doc:RouteDoc): + [RouteInfo] + = + [routeTable.getInfoOfDoc()(doc)] + ) + }; + + /** + `transporterReservationInfo` + --------------------------- + + */ + transporterAllReservationInfo(id:TransporterId) : ?[ReservedRouteInfo] { + let doc = switch (transporterTable.getDoc(id)) { + case null { return null }; + case (?doc) { doc }; + }; + ?Map.toArray( + doc.reserved, + func (_:ReservedRouteId, + doc:ReservedRouteDoc): + [ReservedRouteInfo] + = + [reservedRouteTable.getInfoOfDoc()(doc)] + ) + }; + + + /** + `Retailer`-facing operations + ==================== + */ + + + /** + `makeReservationInfo` + ---------------------- + Prepare reservation information for a server client + based on the given inventory and route documents. + */ + makeReservationInfo(item:InventoryDoc, route:RouteDoc) : ReservationInfo { + shared { + produce =item.produce.id :ProduceId; + producer =item.producer :ProducerId; + quant =item.quantity :Quantity; + ppu =item.ppu :Price; + weight =item.weight :Weight; + prod_cost=item.quantity * item.ppu:Price; + + transporter = route.transporter :TransporterId; + truck_type = route.truck_type.id :TruckTypeId; + + region_begin = route.start_region.id:RegionId; + region_end = route.end_region.id :RegionId; + date_begin = route.start_date :Date; + date_end = route.end_date :Date; + trans_cost = route.cost: Price; + } + }; + + + /** + + `isCompatibleTruckType` + ---------------------- + + Check whether the given truck type can accommodate the given produce type. 
+ + */ + isCompatibleTruckType(tt:TruckTypeDoc, produce:ProduceDoc) : Bool { + // todo + true + }; + + /** + + `isFeasibleReservation` + ---------------------- + Check whether the given retailer can reserve the given item and route pair. + + */ + + isFeasibleReservation(retailer:RetailerDoc, item:InventoryDoc, route:RouteDoc) : Bool { + /** - window start: check that the route begins after the inventory window begins */ + if (item.start_date > route.start_date) { + debugOff "nope: item start after route start\n"; + return false + }; + /** - window end: check that the route ends before the inventory window ends */ + if (route.end_date > item.end_date) { + debugOff "nope: route ends after item ends\n"; + return false + }; + /** - check that truck can carry the given produce */ + if (not isCompatibleTruckType(route.truck_type, item.produce)) { + debugOff "nope: truck is not compatible\n"; + return false + }; + /** - all checks pass: */ + true + }; + + /** to do: check route window inside of inventory window, e.g., + by 1 day before and 3 days after on each side: */ + + /** + `retailerQueryAll` + --------------------------- + + List all available inventory items and routes for a given retailer. + + The business logic: + - [`isCompatibleTruckType`](#isCompatibleTruckType): Checks truck and produce compatibility. + - [`isFeasibleReservation`](#isFeasibleReservation): Checks timing constraints. + - [`makeReservationInfo`](#makereservationinfo): Summarizes the reserved route and inventory documents. + + For `Trie`-based DB operations: + - [`Trie.join`]($DOCURL/trie.md#join): For the inner join on common `RegionId`s of routes and inventory. + - [`Trie.prod`]($DOCURL/trie.md#prod): For the catesian product of routes and inventory. + - [`Trie.mergeDisjoint2D`]($DOCURL/trie.md#mergeDisjoint2D): To flatten 2D mappings into 1D mappings. 
+ */ + retailerQueryAll(id:RetailerId) : ?QueryAllResults { + retailerQueryCount += 1; + + /** - Find the retailer's document: */ + let retailer = + switch (retailerTable.getDoc(id)) { + case (null) { return null }; + case (?x) { x }}; + + /** - Find all routes whose the destination region is the retailer's region: */ + let retailerRoutes = + switch (Trie.find( + routesByDstSrcRegions, + keyOf(retailer.region.id), + idIsEq + )) { + case (null) { return null }; + case (?x) { x }}; + + /** - Temp: */ + debug "- retailer is located in region "; + debugInt (retailer.region.id); + debug ", and\n- is accessible via routes from "; + debugInt(Trie.count(retailerRoutes)); + debug " production regions.\n"; + + /** - Join: For each production region, consider all routes and inventory: */ + let queryResults : Trie = { + retailerJoinCount += 1; + Trie.join( + retailerRoutes, + inventoryByRegion, + idIsEq, + func (routes:RouteMap, + inventory:ByProducerInventoryMap) : RouteInventoryMap + { + + /** - Within this production region, consider every route-item pairing: */ + let product = Trie.prod( + routes, + /** - (To perform this Cartesian product, use a 1D inventory map:) */ + Trie.mergeDisjoint2D( + inventory, idIsEq, idIsEq), + + func (route_id:RouteId, + route :RouteDoc, + item_id :InventoryId, + item :InventoryDoc) : + ?(Key<(RouteId, InventoryId)>, + (RouteDoc, InventoryDoc)) + { + retailerQueryCost += 1; + /** - Consider the constraints of the retailer-route-item combination: */ + if (isFeasibleReservation(retailer, item, route)) { + ?( keyOfIdPair(route_id, item_id), + (route, item) + ) + } else { null } + }, + idPairIsEq + ); + product + } + )}; + + /** - The results are still organized by producer region; merge all such regions: */ + let queryResultsMerged : RouteInventoryMap = + Trie.mergeDisjoint2D( + queryResults, idIsEq, idPairIsEq); + + debug "- query result count: "; + debugInt(Trie.count<(RouteId, InventoryId), + (RouteDoc, InventoryDoc)>(queryResultsMerged)); + 
debug " (count of feasible route-item pairs).\n"; + + /** - Prepare reservation information for client, as an array; see also [`makeReservationInfo`](#makereservationinfo) */ + let arr = + Trie.toArray<(RouteId, InventoryId), + (RouteDoc, InventoryDoc), + ReservationInfo>( + queryResultsMerged, + func (_:(RouteId,InventoryId), (r:RouteDoc, i:InventoryDoc)) + : [ ReservationInfo ] { + [ makeReservationInfo(i, r) ] + }); + + ?arr + }; + + /** + `retailerAllReservationInfo` + --------------------------- + + */ + retailerAllReservationInfo(id:RetailerId) : + ?[(ReservedInventoryInfo, + ReservedRouteInfo)] + { + let doc = switch (retailerTable.getDoc(id)) { + case null { return null }; + case (?doc) { doc }; + }; + ?Map.toArray( + doc.reserved, + func (_:ReservedInventoryId, + ((idoc:ReservedInventoryDoc), + (rdoc:ReservedRouteDoc))) + : + [(ReservedInventoryInfo, + ReservedRouteInfo)] + = + [(reservedInventoryTable.getInfoOfDoc()(idoc), + reservedRouteTable.getInfoOfDoc()(rdoc))] + ) + }; + + /** + `retailerQueryDates` + --------------------------- + + Retailer queries available produce by delivery date range; returns + a list of inventory items that can be delivered to that retailer's + geography within that date. 
+ + ``` + let jt = (joinTablesConditionally + (routesByDstSrcRegionTable (retailer region)) + inventoryByRegionTable + filterByDateConstraints + ); + ``` + + */ + retailerQueryDates( + id:RetailerId, + begin:Date, + end:Date + ) : ?[InventoryInfo] + { + retailerQueryCount += 1; + + nyi() + }; + + /** + `retailerReserve` + --------------------------- + */ + retailerReserve( + id:RetailerId, + inventory:InventoryId, + route:RouteId) : ?(ReservedRouteId, ReservedInventoryId) + { + nyi() + }; + +}; diff --git a/stdlib/examples/produce-exchange/serverModelTypes.as b/stdlib/examples/produce-exchange/serverModelTypes.as new file mode 100644 index 00000000000..9a463cd0f80 --- /dev/null +++ b/stdlib/examples/produce-exchange/serverModelTypes.as @@ -0,0 +1,330 @@ +/** + +[Background]($DOCURL/examples/produce-exchange#Produce-Exchange-Standards-Specification) +-------------------- + +Server Model Types +================== + +This file defines structures that implement the server actor's +internal model of the exchange. + +They are _not_ present in the public-facing interface of the server; +they are only are used in its internal model implementation +`serverModel.as`. + +*/ + + +/** + +Representation +================= + +*/ + +/** + Finite maps + ------------ + + See also: modules for [`Trie`]() + and [`DocTable`]($DOCURL/docTable.html). + +*/ + +type Map = Trie; +let Map = Trie; + + +/** + +[Document tables]($DOCURL/docTable.html) +-------------------------- + +Document tables abstract over the various finite map operations we +commonly need for each kind of entity in the exchange model. + + +Each table is a map from distinct ids to documents. These tables, and +the documents that they manage, serve as the central abstraction in +the representation of the exchange. 
+ +Nested document structures +----------------------------- + +Below, we define top-level **document structures** for representing each `Producer`, +`Retailer` and `Transporter`'s officially published state within the exchange. + +Formally, these types define the types of forests (a set of trees with many +roots) that constitute our internal data model. + +For each kind of structure below, we assume a type of unique Id. + +We associate document information, such as textual names and +descriptions, where appropriate. + +We include other fields such as "units", "grades", +"dates" and time intervals (start/end dates), each where appropriate. + +Query implementation +--------------------- + +The retailers perform queries by joining information across the +producers and transporters tables, and their inventory and route +information, respectively. + +Orders (Reservations) implementation +------------------------------------- + +We refer to orders placed by retailrs here as "reservations", since +the latter word is less ambiguous. + +To simplify query implementation over reservations, and to improve +this query response time, we store reservations in two places, with +internal sharing: + + - The currently-reserved routes and inventory are stored with their + transporters and producers, respectively. + + - The currently-reserved routes and inventory of each retailer are + additionally stored with this retailer. + +*/ + + + +/** +`UserDoc` +------------- +User documents. +*/ + +type UserDoc = { + id: UserId; + user_name: Text; + public_key: Text; + description: Text; + region: RegionId; + producerId: ?ProducerId; + transporterId: ?TransporterId; + retailerId: ?RetailerId; + isDeveloper: Bool; +}; + +type UserTable = + DocTable; + +type UserNameMap = + Map; + +/** + `TruckType` documents + ================== + + - See also [`serverTypes`]() for `TypeTypeId` and `TypeTypeInfo`. + - See also [`DocTable`](). 
+ */ + +type TruckTypeDoc = { + id : TruckTypeId; + short_name : Text; + description : Text; + capacity : TruckCapacity; + // xxx variant type for this temperature-control information: + isFridge : Bool; + isFreezer : Bool; +}; + +type TruckTypeTable = + DocTable; + +/** + `Region` documents + ========================== + */ + +type RegionDoc = { + id : RegionId; + short_name : Text; + description : Text; +}; + +type RegionTable = + DocTable; + +/** + `Produce` documents + ================== + */ + +type ProduceDoc = { + id : ProduceId; + short_name : Text; + description : Text; + grade : Grade; +}; + +type ProduceTable = + DocTable; + +/** + `Producer` documents + ======================= + */ + +type ProducerDoc = { + id : ProducerId; + short_name : Text; + description : Text; + region : RegionDoc; + inventory : InventoryMap; + reserved : ReservedInventoryMap; +}; + +type ProducerTable = + DocTable; + +/** + `Inventory` documents + ======================== + */ + +type InventoryDoc = { + id : InventoryId; + produce : ProduceDoc; + producer : ProducerId; + ppu : Price; + quantity : Quantity; + weight : Weight; + start_date : Date; + end_date : Date; + comments : Text; +}; + +type InventoryTable = + DocTable; + +type InventoryMap = + Map; + +type ByProducerInventoryMap = + Map>; + +/** + By-region inventory indexing + ----------------------------- +*/ +type ByRegionInventoryMap = Map; + +/** + `ReservedInventory` documents + ================================== +*/ + +type ReservedInventoryDoc= { + id : ReservedInventoryId; + retailer : RetailerId; + item : InventoryDoc; +}; + +/** + Reserved inventory indexing + ----------------------------- +*/ + +type ReservedInventoryTable = + DocTable; + +type ReservedInventoryMap = + Map; + +/** + `Retailer` documents + ================== + */ + +type RetailerDoc = { + id : RetailerId; + short_name : Text; + description : Text; + region : RegionDoc; + reserved : ReservedInventoryRouteMap; +}; + +type RetailerTable = + DocTable; 
+ +type ReservedInventoryRouteMap = + Map; + +type ByProduceByRegionInventoryReservationMap = + Map>>; + +/** + `Transporter` documents + ================== + */ + +type TransporterDoc = { + id : TransporterId; + // no region; the transporters are the supply of routes, not "end + // points" of any single route. + short_name : Text; + description : Text; + routes : RouteMap; + reserved : ReservedRouteMap; +}; + +type TransporterTable = + DocTable; + +/** + `Route` documents + ================== + */ + +type RouteDoc = { + id : RouteId; + transporter : TransporterId; + truck_type : TruckTypeDoc; + start_region : RegionDoc; + end_region : RegionDoc; + start_date : Date; + end_date : Date; + cost : Price; + // ... more? +}; + +type RouteTable = + DocTable; + + +type RouteMap = + Map; + +/** + By-region inventory indexing + ----------------------------- +*/ + +// A possibly-sparse 2D table mapping each region-routeid pair to zero or one routes. +type ByRegionRouteMap = Map; + +// A possibly-sparse 3D table mapping each region-region-routeid triple to zero or one routes. +type ByRegionPairRouteMap = Map; + + +/** + Reserved inventory indexing + ----------------------------- +*/ + +type ReservedRouteDoc = { + id : ReservedRouteId; + retailer : RetailerId; + route : RouteDoc; +}; + +type ReservedRouteTable = DocTable; + +type ReservedRouteMap = Map; diff --git a/stdlib/examples/produce-exchange/serverTypes.as b/stdlib/examples/produce-exchange/serverTypes.as new file mode 100644 index 00000000000..d614c473199 --- /dev/null +++ b/stdlib/examples/produce-exchange/serverTypes.as @@ -0,0 +1,330 @@ +/** + +[Background]($DOCURL/examples/produce-exchange#Produce-Exchange-Standards-Specification) +-------------------- + +Server Types +================== + +This file defines structures that appear the server actor's messaging +interface. They are part of the formal standards definition. 
+ +*/ + + +/** +Basic types +--------------------- +These types standardize representations for many common notions + +*/ + +// import Date +// xxx Dates, eventually from a standard library: +type Date = Nat; + +// xxx standard weight units? +type Weight = Nat; + +// xxx standard price units? +type Price = Nat; + +type Unit = Nat; // xxx replace with a variant type +type Grade = Nat; // xxx replace with a variant type + +type TruckCapacity = Weight; + +type Quantity = Nat; + +type PricePerUnit = Price; // needed to calculate prices +type PriceTotal = Price; + +/** +User Names +----------------- + +Each user of the exchange chooses a unique _user name_, represented as `Text`. + +In response to this choice, the exchange assigns the user a unique Id (see below); it maintains a mapping from this user name to the (internally-chosen) user Id. +*/ + +type UserName = Text; + +/** +Unique Ids +----------------- + +The produce exchange uses unique ids to concisely identify entities in the system. Each id is a number. + +Externally, these Ids give a unique identifier that is unique to its type, but not universally unique. + +Internally, each type of Id serves as a "row key" for a table (or two). 
+ +*/ + +type UserId = Nat; + +type RegionId = Nat; +type TruckTypeId = Nat; +type ProduceId = Nat; + +type ProducerId = Nat; +type InventoryId = Nat; +type ReservedInventoryId = Nat; +type RetailerId = Nat; +type TransporterId = Nat; +type RouteId = Nat; +type ReservedRouteId = Nat; + +/** +Public info associated with Ids +===================================== +*/ + +/** +`UserInfo` +------------- +*/ + +type UserInfo = shared { + id: UserId; + user_name: Text; + public_key: Text; + description: Text; + region: RegionId; + producerId: ?ProducerId; + transporterId: ?TransporterId; + retailerId: ?RetailerId; + isDeveloper: Bool; +}; + + +/** +`TruckTypeInfo` +----------------- +*/ +type TruckTypeInfo = shared { + id : TruckTypeId; + short_name : Text; + description : Text; + capacity : TruckCapacity; + // xxx variant type for this temperature-control information: + isFridge : Bool; + isFreezer : Bool; +}; + +/** +`RegionInfo` +----------------- +*/ +type RegionInfo = shared { + id : RegionId; + short_name : Text; + description : Text; +}; + +/** +`ProduceInfo` +----------------- +*/ + +type ProduceInfo = shared { + id : ProduceId; + short_name : Text; + description : Text; + grade : Grade; +}; + +/** +`ProducerInfo` +----------------- +*/ + +type ProducerInfo = shared { + id : ProducerId; + short_name : Text; + description : Text; + region : RegionId; + inventory : [InventoryId]; + reserved : [ReservedInventoryId] +}; + +/** +`ReservedInventoryInfo` +----------------------------- +*/ + +type ReservedInventoryInfo = shared { + id : ReservedInventoryId; + retailer : RetailerId; + item : InventoryId; +}; + +/** +`InventoryInfo` +----------------- +*/ + +type InventoryInfo = shared { + id : InventoryId; + produce : ProduceId; + producer : ProducerId; + quantity : Quantity; + weight : Weight; + ppu : Price; + start_date : Date; + end_date : Date; + comments : Text; +}; + +/** +`ProduceMarketInfo` +----------------- +*/ + +// for now, this is the same as a ReservationInfo 
+type ProduceMarketInfo = shared { + produce: ProduceId; + producer: ProducerId; + quant: Quantity; + ppu: PricePerUnit; + transporter: TransporterId; + truck_type: TruckTypeId; + weight: Weight; + region_begin:RegionId; + region_end: RegionId; + date_begin: Date; + date_end: Date; + prod_cost: PriceTotal; + trans_cost: PriceTotal; +}; + +/** +`RetailerInfo` +----------------- +*/ + +type RetailerInfo = shared { + id : RetailerId; + short_name : Text; + description : Text; + region : RegionId; +}; + +/** +`TransporterInfo` +----------------- +*/ + +type TransporterInfo = shared { + id : TransporterId; + // no region; the transporters are the supply of routes, not "end + // points" of any single route. + short_name : Text; + description : Text; + routes : [RouteId]; + reserved : [ReservedRouteId] +}; + +/** +`ReservedRouteInfo` +----------------- +*/ + +type ReservedRouteInfo = shared { + id : ReservedRouteId; + retailer : RetailerId; + route : RouteId; +}; + +/** +`RouteInfo` +----------------- +*/ + +type RouteInfo = shared { + id : RouteId; + transporter : TransporterId; + truck_type : TruckTypeInfo; + start_region : RegionId; + end_region : RegionId; + start_date : Date; + end_date : Date; + cost : Price; +}; + +/** +`ReservationInfo` +----------------- +*/ + +type ReservationInfo = shared { + produce: ProduceId; + producer: ProducerId; + quant: Quantity; + ppu: PricePerUnit; + transporter: TransporterId; + truck_type: TruckTypeId; + weight: Weight; + region_begin:RegionId; + region_end: RegionId; + date_begin: Date; + date_end: Date; + prod_cost: PriceTotal; + trans_cost: PriceTotal; +}; + + +/** + + Produce Exchange counts + ========================= + + Developer-level, counter-based information about the exchange, including counts of each kind of entity. 
+ +*/ +type ProduceExchangeCounts = shared { + truck_type_count : Nat; + region_count : Nat; + produce_count : Nat; + inventory_count : Nat; + reserved_inventory_count : Nat; + producer_count : Nat; + transporter_count : Nat; + route_count : Nat; + reserved_route_count : Nat; + retailer_count : Nat; + retailer_query_count : Nat; + retailer_query_cost : Nat; + retailer_join_count : Nat; +}; + + +/** +// +// Query parameters and results +// ---------------------------------- +// Externally, these types define the input and output structures for queries. +// Internally, producing instances of the result structures will require +// performing joins based on internal tables, and the information from the input structures. + +// Note: We are using "reserve", "reserved" and "reservation" in place of "order" +// below, since "order" and "ordering" have too many meanings in a +// broader CS/programming/query context. + +*/ + +// xxx same as a reservation structure; represents a possible reservation to make +type QueryAllResult = ReservationInfo; + +/** + +xxx how to represent huge result messages? + +2019.03.12 *** TODO-Cursors: Introduce the idea of "cursors", with +allocation, de-allocation and movement messages, as per discussion in +the 2019.03.12 ActorScript Team meeting. 
+ +*/ + +type QueryAllResults = [QueryAllResult]; diff --git a/stdlib/examples/produce-exchange/test/simpleSetupAndQuery.as b/stdlib/examples/produce-exchange/test/simpleSetupAndQuery.as new file mode 100644 index 00000000000..7d9536e5cb3 --- /dev/null +++ b/stdlib/examples/produce-exchange/test/simpleSetupAndQuery.as @@ -0,0 +1,368 @@ +func printEntityCount(entname:Text, count:Nat) { + print ("- " # entname # " count: "); + printInt count; + print "\n"; +}; + +func printLabeledCost(lab:Text, cost:Nat) { + print ("- " # lab # " cost: "); + printInt cost; + print "\n"; +}; + +actor class Test() = this { + go() { + ignore(async + { + let s = server; //Server(); + + print "\nExchange setup: Begin...\n====================================\n"; + + // populate with truck types + let tta = await s.registrarAddTruckType("tta", "", 10, false, false); + let ttb = await s.registrarAddTruckType("ttb", "", 20, false, false); + let ttc = await s.registrarAddTruckType("ttc", "", 10, true, false); + let ttd = await s.registrarAddTruckType("ttd", "", 30, true, false); + let tte = await s.registrarAddTruckType("tte", "", 50, false, true); + + printEntityCount("Truck type", (await s.getCounts()).truck_type_count); + + // populate with regions + let rega = await s.registrarAddRegion("rega", ""); + let regb = await s.registrarAddRegion("regb", ""); + let regc = await s.registrarAddRegion("regc", ""); + let regd = await s.registrarAddRegion("regd", ""); + let rege = await s.registrarAddRegion("rege", ""); + + printEntityCount("Region", (await s.getCounts()).region_count); + + // populate with produce + let pea = await s.registrarAddProduce("avocado1", "avocado", 1); + let peb = await s.registrarAddProduce("avocado2", "avocado avocado", 2); + let pec = await s.registrarAddProduce("avocado3", "avocado avocado avocado", 3); + let ped = await s.registrarAddProduce("avocado4", "avocado avocado avocado avocado", 4); + let pee = await s.registrarAddProduce("avocado5", "avocado avocado avocado 
avocado avocado", 5); + + printEntityCount("Produce", (await s.getCounts()).produce_count); + + // populate with producers + let pra = await s.registrarAddProducer("pra", "", unwrap(rega) ); + let prb = await s.registrarAddProducer("prb", "", unwrap(rega) ); + let prc = await s.registrarAddProducer("prc", "", unwrap(regb) ); + let prd = await s.registrarAddProducer("prd", "", unwrap(rega) ); + let pre = await s.registrarAddProducer("pre", "", unwrap(regb) ); + + printEntityCount("Producer", (await s.getCounts()).producer_count); + + // populate with transporters + let tra = await s.registrarAddTransporter("tra", "" ); + let trb = await s.registrarAddTransporter("trb", "" ); + let trc = await s.registrarAddTransporter("trc", "" ); + let trd = await s.registrarAddTransporter("trd", "" ); + let tre = await s.registrarAddTransporter("tre", "" ); + + printEntityCount("Transporter", (await s.getCounts()).transporter_count); + + // populate with retailers + let rra = await s.registrarAddRetailer("rra", "", unwrap(regc) ); + let rrb = await s.registrarAddRetailer("rrb", "", unwrap(regd) ); + let rrc = await s.registrarAddRetailer("rrc", "", unwrap(rege) ); + let rrd = await s.registrarAddRetailer("rrd", "", unwrap(regc) ); + let rre = await s.registrarAddRetailer("rre", "", unwrap(rege) ); + + printEntityCount("Retailer", (await s.getCounts()).retailer_count); + + // populate with inventory + let praia = await s.producerAddInventory( + unwrap(pra), + unwrap(pea), 100, 100, 10, 0, 110, "" + ); + let praib = await s.producerAddInventory( + unwrap(pra), + unwrap(peb), 200, 200, 10, 1, 111, "" + ); + let praic = await s.producerAddInventory( + unwrap(pra), + unwrap(pec), 300, 300, 10, 2, 112, "" + ); + let prbia = await s.producerAddInventory( + unwrap(prb), + unwrap(peb), 200, 200, 10, 4, 117, "" + ); + let prbib = await s.producerAddInventory( + unwrap(prb), + unwrap(peb), 1500, 1600, 9, 2, 115, "" + ); + let prbic = await s.producerAddInventory( + unwrap(prb), + 
unwrap(pec), 300, 300, 10, 2, 112, "" + ); + let prcia = await s.producerAddInventory( + unwrap(prb), + unwrap(peb), 200, 200, 9, 4, 711, "" + ); + let prdib = await s.producerAddInventory( + unwrap(prb), + unwrap(peb), 1500, 1500, 7, 2, 115, "" + ); + let prdic = await s.producerAddInventory( + unwrap(prb), + unwrap(pec), 300, 300, 6, 2, 112, "" + ); + + printEntityCount("Inventory@time1", (await s.getCounts()).inventory_count); + + //////////////////////////////////////////////////////////////////////////////////// + + /**- remove some of the inventory items added above */ + + let x = await s.producerRemInventory(unwrap(prdib)); + assertSome<()>(x); + + // a double-remove should return null + assertNull<()>(await s.producerRemInventory(unwrap(prdib))); + + let y = await s.producerRemInventory(unwrap(praib)); + assertSome<()>(y); + + // a double-remove should return null + assertNull<()>(await s.producerRemInventory(unwrap(praib))); + + printEntityCount("Inventory@time2", (await s.getCounts()).inventory_count); + + //////////////////////////////////////////////////////////////////////////////////// + + /**- update some of the (remaining) inventory items added above */ + + let praic2 = await s.producerUpdateInventory( + unwrap(praic), + unwrap(pra), + unwrap(pec), 666, 300, 10, 2, 112, "" + ); + assertSome<()>(praic2); + + let prbia2 = await s.producerUpdateInventory( + unwrap(prbia), + unwrap(prb), + unwrap(peb), 200, 666, 10, 4, 117, "" + ); + assertSome<()>(prbia2); + + let prbib2 = await s.producerUpdateInventory( + unwrap(prbib), + unwrap(prb), + unwrap(peb), 666, 1600, 9, 2, 115, "" + ); + assertSome<()>(prbib2); + + printEntityCount("Inventory@time3", (await s.getCounts()).inventory_count); + + //////////////////////////////////////////////////////////////////////////////////// + + /**- populate with routes */ + + let rta_a_c_tta = await s.transporterAddRoute( + unwrap(tra), + unwrap(rega), + unwrap(regc), + 0, 20, 100, + unwrap(tta) + ); + let rta_b_c_ttb = 
await s.transporterAddRoute( + unwrap(tra), + unwrap(regb), + unwrap(regc), + 0, 20, 100, + unwrap(ttb) + ); + let rta_a_c_ttc = await s.transporterAddRoute( + unwrap(tra), + unwrap(rega), + unwrap(rege), + 0, 20, 100, + unwrap(ttc) + ); + + let rtb_a_c_tta = await s.transporterAddRoute( + unwrap(trb), + unwrap(regc), + unwrap(rege), + 0, 20, 40, + unwrap(tta) + ); + let rtb_b_c_ttb = await s.transporterAddRoute( + unwrap(trb), + unwrap(regb), + unwrap(regc), + 0, 40, 70, + unwrap(ttb) + ); + let rtb_a_c_ttc = await s.transporterAddRoute( + unwrap(trb), + unwrap(rega), + unwrap(regc), + 20, 40, 97, + unwrap(ttc) + ); + + let rtc_b_c_tta = await s.transporterAddRoute( + unwrap(trc), + unwrap(regb), + unwrap(regb), + 20, 40, 40, + unwrap(tta) + ); + let rtc_c_e_tta = await s.transporterAddRoute( + unwrap(trc), + unwrap(regc), + unwrap(regb), + 20, 40, 70, + unwrap(tta) + ); + let rtc_a_c_ttc = await s.transporterAddRoute( + unwrap(trc), + unwrap(rega), + unwrap(regc), + 20, 40, 97, + unwrap(ttc) + ); + + let rtd_b_c_ttb = await s.transporterAddRoute( + unwrap(trd), + unwrap(regb), + unwrap(regd), + 20, 40, 50, + unwrap(ttb) + ); + let rtd_c_e_tta = await s.transporterAddRoute( + unwrap(trd), + unwrap(regc), + unwrap(regd), + 20, 40, 70, + unwrap(tta) + ); + + let rte_a_c_ttc = await s.transporterAddRoute( + unwrap(tre), + unwrap(rega), + unwrap(regd), + 20, 40, 97, + unwrap(ttc) + ); + + printEntityCount("Route", (await s.getCounts()).route_count); + + ////////////////////////////////////////////////////////////////// + + print "\nExchange setup: Done.\n====================================\n"; + await debugDumpAll(); + + ////////////////////////////////////////////////////////////////// + + print "\nRetailer queries\n====================================\n"; + + // do some queries + await retailerQueryAll(rra); + await retailerQueryAll(rrb); + await retailerQueryAll(rrc); + await retailerQueryAll(rrd); + await retailerQueryAll(rre); + + print "\nQuery 
counts\n----------------\n"; + let counts = await s.getCounts(); + + printEntityCount("Retailer join", counts.retailer_join_count); + printEntityCount("Retailer query", counts.retailer_query_count); + printLabeledCost("Retailer query", counts.retailer_query_cost); + + ////////////////////////////////////////////////////////////////// + + }) + }; +}; + + +func retailerQueryAll(r:?RetailerId) : async () { + + print "\nRetailer "; + printInt (unwrap(r)); + print " sends `retailerQueryAll`\n"; + print "------------------------------------\n"; + + print "\n## Query begin:\n"; + let res = unwrap( + await server.retailerQueryAll(unwrap(r)) + ); + print "\n## Query end."; + + print "\n## Query results ("; + printInt (res.len()); + print ")\n"; + for (info in res.vals()) { + print "- "; + print (debug_show info); + print "\n"; + } +}; + +func debugDumpAll() : async () { + + print "\nTruck type info\n----------------\n"; + for ( info in ((await server.allTruckTypeInfo()).vals()) ) { + print "- "; + print (debug_show info); + print "\n"; + }; + + print "\nRegion info\n----------------\n"; + for ( info in ((await server.allRegionInfo()).vals()) ) { + print "- "; + print (debug_show info); + print "\n"; + }; + + print "\nProduce info\n----------------\n"; + for ( info in ((await server.allProduceInfo()).vals()) ) { + print "- "; + print (debug_show info); + print "\n"; + }; + + print "\nProducer info\n----------------\n"; + for ( info in ((await server.allProducerInfo()).vals()) ) { + print "- "; + print (debug_show info); + print "\n"; + }; + + print "\nTransporter info\n----------------\n"; + for ( info in ((await server.allTransporterInfo()).vals()) ) { + print "- "; + print (debug_show info); + print "\n"; + }; + + print "\nRetailer info\n----------------\n"; + for ( info in ((await server.allRetailerInfo()).vals()) ) { + print "- "; + print (debug_show info); + print "\n"; + }; + + print "\nInventory info\n----------------\n"; + for ( info in ((await 
server.allInventoryInfo()).vals()) ) { + print "- "; + print (debug_show info); + print "\n"; + }; + + print "\nRoute info\n----------------\n"; + for ( info in ((await server.allRouteInfo()).vals()) ) { + print "- "; + print (debug_show info); + print "\n"; + }; +}; + +let test = Test(); +test.go() diff --git a/stdlib/examples/produceExchange.as b/stdlib/examples/produceExchange.as deleted file mode 100644 index 123610735a6..00000000000 --- a/stdlib/examples/produceExchange.as +++ /dev/null @@ -1,214 +0,0 @@ -// Produce Exchange Dapp -// ===================== -// -// Start here: -// - Detailed examples: https://dfinity.atlassian.net/wiki/x/joXUBg -// - More background: https://dfinity.atlassian.net/wiki/x/4gg5Bg -// - -// Open Questions: -// ------------------------------------------------- - -// 1. Massive result messages: -// How do we represent and send these? -// -// - lazy lists? (seems "easy" from AS programmer perspective, but -// requires non-first-order data in the IDL) -// -// - list iterators? (almost as good as lazy lists, but requires -// references in the IDL, and complicates the GC story). -// -// - arrays? (expensive to build and send; can become way *too big*). -// - -// 2. For now, wan we assume that the canister is maintained by the -// central authority? - -//////////////////////////////////////////////////////////////// - -// Use the standard library of AS: -// =============================== -// - -// Collections implement internal tables: -// -------------------------------------- -// import Table (same as Trie?) xxx - -// import Date -// xxx Dates, eventually from a standard library: -type Date = Nat; - -// xxx standard weight units? -type Weight = Nat; - -// xxx standard price units? -type Price = Nat; - -///////////////////////////////////////////////////////////////// - -// Fixed types -// =============================== -// -// We assume some fixed types (for now). -// Updating these types requires a canister upgrade. -// -// ?? 
defined by the central authority, aka, the "canister maintainer"? -// - -type Unit = Nat; // xxx replace with a variant type -type Grade = Nat; // xxx replace with a variant type - -type TruckKind = Nat; // ??? replace with a variant type - -type TruckCapacity = Weight; - -type Quantity = Nat; - -type PricePerUnit = Price; // needed to calculate prices -type PriceTotal = Price; - -type WeightPerUnit = Weight; // needed to meet truck constraints - -type RegionId = Nat; // xxx variant type? - - -// -// Unique Ids -// ---------- -// Internally, each type of Id serves as a "row key" for a table (or two). -// - -type ProduceId = Nat; -type ProducerId = Nat; -type RetailerId = Nat; -type TruckKindId = Nat; -type InventoryId = Nat; -type TransporterId = Nat; -type RouteId = Nat; -type OrderId = Nat; - -// -// Query parameters and results -// ---------------------------- -// - -type OrderInfo = shared { - produce: ProduceId; - producer: ProducerId; - quant: Quantity; - ppu: PricePerUnit; - transporter: TransporterId; - truck_kind: TruckKindId; - weight: Weight; - region_begin:RegionId; - region_end: RegionId; - date_begin: Date; - date_end: Date; - prod_cost: PriceTotal; - trans_cost: PriceTotal; -}; - -// xxx same as an OrderInfo? If different, then how? -type QueryAllResult = shared { - produce: ProduceId; - producer: ProducerId; - quant: Quantity; - ppu: PricePerUnit; - transporter: TransporterId; - truck_kind: TruckKindId; - weight: Weight; - region_begin:RegionId; - region_end: RegionId; - date_begin: Date; - date_end: Date; - prod_cost: PriceTotal; - trans_cost: PriceTotal; -}; - -// xxx how to represent huge result messages? 
-type QueryAllResults = [QueryAllResult]; - -// the "Service" -actor ProduceExchange { - - // Producer-based ingress messages: - // ================================ - - producerAddInventory( - prod: ProduceId, - quant:Quantity, - ppu: PricePerUnit, - begin:Date, - end: Date, - ) : async ?InventoryId { - // xxx - null - }; - - producerRemInventory(id:InventoryId) : async ?() { - // xxx - null - }; - - producerOrders(id:ProducerId) : async ?[OrderId] { - // xxx - null - }; - - // Transporter-based ingress messages: - // =================================== - - transporterAddRoute( - trans: TransporterId, - rstart: RegionId, - rend: RegionId, - start: Date, - end: Date, - cost: Price, - tt: TruckKindId - ) : async ?RouteId { - // xxx - null - }; - - transporterRemRoute(id:RouteId) : async ?() { - // xxx - null - }; - - transporterOrders(id:TransporterId) : async ?[OrderId] { - // xxx - null - }; - - // Retailer-based ingress messages: - // =================================== - - retailerQueryAll(id:RetailerId) : async ?QueryAllResults { - // xxx - null - }; - - retailerPlaceOrder( - id:RetailerId, - inventory:InventoryId, - route:RouteId) : async ?OrderId - { - // xxx - null - }; - - retailerOrders(id:RetailerId) : async ?[OrderId] { - // xxx - null - }; - - // (Producer/Transporter/Retailer) ingress messages: - // ======================================================== - - orderInfo(id:OrderId) : async ?OrderInfo { - // xxx - null - }; - -}; diff --git a/stdlib/hash.as b/stdlib/hash.as new file mode 100644 index 00000000000..438969be11f --- /dev/null +++ b/stdlib/hash.as @@ -0,0 +1,191 @@ +/** + +Hash values +=============================== + +Hash values represent a string of _hash bits_, and support associated _bit string_ operations. 
+ +Representations for `Hash` type +--------------------------------- + +We consider two representations for a hash value: + + - as a linked list of booleans, as `BitList` below; or, + - as a "bit vector" packed into a `Word` type (viz., `Word32`). + + +### Why? + +Initially, during the first implementation of some of the standard +library (e.g., for hash tries), we didn't have access to `Word`-based +operations and hence we instead used bit lists. + +Bit lists are closest to the mathematical definition of finite, but +unbounded bit strings, but the `Word32` provides more efficient +practical operations. + +Now, the language supports `Word` operations, so we will use bit +vectors as the preferred representation for practical system tests +going forward. + +*/ + +/** A "bit string" as a linked list of bits: */ +type BitList = ?(Bool, BitList); + +/** A "bit vector" is a bounded-length bit string packed into a single word: */ +type BitVec = Word32; + +/** + + `BitVec` + ---------------------- + A "bit vector" is a bounded-length bit string packed into a single word. + + */ + +let BitVec = new { + + func length() : Nat = 31; + + func hashOfInt(i:Int) : BitVec { + hashInt(i) + }; + + func hashOfIntAcc(h:BitVec, i:Int) : BitVec { + //hashIntAcc(h, i) + // xxx use the value h + hashInt(i) + }; + + func hashOfText(t:Text) : BitVec { + var x = 0 : Word32; + for (c in t.chars()) { + x := x ^ charToWord32(c); + }; + return x + }; + + /** Test if two lists of bits are equal. */ + func getHashBit(h:BitVec, pos:Nat) : Bool { + assert (pos <= length()); + if ((h & (natToWord32(1) << natToWord32(pos))) != natToWord32(0)) { true } + else { false } + }; + + /** Test if two lists of bits are equal. 
*/ + func hashEq(ha:BitVec, hb:BitVec) : Bool { + ha == hb + }; + + func bitsPrintRev(bits:BitVec) { + for (j in range(0, length() - 1)) { + if (getHashBit(bits, j)) { + print "1" + } else { + print "0" + } + } + }; + + func hashPrintRev(bits:BitVec) { + for (j in range(length() - 1, 0)) { + if (getHashBit(bits, j)) { + print "1" + } else { + print "0" + } + } + }; + + func toList(v:BitVec) : BitList { + func rec(pos:Nat) : BitList { + if (pos >= length()) { null } + else { + let rest = rec(pos + 1); + if (getHashBit(v, pos)) { ?(true, rest) } + else { ?(false, rest) } + } + }; + rec(0) + } + +}; + +/** + + `BitList` + ---------- + + Encode hashes as lists of booleans. + + TODO: Replace with bitwise operations on Words, for greater efficiency. +*/ +let BitList = new { + + func hashOfInt(i:Int) : BitList { + BitVec.toList(BitVec.hashOfInt(i)) + }; + + /** Test if two lists of bits are equal. */ + func getHashBit(h:BitList, pos:Nat) : Bool { + switch h { + case null { + // XXX: Should be an error case; it shouldn't happen in our tests if we set them up right. + false + }; + case (?(b, h_)) { + if (pos == 0) { b } + else { getHashBit(h_, pos-1) } + }; + } + }; + + /** Test if two lists of bits are equal. */ + func hashEq(ha:BitList, hb:BitList) : Bool { + switch (ha, hb) { + case (null, null) true; + case (null, _) false; + case (_, null) false; + case (?(bita, ha2), ?(bitb, hb2)) { + if (bita == bitb) { hashEq(ha2, hb2) } + else { false } + }; + } + }; + + func bitsPrintRev(bits:BitList) { + switch bits { + case null { print "" }; + case (?(bit,bits_)) { + bitsPrintRev(bits_); + if bit { print "1R." } + else { print "0L." 
} + } + } + }; + + func hashPrintRev(bits:BitList) { + switch bits { + case null { print "" }; + case (?(bit,bits_)) { + hashPrintRev(bits_); + if bit { print "1" } + else { print "0" } + } + } + }; + +}; + + +/** + Canonical representations + --------------------------- + + Choose a canonical representation of hash values for the rest of + the standard library to use: +*/ + +type Hash = BitVec; +let Hash = BitVec; diff --git a/stdlib/list.as b/stdlib/list.as index d9fe19613b8..8aaed3240ef 100644 --- a/stdlib/list.as +++ b/stdlib/list.as @@ -1,37 +1,44 @@ -/* - * Lists, a la functional programming, in ActorScript. - */ - -// Done: -// -// - standard list definition -// - standard list recursors: foldl, foldr, iter -// - standard higher-order combinators: map, filter, etc. -// - (Every function here: http://sml-family.org/Basis/list.html) - -// TODO-Matthew: File issues: -// -// - 'assert_unit' vs 'assert_any' (related note: 'any' vs 'none') -// - apply type args, but no actual args? (should be ok, and zero cost, right?) -// - unhelpful error message around conditional parens (search for XXX below) - -// TODO-Matthew: Write: -// -// - iterator objects, for use in 'for ... in ...' patterns -// - lists+pairs: zip, split, etc -// - regression tests for everything that is below - - -// polymorphic linked lists +/** + +# List + +Purely-functional, singly-linked lists. + +*/ + +/** + Representation + ================= + + A singly-linked list consists of zero or more _cons cells_, wherein +each cell contains a single list element (the cell's _head_), and a pointer to the +remainder of the list (the cell's _tail_). 
+ +*/ + type List = ?(T, List); +/** + Interface + ============== +*/ + let List = new { - // empty list + /** + `nil` + ------ + empty list + */ func nil() : List = null; - // test for empty list + + /** + `isNil` + -------- + test for empty list + */ func isNil(l : List) : Bool { switch l { case null { true }; @@ -39,11 +46,19 @@ let List = new { } }; - // aka "list cons" + /** + `push` + ------------- + aka "list cons" + */ func push(x : T, l : List) : List = ?(x, l); - // last element, optionally; tail recursive + /** + `last` + ---------- + last element, optionally; tail recursive + */ func last(l : List) : ?T = { switch l { case null { null }; @@ -52,7 +67,11 @@ let List = new { } }; - // treat the list as a stack; combines 'hd' and (non-failing) 'tl' into one operation + /** + `pop` + -------- + treat the list as a stack; combines the usual operations `head` and (non-failing) `tail` into one operation + */ func pop(l : List) : (?T, List) = { switch l { case null { (null, null) }; @@ -60,7 +79,11 @@ let List = new { } }; - // length; tail recursive + /** + `len` + -------- + length; tail recursive + */ func len(l : List) : Nat = { func rec(l : List, n : Nat) : Nat { switch l { @@ -71,7 +94,11 @@ let List = new { rec(l,0) }; - // array-like list access, but in linear time; tail recursive + /** + `nth` + --------- + array-like list access, but in linear time; tail recursive + */ func nth(l : List, n : Nat) : ?T = { switch (n, l) { case (_, null) { null }; @@ -80,7 +107,11 @@ let List = new { } }; - // reverse; tail recursive + /** + `rev` + -------- + reverse the list; tail recursive + */ func rev(l : List) : List = { func rec(l : List, r : List) : List { switch l { @@ -91,7 +122,11 @@ let List = new { rec(l, null) }; - // Called "app" in SML Basis, and "iter" in OCaml; tail recursive + /** + `iter` + --------- + Called `app` in SML Basis, and `iter` in OCaml; tail recursive + */ func iter(l : List, f:T -> ()) : () = { func rec(l : List) : () { switch l { @@ 
-102,8 +137,13 @@ let List = new { rec(l) }; - // map; non-tail recursive - // (Note: need mutable Cons tails for tail-recursive map) + /** + `map` + --------- + map the list elements; non-tail recursive + + Note: need mutable Cons tails for tail-recursive map. + */ func map(l : List, f:T -> S) : List = { func rec(l : List) : List { switch l { @@ -114,8 +154,11 @@ let List = new { rec(l) }; - // filter; non-tail recursive - // (Note: need mutable Cons tails for tail-recursive version) + /** + `filter` + ---------- + filter the list elements; non-tail recursive + */ func filter(l : List, f:T -> Bool) : List = { func rec(l : List) : List { switch l { @@ -126,8 +169,11 @@ let List = new { rec(l) }; - // map-and-filter; non-tail recursive - // (Note: need mutable Cons tails for tail-recursive version) + /** + `mapFilter` + -------------- + map and filter the list elements; non-tail recursive + */ func mapFilter(l : List, f:T -> ?S) : List = { func rec(l : List) : List { switch l { @@ -143,8 +189,11 @@ let List = new { rec(l) }; - // append; non-tail recursive - // (Note: need mutable Cons tails for tail-recursive version) + /** + `append` + --------- + append two lists; non-tail recursive + */ func append(l : List, m : List) : List = { func rec(l : List) : List { switch l { @@ -155,7 +204,11 @@ let List = new { rec(l) }; - // concat (aka "list join"); tail recursive, but requires "two passes" + /** + `concat` + ----------- + concat (aka "list join"); tail recursive, but requires "two passes" + */ func concat(l : List>) : List = { // 1/2: fold from left to right, reverse-appending the sublists... 
let r = @@ -166,7 +219,11 @@ let List = new { rev(r) }; - // (See SML Basis library); tail recursive + /** + `revAppend` + ------------- + See SML Basis library; tail recursive + */ func revAppend(l1 : List, l2 : List) : List = { switch l1 { case null { l2 }; @@ -174,8 +231,12 @@ let List = new { } }; - // take; non-tail recursive - // (Note: need mutable Cons tails for tail-recursive version) + /** + `take` + --------- + "take" `n` elements from the prefix of the given list. + If the given list has fewer than `n` elements, we return the full input list. + */ func take(l : List, n:Nat) : List = { switch (l, n) { case (_, 0) { null }; @@ -184,7 +245,10 @@ let List = new { } }; - // drop; tail recursive + /** + `drop` + ---------- + */ func drop(l : List, n:Nat) : List = { switch (l, n) { case (l_, 0) { l_ }; @@ -193,7 +257,11 @@ let List = new { } }; - // fold list left-to-right using f; tail recursive + /** + `foldLeft` + --------------- + fold list left-to-right using function `f`; tail recursive + */ func foldLeft(l : List, a:S, f:(T,S) -> S) : S = { func rec(l:List, a:S) : S = { switch l { @@ -204,7 +272,11 @@ let List = new { rec(l,a) }; - // fold list right-to-left using f; non-tail recursive + /*** + `foldRight` + ------------ + fold the list right-to-left using function `f`; non-tail recursive + */ func foldRight(l : List, a:S, f:(T,S) -> S) : S = { func rec(l:List) : S = { switch l { @@ -215,7 +287,11 @@ let List = new { rec(l) }; - // test if there exists list element for which given predicate is true + /** + `find` + ----------- + test if there exists list element for which given predicate is true + */ func find(l: List, f:T -> Bool) : ?T = { func rec(l:List) : ?T { switch l { @@ -226,7 +302,11 @@ let List = new { rec(l) }; - // test if there exists list element for which given predicate is true + /** + `exists` + --------- + test if there exists list element for which given predicate is true + */ func exists(l: List, f:T -> Bool) : Bool = { func 
rec(l:List) : Bool { switch l { @@ -239,7 +319,11 @@ let List = new { rec(l) }; - // test if given predicate is true for all list elements + /** + `all` + ------- + test if given predicate is true for all list elements + */ func all(l: List, f:T -> Bool) : Bool = { func rec(l:List) : Bool { switch l { @@ -250,7 +334,11 @@ let List = new { rec(l) }; - // Given two ordered lists, merge them into a single ordered list + /** + `merge` + --------- + Given two ordered lists, merge them into a single ordered list + */ func merge(l1: List, l2: List, lte:(T,T) -> Bool) : List { func rec(l1: List, l2: List) : List { switch (l1, l2) { @@ -268,9 +356,14 @@ let List = new { rec(l1, l2) }; - // Compare two lists lexicographic` ordering. tail recursive. - // XXX: Eventually, follow `collate` design from SML Basis, with real sum types, use 3-valued `order` type here. - // + /** + `lessThanEq` + -------------- + + Compare two lists lexicographic` ordering. tail recursive. + + To do: Eventually, follow `collate` design from SML Basis, with real sum types, use 3-valued `order` type here. + */ func lessThanEq(l1: List, l2: List, lte:(T,T) -> Bool) : Bool { func rec(l1: List, l2: List) : Bool { switch (l1, l2) { @@ -282,8 +375,13 @@ let List = new { rec(l1, l2) }; - // Compare two lists for equality. tail recursive. - // `isEq(l1, l2)` =equiv= `lessThanEq(l1,l2) && lessThanEq(l2,l1)`, but the former is more efficient. + /** + `isEq` + --------- + Compare two lists for equality. tail recursive. + + `isEq(l1, l2)` is equivalent to `lessThanEq(l1,l2) && lessThanEq(l2,l1)`, but the former is more efficient. + */ func isEq(l1: List, l2: List, eq:(T,T) -> Bool) : Bool { func rec(l1: List, l2: List) : Bool { switch (l1, l2) { @@ -296,8 +394,12 @@ let List = new { rec(l1, l2) }; - // using a predicate, create two lists from one: the "true" list, and the "false" list. 
- // (See SML basis library); non-tail recursive + /** + `partition` + --------------- + using a predicate, create two lists from one: the "true" list, and the "false" list. + (See SML basis library); non-tail recursive. + */ func partition(l: List, f:T -> Bool) : (List, List) { func rec(l: List) : (List, List) { switch l { @@ -315,8 +417,12 @@ let List = new { rec(l) }; - // generate a list based on a length, and a function from list index to list element; - // (See SML basis library); non-tail recursive + /** + `tabulate` + -------------- + generate a list based on a length, and a function from list index to list element. + (See SML basis library); non-tail recursive. + */ func tabulate(n:Nat, f:Nat -> T) : List { func rec(i:Nat) : List { if (i == n) { null } else { ?(f(i), rec(i+1)) } @@ -325,3 +431,13 @@ let List = new { }; }; + +/** + +To do: +-------- +- iterator objects, for use in `for ... in ...` patterns +- operations for lists of pairs and pairs of lists: zip, split, etc +- more regression tests for everything that is below + +*/ diff --git a/stdlib/markdown-of-actorscript.py b/stdlib/markdown-of-actorscript.py new file mode 100755 index 00000000000..f3a91e1eee6 --- /dev/null +++ b/stdlib/markdown-of-actorscript.py @@ -0,0 +1,215 @@ +#!/usr/bin/env python + +## Objective +## ---------- +## +## The objective of this program is to "Invert" an ActorScript source +## file into a corresponding Markdown file with snippets of +## ActorScript. The markdown becomes the "outer format" with +## ActorScript (as code blocks) as the "inner format". In this sense, +## the objective is to "invert" the inner/outer relationship, and +## preserve everything else that's sensible. +## +## Design question: If we have the freedom to order the Decls of an +## ActorScript file any way we choose, then what's the best order to +## tell a **narrative** version of the file? 
+ +## The immediate benefit of using this tool: +## +## - The stdlib directory is source code that documents itself to seed +## WIP guide for ActorScript; this workflow is possible via a +## Makefile. +## +## - By exposing the Markdown on the outside, the outline/narrative +## structure is primary, and can be hyperlinked for sharing and +## remote discussions, e.g., as markdown files in a github repo. +## +## - By exposing the Markdown on the outside, the outline/narrative +## structure is primary and we can explain how implementations of +## the Produce Exchange and the standard library, and future +## examples, work, in a "literate programming" style. + +## Assumptions +## ------------ +## +## - KISS: This tool is a placeholder for a better tool that actually +## understands ActorScript. +## +## - KISS: This tool does not try to be intelligent +## when it can force you (the programmer) to do something only +## somewhat annoying, e.g.: +## +## - KISS: Dont mix mode-switch patterns and other ActorScript comments on a single line## +## - KISS: Dont mix mode switches on a single line. 
+## + + +## Stable links +## ----------- +## https://hydra.oregon.dfinity.build//job/dfinity-ci-build/actorscript.pr-234/stdlib-reference/latest/download/1/doc/ +## +## (PR 234 is the current PR for the standard library and produce exchange) +## + +DOCURL="https://hydra.oregon.dfinity.build//job/dfinity-ci-build/actorscript.pr-234/stdlib-reference/latest/download/1/doc/" + +############################################################################################################# + +import sys +import re + +showActorScript=True +#showActorScript=False + +OmitActorScript="OmitActorScript" +ActorScript="ActorScript" +Markdown="Markdown" +Comment="Comment" +Done="Done" + +modeType=[ActorScript, Markdown, Comment, OmitActorScript, Done] + +# eventually, detect this `outerMode` based on the input file name and +# handle the other way around; for now, we assume only this way +# around: +outerMode = ActorScript +ignoreNonMarkdownComments= False + +mode = outerMode +modeOpen="```ActorScript" +modeLines = [] +modeClose="```" + +def switchModeTo(toMode, toModeOpen, toModeClose): + global mode + global modeOpen + global modeClose + global modeLines + + if toMode == mode: + return (False, []) + + if len(modeLines) > 0: + # Normal case: + if mode != OmitActorScript and (showActorScript or mode != ActorScript): + print modeOpen + if modeLines[-1] == "": + modeLines.pop() + print "" + for l in modeLines: + l = l.replace("$DOCURL", DOCURL); + print(l.rstrip()) + print modeClose + # The source file explicitly omitted this + elif mode == OmitActorScript: + print "```" + print ". . . (selectively omitted, explicitly) . . ." + print "```" + # the flag says no + elif mode == ActorScript and not showActorScript: + print "```" + print ". . . (all ActorScript is omitted) . . ." + print "```" + # impossible! 
+ else: + assert False + + mode = toMode + modeOpen = toModeOpen + modeClose = toModeClose + modeLines = [] + +# empty line, or just whitespace; +def whiteSpaceLine(): + #debug "whitespace-only line, noted." + # record if its not the first, or last thing we saw + if len(modeLines) > 0 and modeLines[-1] != "": + modeLines.append("") + +with open(sys.argv[1], "r") as ins: + for line in ins: + #debug "read line (", mode, "): `", line.rstrip(), "`" + + # Now discriminate between lines that switch modes, and "ordinary lines" + # For now, assume 0 or 1 mode switches per line; later, handle breaking those on the same line up + + # Start Markdown comment + if re.match(r'/\*\*([^\*])+\*/', line.lstrip()): + p = re.compile(r'(/\*\*)([^\*/]*)(\*/)') + groups = p.match(line.lstrip()).groups() + if len(groups) == 3: + savedMode = mode + savedOpen = modeOpen + savedClose = modeClose + switchModeTo(Markdown, "", "") + modeLines.append(groups[1]) + switchModeTo(savedMode, savedOpen, savedClose) + else: + # ignore the comment; no content + assert True + + # Start Markdown comment + elif re.match(r'/\*\*', line.lstrip()): + switchModeTo(Markdown, "", "") + + # Start ordinary comment + elif re.match(r'/\*', line.lstrip()): + switchModeTo(Comment, "/* ", "*/") + + # Horizontal rule, in actorscript code + elif re.match(r'//////////+', line.lstrip()): + # Horizontal rule in ActorScript + if mode == ActorScript: + if len(modeLines) > 0: + modeLines.append("```") + modeLines.append("-----------------------------------------------------------------------") + modeLines.append("```") + else: + assert True + + # Close markdown or comment block and omit the next otherwise-ActorScript block: + elif re.match(r'//\s*@Omit:', line.lstrip()): + switchModeTo(OmitActorScript, "", "") + + # One-line comment + elif re.match(r'//+ \S*\s*', line.lstrip()): + matches = re.split(r'//+ ', line.lstrip()) + if mode == Markdown: + modeLines.append(matches[1].rstrip()) + elif mode == ActorScript: + if 
ignoreNonMarkdownComments: + #debug "ignoring single-line comment." + assert True + else: + #debug "append single-line comment (", mode, "): `", line.rstrip(), "`" + modeLines.append(line.rstrip()) + elif mode == Comment: + modeLines.append(matches[1].rstrip()) + elif mode == OmitActorScript: + assert True + else: + assert False + + # One-line comment, with no content + elif re.match(r'//', line.lstrip()): + whiteSpaceLine() + + # Close markdown or comment block + elif re.match(r'\*/', line.lstrip()): + switchModeTo(ActorScript, "```ActorScript", "```") + + else: + #debug "non-empty line" + # non-empty line + if re.match(r'\S', line.lstrip()): + if mode == Comment: + # do nothing + #debug "ignore line (", mode, "): `", line.rstrip(), "`" + assert True + else: + #debug "append line (", mode, "): `", line.rstrip(), "`" + modeLines.append(line.rstrip()) + else: + whiteSpaceLine() + +switchModeTo(Done, "", "") diff --git a/stdlib/markdown-of-markdown.py b/stdlib/markdown-of-markdown.py new file mode 100755 index 00000000000..14c719d9b7d --- /dev/null +++ b/stdlib/markdown-of-markdown.py @@ -0,0 +1,20 @@ +#!/usr/bin/env python + +## Stable links +## ----------- +## https://hydra.oregon.dfinity.build//job/dfinity-ci-build/actorscript.pr-234/stdlib-reference/latest/download/1/doc/ +## +## (PR 234 is the current PR for the standard library and produce exchange) +## + +DOCURL="https://hydra.oregon.dfinity.build//job/dfinity-ci-build/actorscript.pr-234/stdlib-reference/latest/download/1/doc/" + +############################################################################################################# + +import sys +import re + +with open(sys.argv[1], "r") as ins: + for line in ins: + line = line.replace("$DOCURL", DOCURL); + print line.rstrip() diff --git a/stdlib/prelude.as b/stdlib/prelude.as new file mode 100644 index 00000000000..93279559aba --- /dev/null +++ b/stdlib/prelude.as @@ -0,0 +1,102 @@ +/** + +Stdlib prelude +=============== + + This prelude file proposes 
standard library features that _may_ +belong in the _language_ (compiler-internal) prelude sometime, after +some further experience and discussion. Until then, they live here. + +*/ + +/*** + + `nyi`: Not yet implemented + ----------------------------- + + Mark incomplete code with the `nyi` and `xxx` functions. + + Each have calls are well-typed in all typing contexts, which +trap in all execution contexts. + +*/ + +func nyi() : None = + { assert false ; nyi(); }; + +func xxx() : None = + { assert false ; xxx(); }; + +/*** + + `unreachable` + -------------------- + + Mark unreachable code with the `unreachable` function. + + Calls are well-typed in all typing contexts, and they + trap in all execution contexts. + +*/ +func unreachable() : None = { assert false ; unreachable() }; + +/*** + + `assertSome` + -------------------- + + Assert that the given value is not `null`; ignore this value and return unit. + +*/ +func assertSome( x : ?X ) = { + switch x { + case null { unreachable() }; + case (?_) { }; + } +}; + +/*** + + `assertNull` + -------------------- + + Assert that the given value is `null`; ignore this value and return unit. + +*/ +func assertNull( x : ?X ) = { + switch x { + case null { }; + case (?_) { unreachable() }; + } +}; + +/*** + + `unwrap` + -------------------- + + Unwrap an optional value, and fail if it is `null`. + +*/ +func unwrap(ox:?T) : T { + switch ox { + case (null) { unreachable() }; + case (?x) x; + } +}; + +/*** + + `printOpInt` + -------------------- + + Print an optional integer. 
+ +*/ +func printOpInt( x : ?Int ) = { + switch x { + case null { print "null" }; + case (?x_) { print "?"; printInt x_ }; + } +}; + diff --git a/stdlib/set.as b/stdlib/set.as index 4961b890aab..68e34ced570 100644 --- a/stdlib/set.as +++ b/stdlib/set.as @@ -1,21 +1,22 @@ -// import Trie; +/** + + Sets + ======== -/////////////////////////////////////////////////////////////////////// - -/* Sets are partial maps from element type to unit type, i.e., the partial map represents the set with its domain. - */ - -// TODO-Matthew: -// -// - for now, we pass a hash value each time we pass an element value; -// in the future, we might avoid passing element hashes with each element in the API; -// related to: https://dfinity.atlassian.net/browse/AST-32 -// -// - similarly, we pass an equality function when we do some operations. -// in the future, we might avoid this via https://dfinity.atlassian.net/browse/AST-32 -// + + TODO-Matthew: + --------------- + + - for now, we pass a hash value each time we pass an element value; + in the future, we might avoid passing element hashes with each element in the API; + related to: https://dfinity.atlassian.net/browse/AST-32 + + - similarly, we pass an equality function when we do some operations. 
+ in the future, we might avoid this via https://dfinity.atlassian.net/browse/AST-32 + +*/ type Set = Trie; @@ -66,7 +67,7 @@ let Set = new { func intersect(s1:Set, s2:Set, eq:(T,T)->Bool):Set { let noop : ((),())->(()) = func (_:(),_:()):(())=(); - let s3 = Trie.conj(s1, s2, eq, noop); + let s3 = Trie.join(s1, s2, eq, noop); s3 }; diff --git a/stdlib/setDb.as b/stdlib/setDb.as index 84351439ff9..1e497497707 100644 --- a/stdlib/setDb.as +++ b/stdlib/setDb.as @@ -1,33 +1,16 @@ -// import Set +/** -//////////////////////////////////////////////////////////////////// + Debugging wrapper around `Set` module + ======================================== + + */ let SetDb = new { private func setDbPrint(s:Set) { - func rec(s:Set, ind:Nat, bits:Hash) { + func rec(s:Set, ind:Nat, bits:BitList) { func indPrint(i:Nat) { if (i == 0) { } else { print "| "; indPrint(i-1) } }; - func bitsPrintRev(bits:Bits) { - switch bits { - case null { print "" }; - case (?(bit,bits_)) { - bitsPrintRev(bits_); - if bit { print "1R." } - else { print "0L." 
} - } - } - }; - func hashPrintRev(bits:Bits) { - switch bits { - case null { print "" }; - case (?(bit,bits_)) { - hashPrintRev(bits_); - if bit { print "1" } - else { print "0" } - } - } - }; switch s { case null { //indPrint(ind); @@ -54,7 +37,7 @@ let SetDb = new { print("hash("); printInt(k.key); print(")="); - hashPrintRev(k.hash); + Hash.hashPrintRev(k.hash); print("; "); () } diff --git a/stdlib/setDbTest.as b/stdlib/setDbTest.as index 193e54e7772..8fc3e71b8f4 100644 --- a/stdlib/setDbTest.as +++ b/stdlib/setDbTest.as @@ -1,15 +1,23 @@ -// import SetDb +/** + + Tests with debugging wrapper around `Set` module + =============================================== + + */ func SetDb__test() { - let hash_0 = ?(false,?(false,?(false,?(false, null)))); - let hash_1 = ?(false,?(false,?(false,?(true, null)))); - let hash_2 = ?(false,?(false,?(true, ?(false, null)))); - let hash_3 = ?(false,?(false,?(true, ?(true, null)))); - let hash_4 = ?(false,?(true, ?(false,?(false, null)))); - let hash_5 = ?(false,?(true, ?(false,?(true, null)))); - let hash_6 = ?(false,?(true, ?(true, ?(false, null)))); - let hash_7 = ?(false,?(true, ?(true, ?(true, null)))); - let hash_8 = ?(true, ?(false,?(false,?(false, null)))); + + let hash_0 = Hash.hashOfInt(0); + let hash_1 = Hash.hashOfInt(1); + let hash_2 = Hash.hashOfInt(2); + let hash_3 = Hash.hashOfInt(3); + let hash_4 = Hash.hashOfInt(4); + let hash_5 = Hash.hashOfInt(5); + let hash_6 = Hash.hashOfInt(6); + let hash_7 = Hash.hashOfInt(7); + let hash_8 = Hash.hashOfInt(8); + let hash_9 = Hash.hashOfInt(9); + print "inserting...\n"; // Insert numbers [0..8] into the set, using their bits as their hashes: @@ -172,4 +180,4 @@ func SetDb__test() { print "done.\n"; }; -SetDb__test(); \ No newline at end of file +SetDb__test(); diff --git a/stdlib/trie.as b/stdlib/trie.as index 2791c47bdf6..63e3461f205 100644 --- a/stdlib/trie.as +++ b/stdlib/trie.as @@ -1,71 +1,65 @@ -/* - Hash Tries in ActorScript - ------------------------- +/** - 
Functional maps (and sets) whose representation is "canonical", and - history independent. +Hash tries +====================== - See this POPL 1989 paper (Section 6): - - "Incremental computation via function caching", Pugh & Teitelbaum. - - https://dl.acm.org/citation.cfm?id=75305 - - Public copy here: http://matthewhammer.org/courses/csci7000-s17/readings/Pugh89.pdf +Functional maps (and sets) whose representation is "canonical", and +history independent. - By contrast, other usual functional representations of maps (AVL - Trees, Red-Black Trees) do not enjoy history independence, and are - each more complex to implement (e.g., each requires "rebalancing"; - these trees never do). +Background +------------------ + +See this POPL 1989 paper (Section 6): + + - ["Incremental computation via function caching", Pugh & Teitelbaum](https://dl.acm.org/citation.cfm?id=75305). + - [Public copy here](http://matthewhammer.org/courses/csci7000-s17/readings/Pugh89.pdf). + +By contrast, other usual functional representations of maps (AVL +Trees, Red-Black Trees) do not enjoy history independence, and are +each more complex to implement (e.g., each requires "rebalancing"; +these trees never do). + +Assumptions +============= + +Uniform depth assumption: +------------------------------ + +We make a simplifying assumption, for now: All defined paths in the +trie have a uniform length, the same as the number of bits of a +hash, starting at the LSB, that we use for indexing. + +- If the number is too low, our expected O(log n) bounds become + expected O(n). + +- If the number is too high, we waste constant factors for + representing small sets/maps. + +In [future work](#adaptive-path-lengths), we can make this more robust +by making this number adaptive for each path, and based on the content +of the trie along that path. 
+ +See [Future work](#future-work) below +-------------------------------------- +*/ + +/** +Representation +===================== + +Below, we define the types used in the representation: + + - **`Key`**, parameterized by a hashable type `K` + - **`Branch`**, for binary nodes with two tries. + - **`Leaf`**, for leaf nodes with no sub-tries. We use [association lists](#association-lists) there. + - We use **`null`** for the empty trie, and + - **`?Node`** represents all three possibilities until we have variants. - */ -// Done: -// -// - (hacky) type definition; XXX: need real sum types to clean it up -// - find operation -// - insert operation -// - remove operation -// - replace operation (remove+insert via a single traversal) -// - basic encoding of sets, and some set operations -// - basic tests (and primitive debugging) for set operations -// - write trie operations that operate over pairs of tries: -// for set union, difference and intersection. -// - handle hash collisions gracefully using association list module - -// TODO-Matthew: -// -// - (more) regression tests for everything that is below -// -// - adapt the path length of each subtree to its cardinality; avoid -// needlessly long paths, or paths that are too short for their -// subtree's size. -// -// - iterator objects, for use in 'for ... in ...' patterns - - -// import List - -// TEMP: A "bit string" as a linked list of bits: -type Bits = ?(Bool, Bits); - -// TODO: Replace this definition WordX, for some X, once we have these types in AS. -type Hash = Bits; -//type Hash = Word16; -//type Hash = Word8; - -// Uniform depth assumption: -// -// We make a simplifying assumption, for now: All defined paths in the -// trie have a uniform length, the same as the number of bits of a -// hash, starting at the LSB, that we use for indexing. -// -// - If the number is too low, our expected O(log n) bounds become -// expected O(n). 
-// -// - If the number is too high, we waste constant factors for -// representing small sets/maps. -// -// TODO: Make this more robust by making this number adaptive for each -// path, and based on the content of the trie along that path. -// +See the full details in the definitions below: + +*/ + let HASH_BITS = 4; type Key = { @@ -81,8 +75,8 @@ type Branch = { left:Trie; right:Trie; }; -// Leaf nodes are association lists of `Key`s -// Every key shares a common hash prefix, its trie path. +// Leaf nodes are association lists of `Key`s where every key +// shares a common hash prefix, its (common) trie path. type Leaf = { keyvals:List<(Key,V)>; }; @@ -96,198 +90,64 @@ type Node = { type Trie = ?Node; -/* See AST-42 (sum types); we want this type definition instead: +/** + Association lists + ----------------- + Notice above that the `Leaf` case uses a list of key-value pairs. - // Use a sum type (AST-42) - type Trie = { #leaf : LeafNode; #bin : BinNode; #empty }; - type BinNode = { left:Trie; right:Trie }; - type LeafNode = { key:K; val:V }; + See [this document]($DOCURL/assocList.html) for more details. +*/ - */ +/** + Two-dimensional trie + --------------------- + A 2D trie is just a trie that maps dimension-1 keys to another + layer of tries, each keyed on the dimension-2 keys. +*/ +type Trie2D = Trie >; -let Trie = new { +/** + Three-dimensional trie + --------------------- + A 3D trie is just a trie that maps dimension-1 keys to another + layer of 2D tries, each keyed on the dimension-2 and dimension-3 keys. 
+*/ +type Trie3D = Trie >; - // XXX: until AST-42: - func isNull(x : ?X) : Bool { - switch x { - case null { true }; - case (?_) { false }; - }; - }; - - // XXX: until AST-42: - func assertIsNull(x : ?X) { - switch x { - case null { assert true }; - case (?_) { assert false }; - }; - }; - - // XXX: until AST-42: - func makeEmpty() : Trie - = null; - - // Note: More general version of this operation below, which tests for - // "deep emptiness" (subtrees that have branching structure, but no - // leaves; these can result from naive filtering operations, for - // instance). - // - // // XXX: until AST-42: - // func isEmpty(t:Trie) : Bool { - // switch t { - // case null { true }; - // case (?_) { false }; - // }; - // }; +/** + Module interface + =================== - // XXX: until AST-42: - func assertIsEmpty(t : Trie) { - switch t { - case null { assert true }; - case (?_) { assert false }; - }; - }; + For non-public helpers used in these definitions, see below: - // XXX: until AST-42: - func makeBin(l:Trie, r:Trie) : Trie { - ?(new {left=l; right=r; keyvals=null; }) - }; + - [Helpers 1](#helpers-for-hashing) + - [Helpers 2](#helpers-for-missing-variants) - // XXX: until AST-42: - func isBin(t:Trie) : Bool { - switch t { - case null { false }; - case (?t_) { - switch (t_.keyvals) { - case null { true }; - case _ { false }; - }; - }; - } - }; - - // XXX: until AST-42: - func makeLeaf(kvs:AssocList,V>) : Trie { - ?(new {left=null; right=null; keyvals=kvs }) - }; - - // XXX: until AST-42: - func matchLeaf(t:Trie) : ?List<(Key,V)> { - switch t { - case null { null }; - case (?t_) { - switch (t_.keyvals) { - case (?keyvals) ?(?(keyvals)); - case (_) null; - } - }; - } - }; - - // XXX: until AST-42: - func isLeaf(t:Trie) : Bool { - switch t { - case null { false }; - case (?t_) { - switch (t_.keyvals) { - case null { false }; - case _ { true }; - } - }; - } - }; - // XXX: until AST-42: - func assertIsBin(t : Trie) { - switch t { - case null { assert false }; - case (?n) { 
- assertIsNull<((Key,V),AssocList,V>)>(n.keyvals); - }; - } - }; - - // XXX: until AST-42: - func getLeafKey(t : Node) : Key { - assertIsNull>(t.left); - assertIsNull>(t.right); - switch (t.keyvals) { - case (?((k,v),_)) { k }; - case (null) { /* ERROR */ getLeafKey(t) }; - } - }; - - // XXX: this helper is an ugly hack; we need real sum types to avoid it, I think: - func getLeafVal(t : Node) : V { - assertIsNull>(t.left); - assertIsNull>(t.right); - switch (t.keyvals) { - case (?((k,v),_)) { v }; - case null { /* ERROR */ getLeafVal(t) }; - } - }; + */ - // TODO: Replace with bitwise operations on Words, once we have each of those in AS. - // For now, we encode hashes as lists of booleans. - func getHashBit(h:Hash, pos:Nat) : Bool { - switch h { - case null { - // XXX: Should be an error case; it shouldn't happen in our tests if we set them up right. - false - }; - case (?(b, h_)) { - if (pos == 0) { b } - else { getHashBit(h_, pos-1) } - }; - } - }; +let Trie = new { - // Test if two lists of bits are equal. - func hashEq(ha:Hash, hb:Hash) : Bool { - switch (ha, hb) { - case (null, null) true; - case (null, _) false; - case (_, null) false; - case (?(bita, ha2), ?(bitb, hb2)) { - if (bita == bitb) { hashEq(ha2, hb2) } - else { false } - }; - } - }; + /** + `empty` + -------- + An empty trie. + */ + func empty() : Trie = makeEmpty(); - // Equality function for two `Key`s, in terms of equaltiy of `K`'s. - func keyEq(keq:(K,K) -> Bool) : ((Key,Key) -> Bool) = { - func (key1:Key, key2:Key) : Bool = - (hashEq(key1.hash, key2.hash) and keq(key1.key, key2.key)) - }; + /** + `copy` + --------- + Purely-functional representation permits _O(1)_-time copy, via persistent sharing. 
- // part of "public interface": - func empty() : Trie = makeEmpty(); + */ - // helper function for constructing new paths of uniform length - func buildNewPath(bitpos:Nat, k:Key, ov:?V) : Trie { - func rec(bitpos:Nat) : Trie { - if ( bitpos < HASH_BITS ) { - // create new bin node for this bit of the hash - let path = rec(bitpos+1); - let bit = getHashBit(k.hash, bitpos); - if bit { - ?(new {left=null; right=path; keyvals=null}) - } - else { - ?(new {left=path; right=null; keyvals=null}) - } - } else { - // create new leaf for (k,v) pair, if the value is non-null: - switch ov { - case null { ?(new {left=null; right=null; keyvals=null }) }; - case (?v) { ?(new {left=null; right=null; keyvals=?((k,v),null) }) }; - } - } - }; - rec(bitpos) - }; + func copy(t : Trie) : Trie = t; - // replace the given key's value option with the given one, returning the previous one + /** + `replace` + --------- + replace the given key's value option with the given one, returning the previous one + */ func replace(t : Trie, k:Key, k_eq:(K,K)->Bool, v:?V) : (Trie, ?V) { let key_eq = keyEq(k_eq); // For `bitpos` in 0..HASH_BITS, walk the given trie and locate the given value `x`, if it exists. 
@@ -297,7 +157,7 @@ let Trie = new { case null { (buildNewPath(bitpos, k, v), null) }; case (?n) { assertIsBin(t); - let bit = getHashBit(k.hash, bitpos); + let bit = Hash.getHashBit(k.hash, bitpos); // rebuild either the left or right path with the inserted (k,v) pair if (not bit) { let (l, v_) = rec(n.left, bitpos+1); @@ -326,17 +186,191 @@ let Trie = new { rec(t, 0) }; - // insert the given key's value in the trie; return the new trie + /** + `replaceThen` + ------------ + replace the given key's value in the trie, + and only if successful, do the success continuation, + otherwise, return the failure value + */ + func replaceThen(t : Trie, k:Key, k_eq:(K,K)->Bool, v2:V, + success: (Trie, V) -> X, + fail: () -> X) + : X + { + let (t2, ov) = replace(t, k, k_eq, ?v2); + switch ov { + case (null) { /* no prior value; failure to remove */ fail() }; + case (?v1) { success(t2, v1) }; + } + }; + + /** + `insert` + ------------ + insert the given key's value in the trie; return the new trie, and the previous value associated with the key, if any + */ func insert(t : Trie, k:Key, k_eq:(K,K)->Bool, v:V) : (Trie, ?V) { replace(t, k, k_eq, ?v) }; - // remove the given key's value in the trie; return the new trie + /** + `insertFresh` + ---------------- + insert the given key's value in the trie; return the new trie; assert that no prior value is associated with the key + */ + func insertFresh(t : Trie, k:Key, k_eq:(K,K)->Bool, v:V) : Trie { + let (t2, none) = replace(t, k, k_eq, ?v); + switch none { + case (null) (); + case (?_) assert false; + }; + t2 + }; + + /** + `insert2D` + --------------- + insert the given key's value in the 2D trie; return the new 2D trie. 
+ */ + func insert2D(t : Trie2D, + k1:Key, k1_eq:(K1,K1)->Bool, + k2:Key, k2_eq:(K2,K2)->Bool, + v:V) + : Trie2D + { + let inner = find>(t, k1, k1_eq); + let (updated_inner, _) = switch inner { + case (null) { insert(null, k2, k2_eq, v) }; + case (?inner) { insert(inner, k2, k2_eq, v) }; + }; + let (updated_outer, _) = { insert>(t, k1, k1_eq, updated_inner) }; + updated_outer; + }; + + /** + `insert3D` + --------------- + insert the given key's value in the trie; return the new trie; + */ + func insert3D + (t : Trie3D, + k1:Key, k1_eq:(K1,K1)->Bool, + k2:Key, k2_eq:(K2,K2)->Bool, + k3:Key, k3_eq:(K3,K3)->Bool, + v:V + ) + : Trie3D + { + let inner1 = find>(t, k1, k1_eq); + let (updated_inner1, _) = switch inner1 { + case (null) { + insert>( + null, k2, k2_eq, + (insert(null, k3, k3_eq, v)).0 + ) + }; + case (?inner1) { + let inner2 = find>(inner1, k2, k2_eq); + let (updated_inner2, _) = switch inner2 { + case (null) { insert(null, k3, k3_eq, v) }; + case (?inner2) { insert(inner2, k3, k3_eq, v) }; + }; + insert>( inner1, k2, k2_eq, updated_inner2 ) + }; + }; + let (updated_outer, _) = { insert>(t, k1, k1_eq, updated_inner1) }; + updated_outer; + }; + + /** + `remove` + ------------- + remove the given key's value in the trie; return the new trie + */ func remove(t : Trie, k:Key, k_eq:(K,K)->Bool) : (Trie, ?V) { replace(t, k, k_eq, null) }; - // find the given key's value in the trie, or return null if nonexistent + /** + `removeThen` + ------------ + remove the given key's value in the trie, + and only if successful, do the success continuation, + otherwise, return the failure value + */ + func removeThen(t : Trie, k:Key, k_eq:(K,K)->Bool, + success: (Trie, V) -> X, + fail: () -> X) + : X + { + let (t2, ov) = replace(t, k, k_eq, null); + switch ov { + case (null) { /* no prior value; failure to remove */ fail() }; + case (?v) { success(t2, v) }; + } + }; + + + /** + `remove2D` + -------------- + remove the given key-key pair's value in the 2D trie; return the + new 
trie, and the prior value, if any. + */ + func remove2D(t : Trie2D, + k1:Key, k1_eq:(K1,K1)->Bool, + k2:Key, k2_eq:(K2,K2)->Bool) + : (Trie2D, ?V) + { + switch (find>(t, k1, k1_eq)) { + case (null) { + (t, null) + }; + case (?inner) { + let (updated_inner, ov) = remove(inner, k2, k2_eq); + let (updated_outer, _) = { + insert>(t, k1, k1_eq, updated_inner) + }; + (updated_outer, ov) + }; + } + }; + + /** + `remove3D` + --------------- + remove the given key-key pair's value in the 3D trie; return the + new trie, and the prior value, if any. + */ + func remove3D + (t : Trie3D, + k1:Key, k1_eq:(K1,K1)->Bool, + k2:Key, k2_eq:(K2,K2)->Bool, + k3:Key, k3_eq:(K3,K3)->Bool, + ) + : (Trie3D, ?V) + { + switch (find>(t, k1, k1_eq)) { + case (null) { + (t, null) + }; + case (?inner) { + let (updated_inner, ov) = remove2D(inner, k2, k2_eq, k3, k3_eq); + let (updated_outer, _) = { + insert>(t, k1, k1_eq, updated_inner) + }; + (updated_outer, ov) + }; + } + }; + + + /** + `find` + --------- + find the given key's value in the trie, or return null if nonexistent + */ func find(t : Trie, k:Key, k_eq:(K,K) -> Bool) : ?V { let key_eq = keyEq(k_eq); // For `bitpos` in 0..HASH_BITS, walk the given trie and locate the given value `x`, if it exists. @@ -349,7 +383,7 @@ let Trie = new { }; case (?n) { assertIsBin(t); - let bit = getHashBit(k.hash, bitpos); + let bit = Hash.getHashBit(k.hash, bitpos); if (not bit) { rec(n.left, bitpos+1) } else { rec(n.right, bitpos+1) } }; @@ -368,10 +402,21 @@ let Trie = new { rec(t, 0) }; - // merge tries, preferring the right trie where there are collisions - // in common keys. note: the `disj` operation generalizes this `merge` - // operation in various ways, and does not (in general) loose - // information; this operation is a simpler, special case. + /** + `merge` + --------- + merge tries, preferring the right trie where there are collisions + in common keys. 
note: the `disj` operation generalizes this `merge` + operation in various ways, and does not (in general) loose + information; this operation is a simpler, special case. + + See also: + + - [`disj`](#disj) + - [`join`](#join) + - [`prod`](#prod) + + */ func merge(tl:Trie, tr:Trie, k_eq:(K,K)->Bool): Trie { let key_eq = keyEq(k_eq); func rec(tl:Trie, tr:Trie) : Trie { @@ -417,9 +462,92 @@ let Trie = new { rec(tl, tr) }; - // The key-value pairs of the final trie consists of those pairs of - // the left trie whose keys are not present in the right trie; the - // values of the right trie are irrelevant. + /** + `mergeDisjoint` + ---------------- + like `merge`, it merges tries, but unlike `merge`, it signals a + dynamic error if there are collisions in common keys between the + left and right inputs. + */ + func mergeDisjoint(tl:Trie, tr:Trie, k_eq:(K,K)->Bool): Trie { + let key_eq = keyEq(k_eq); + func rec(tl:Trie, tr:Trie) : Trie { + switch (tl, tr) { + case (null, _) { return tr }; + case (_, null) { return tl }; + case (?nl,?nr) { + switch (isBin(tl), + isBin(tr)) { + case (true, true) { + let t0 = rec(nl.left, nr.left); + let t1 = rec(nl.right, nr.right); + makeBin(t0, t1) + }; + case (false, true) { + assert(false); + // XXX impossible, until we lift uniform depth assumption + tr + }; + case (true, false) { + assert(false); + // XXX impossible, until we lift uniform depth assumption + tr + }; + case (false, false) { + /// handle hash collisions by using the association list: + makeLeaf( + AssocList.disj,V,V,V>( + nl.keyvals, nr.keyvals, + key_eq, + func (x:?V, y:?V):V = { + switch (x, y) { + case (null, null) { + /* IMPOSSIBLE case. 
*/ + assert false; func x():V=x(); x() + }; + case (?_, ?_) { + /* INVALID case: left and right defined for the same key */ + assert false; func x():V=x(); x() + }; + case (null, ?v) v; + case (?v, null) v; + }} + )) + }; + } + }; + } + }; + rec(tl, tr) + }; + + + /** + `mergeDisjoint2D` + -------------- + + Like [`mergeDisjoint`](#mergedisjoint), except instead of merging a + pair, it merges the collection of dimension-2 sub-trees of a 2D + trie. + + */ + func mergeDisjoint2D(t : Trie2D, k1_eq:(K1,K1)->Bool, k2_eq:(K2,K2)->Bool) + : Trie + { + foldUp, Trie> + ( t, + func (t1:Trie, t2:Trie):Trie { mergeDisjoint(t1, t2, k2_eq) }, + func (_:K1, t:Trie): Trie { t }, + null ) + }; + + /** + `diff` + ------ + The key-value pairs of the final trie consists of those pairs of + the left trie whose keys are not present in the right trie; the + values of the right trie are irrelevant. + */ func diff(tl:Trie, tr:Trie, k_eq:(K,K)->Bool) : Trie { let key_eq = keyEq(k_eq); func rec(tl:Trie, tr:Trie) : Trie { @@ -457,15 +585,29 @@ let Trie = new { rec(tl, tr) }; - // This operation generalizes the notion of "set union" to finite maps. - // Produces a "disjunctive image" of the two tries, where the values of - // matching keys are combined with the given binary operator. - // - // For unmatched key-value pairs, the operator is still applied to - // create the value in the image. To accomodate these various - // situations, the operator accepts optional values, but is never - // applied to (null, null). - // + /** + `disj` + -------- + + This operation generalizes the notion of "set union" to finite maps. + + Produces a "disjunctive image" of the two tries, where the values of + matching keys are combined with the given binary operator. + + For unmatched key-value pairs, the operator is still applied to + create the value in the image. To accomodate these various + situations, the operator accepts optional values, but is never + applied to (null, null). 
+ + Implements the database idea of an ["outer join"](https://stackoverflow.com/questions/38549/what-is-the-difference-between-inner-join-and-outer-join). + + See also: + + - [`join`](#join) + - [`merge`](#merge) + - [`prod`](#prod) + + */ func disj(tl:Trie, tr:Trie, k_eq:(K,K)->Bool, vbin:(?V,?W)->X) : Trie @@ -528,11 +670,24 @@ let Trie = new { rec(tl, tr) }; - // This operation generalizes the notion of "set intersection" to - // finite maps. Produces a "conjuctive image" of the two tries, where - // the values of matching keys are combined with the given binary - // operator, and unmatched key-value pairs are not present in the output. - func conj(tl:Trie, tr:Trie, + /** + `join` + --------- + This operation generalizes the notion of "set intersection" to + finite maps. Produces a "conjuctive image" of the two tries, where + the values of matching keys are combined with the given binary + operator, and unmatched key-value pairs are not present in the output. + + Implements the database idea of an ["inner join"](https://stackoverflow.com/questions/38549/what-is-the-difference-between-inner-join-and-outer-join). + + See also: + + - [`disj`](#disj) + - [`merge`](#merge) + - [`prod`](#prod) + + */ + func join(tl:Trie, tr:Trie, k_eq:(K,K)->Bool, vbin:(V,W)->X) : Trie { @@ -564,7 +719,7 @@ let Trie = new { assert(isLeaf(tl)); assert(isLeaf(tr)); makeLeaf( - AssocList.conj,V,W,X>(nl.keyvals, nr.keyvals, key_eq, vbin) + AssocList.join,V,W,X>(nl.keyvals, nr.keyvals, key_eq, vbin) ) }; } @@ -573,10 +728,67 @@ let Trie = new { rec(tl, tr) }; - // This operation gives a recursor for the internal structure of - // tries. Many common operations are instantiations of this function, - // either as clients, or as hand-specialized versions (e.g., see map, - // mapFilter, exists and forAll below). + + /** + + `prod` + --------- + + Conditional _catesian product_, where the given + operation `op` _conditionally_ creates output elements in the + resulting trie. 
+ + The keyed structure of the input tries are not relevant for this + operation: all pairs are considered, regardless of keys matching or + not. Moreover, the resulting trie may use keys that are unrelated to + these input keys. + + See also: + + - [`disj`](#disj) + - [`join`](#join) + - [`merge`](#merge) + + */ + func prod( + tl :Trie, + tr :Trie, + op :(K1,V1,K2,V2) -> ?(Key,V3), + k3_eq :(K3,K3) -> Bool + ) + : Trie + { + /**- binary case: merge disjoint results: */ + func merge (a:Trie, b:Trie) : Trie = + mergeDisjoint(a, b, k3_eq); + + /**- `foldUp` "squared"; something like "`foldUp^2((tl, tr), merge, (insert null >( + tl, merge, + func (k1:K1, v1:V1) : Trie { + foldUp>( + tr, merge, + func (k2:K2, v2:V2) : Trie { + switch (op(k1, v1, k2, v2)) { + case null null; + case (?(k3, v3)) { (insert(null, k3, k3_eq, v3)).0 }; + } + }, + null + ) + }, + null + ) + }; + + /** + `foldUp` + ------------ + This operation gives a recursor for the internal structure of + tries. Many common operations are instantiations of this function, + either as clients, or as hand-specialized versions (e.g., see map, + mapFilter, exists and forAll below). + */ func foldUp(t:Trie, bin:(X,X)->X, leaf:(K,V)->X, empty:X) : X { func rec(t:Trie) : X { switch t { @@ -597,8 +809,12 @@ let Trie = new { rec(t) }; - // Fold over the key-value pairs of the trie, using an accumulator. - // The key-value pairs have no reliable or meaningful ordering. + /** + `fold` + --------- + Fold over the key-value pairs of the trie, using an accumulator. + The key-value pairs have no reliable or meaningful ordering. + */ func fold(t:Trie, f:(K,V,X)->X, x:X) : X { func rec(t:Trie, x:X) : X { switch t { @@ -618,7 +834,11 @@ let Trie = new { rec(t, x) }; - // specialized foldUp operation. + /** + `exists` + -------- + Test whether a given key-value pair is present, or not. 
+ */ func exists(t:Trie, f:(K,V)->Bool) : Bool { func rec(t:Trie) : Bool { switch t { @@ -638,7 +858,11 @@ let Trie = new { }; - // specialized foldUp operation. + /** + `forAll` + --------- + Test whether all key-value pairs have a given property. + */ func forAll(t:Trie, f:(K,V)->Bool) : Bool { func rec(t:Trie) : Bool { switch t { @@ -657,10 +881,51 @@ let Trie = new { rec(t) }; - // specialized foldUp operation. - // Test for "deep emptiness": subtrees that have branching structure, - // but no leaves. These can result from naive filtering operations; - // filter uses this function to avoid creating such subtrees. + /** + `count` + -------- + Count the number of entries in the trie. + */ + func count(t:Trie):Nat{ + foldUp + (t, + func(n:Nat,m:Nat):Nat{n+m}, + func(_:K,_:V):Nat{1}, + 0) + }; + + /** + `toArray` + -------- + Gather the collection of key-value pairs into an array. + + To do: make this more efficient, using a single array allocation. + */ + func toArray(t:Trie,f:(K,V)->[W]):[W]{ + func arrayAppend(x:[W],y:[W]):[W] { + Array_tabulate ( + x.len() + y.len(), + func (i:Nat) : W { + if (i >= x.len()) { y[i - x.len()] } + else { x[i] } + } + ) + }; + foldUp + (t, + arrayAppend, + func(k:K, v:V):[W]{f(k,v)}, + []) + }; + + /** + `isEmpty` + ----------- + specialized foldUp operation. + Test for "deep emptiness": subtrees that have branching structure, + but no leaves. These can result from naive filtering operations; + filter uses this function to avoid creating such subtrees. + */ func isEmpty(t:Trie) : Bool { func rec(t:Trie) : Bool { switch t { @@ -676,6 +941,11 @@ let Trie = new { rec(t) }; + /** + `filter` + ----------- + filter the key-value pairs by a given predicate. + */ func filter(t:Trie, f:(K,V)->Bool) : Trie { func rec(t:Trie) : Trie { switch t { @@ -705,6 +975,11 @@ let Trie = new { rec(t) }; + /** + `mapFilter` + ----------- + map and filter the key-value pairs by a given partial mapping function. 
+ */ func mapFilter(t:Trie, f:(K,V)->?W) : Trie { func rec(t:Trie) : Trie { switch t { @@ -741,13 +1016,18 @@ let Trie = new { rec(t) }; - // Test for equality, but naively, based on structure. - // Does not attempt to remove "junk" in the tree; - // For instance, a "smarter" approach would equate - // `#bin{left=#empty;right=#empty}` - // with - // `#empty`. - // We do not observe that equality here. + /** + `equalStructure` + ------------------ + + Test for equality, but naively, based on structure. + Does not attempt to remove "junk" in the tree; + For instance, a "smarter" approach would equate + `#bin{left=#empty;right=#empty}` + with + `#empty`. + We do not observe that equality here. + */ func equalStructure( tl:Trie, tr:Trie, @@ -781,4 +1061,196 @@ let Trie = new { rec(tl, tr) }; + // Equality function for two `Key`s, in terms of equaltiy of `K`'s. + func keyEq(keq:(K,K) -> Bool) : ((Key,Key) -> Bool) = { + func (key1:Key, key2:Key) : Bool = + (Hash.hashEq(key1.hash, key2.hash) and keq(key1.key, key2.key)) + }; + + /** + Helpers for missing variants + ============================== + Until ActorScript has variant types, we need various helper functions here. They are uninteresting. 
+ */ + // @Omit: + + // XXX: until AST-42: + func isNull(x : ?X) : Bool { + switch x { + case null { true }; + case (?_) { false }; + }; + }; + + // XXX: until AST-42: + func assertIsNull(x : ?X) { + switch x { + case null { assert(true) }; + case (?_) { assert(false) }; + }; + }; + + // XXX: until AST-42: + func makeEmpty() : Trie + = null; + + // XXX: until AST-42: + func assertIsEmpty(t : Trie) { + switch t { + case null { assert(true) }; + case (?_) { assert(false) }; + }; + }; + + // XXX: until AST-42: + func makeBin(l:Trie, r:Trie) : Trie { + ?(new {left=l; right=r; keyvals=null; }) + }; + + // XXX: until AST-42: + func isBin(t:Trie) : Bool { + switch t { + case null { false }; + case (?t_) { + switch (t_.keyvals) { + case null { true }; + case _ { false }; + }; + }; + } + }; + + // XXX: until AST-42: + func makeLeaf(kvs:AssocList,V>) : Trie { + ?(new {left=null; right=null; keyvals=kvs }) + }; + + // XXX: until AST-42: + func matchLeaf(t:Trie) : ?List<(Key,V)> { + switch t { + case null { null }; + case (?t_) { + switch (t_.keyvals) { + case (?keyvals) ?(?(keyvals)); + case (_) null; + } + }; + } + }; + + // XXX: until AST-42: + func isLeaf(t:Trie) : Bool { + switch t { + case null { false }; + case (?t_) { + switch (t_.keyvals) { + case null { false }; + case _ { true }; + } + }; + } + }; + // XXX: until AST-42: + func assertIsBin(t : Trie) { + switch t { + case null { assert(false) }; + case (?n) { + assertIsNull<((Key,V),AssocList,V>)>(n.keyvals); + }; + } + }; + + // XXX: until AST-42: + func getLeafKey(t : Node) : Key { + assertIsNull>(t.left); + assertIsNull>(t.right); + switch (t.keyvals) { + case (?((k,v),_)) { k }; + case (null) { /* ERROR */ getLeafKey(t) }; + } + }; + + // XXX: this helper is an ugly hack; we need real sum types to avoid it, I think: + func getLeafVal(t : Node) : V { + assertIsNull>(t.left); + assertIsNull>(t.right); + switch (t.keyvals) { + case (?((k,v),_)) { v }; + case null { /* ERROR */ getLeafVal(t) }; + } + }; + + + /** + 
More helpers
+ ==============================
+ */
+
+
+ /**
+ `buildNewPath`
+ ---------------
+ helper function for constructing new paths of uniform length
+ */
+
+ func buildNewPath(bitpos:Nat, k:Key, ov:?V) : Trie {
+ func rec(bitpos:Nat) : Trie {
+ if ( bitpos < HASH_BITS ) {
+ // create new bin node for this bit of the hash
+ let path = rec(bitpos+1);
+ let bit = Hash.getHashBit(k.hash, bitpos);
+ if (not bit) {
+ ?(new {left=path; right=null; keyvals=null})
+ }
+ else {
+ ?(new {left=null; right=path; keyvals=null})
+ }
+ } else {
+ // create new leaf for (k,v) pair, if the value is non-null:
+ switch ov {
+ case null { ?(new {left=null; right=null; keyvals=null }) };
+ case (?v) { ?(new {left=null; right=null; keyvals=?((k,v),null) }) };
+ }
+ }
+ };
+ rec(bitpos)
+ };
+ };
+
+
+/**
+
+Future work
+=============
+
+Tests
+---------
+more regression tests for everything documented in the [module interface](#module-interface).
+
+
+Variant types
+------------------------
+See [AST-42]() (sum types); we want this type definition instead:
+
+ ```
+ // Use a sum type (AST-42)
+ type Trie = { #leaf : LeafNode; #bin : BinNode; #empty };
+ type BinNode = { left:Trie; right:Trie };
+ type LeafNode = { key:K; val:V };
+ ```
+
+Adaptive path lengths
+----------------------
+
+Currently we assume a uniform path length. This can be inefficient,
+and requires careful tuning. In the future, we could adapt the path
+length of each subtree to its cardinality; this would avoid
+needlessly long paths, or paths that are too short for their subtree's
+size.
+
+Iterator objects
+-------------------
+for use in 'for ... in ...' 
patterns + +*/ diff --git a/test/bugs/unboundclass.as b/test/bugs/unboundclass.as new file mode 100644 index 00000000000..52737cb8982 --- /dev/null +++ b/test/bugs/unboundclass.as @@ -0,0 +1,10 @@ + +class Foo(f1:Int -> Int, f2:Int -> Int) { }; + +class Bar () { + + let Bar = Foo(g, g); + + private g(n:Int) : Int = n + 1; + +} diff --git a/test/bugs/usedefbug.as b/test/bugs/usedefbug.as new file mode 100644 index 00000000000..54d9b0cddc3 --- /dev/null +++ b/test/bugs/usedefbug.as @@ -0,0 +1,13 @@ + + +class Bar () { + + private class Foo(f1:Int -> Int, f2:Int -> Int) { + private bomb = f1(666) + f2(666); + }; + + let Bar = Foo(g, g); + + private g(n:Int) : Int = n + 1; + +}