Page Numbers: Yes X: 306 Y: 0.9" First Page: 11
Margins: Top: 1.1" Bottom: 1.2"
Heading:
PROBLEM SET #2 LISP: LANGUAGE AND LITERATURE May 14, 1984
————————————————————————————————————————————
Appendices and Code
Appendix A: Some Preliminary Utilities:

;;; Code stored as: {turing}<3-lisp.problem-sets>parsing-utilities.3-lisp
;;; {phylum}<3-lisp>course>problem-sets>parsing-utilities.3-lisp

;;; =================================================================
;;; SEQUENCE UTILITIES
;;; =================================================================
;;; (MAPPEND fun args1 ... argsk) Appends together the results of
;;; ============================= mapping fun (of arity k) to the
;;; sequences of args args1 ... argsk.
;;; Maps fun (arity k) across args1 ... argsk and appends the resulting
;;; sequences: the incoming argument rail is spread into MAP, and MAP's
;;; result rail is spread into APPEND.
(define MAPPEND
(lambda args
(append . (map . args))))
;;; (FILTER predicate sequence) Designates the sequence consisting
;;; =========================== of each element of sequence for which
;;; (predicate element) is true.
;;; The elements of sequence for which predicate is true: each element
;;; becomes a one-element rail or [], and MAPPEND flattens the result.
(define FILTER
(lambda [predicate sequence]
(mappend (lambda [element]
(if (predicate element)
[element]
[]))
sequence)))
;;; (COLLECT predicate selector sequence) Designates the sequence
;;; ===================================== of (selector element) for
;;; each element of sequence
;;; for which (predicate element) is true.
;;; The (selector element) of each element of sequence for which
;;; predicate is true; FILTER generalized by a selector function.
(define COLLECT
(lambda [predicate selector sequence]
(mappend (lambda [element]
(if (predicate element)
[(selector element)]
[]))
sequence)))
;;; =================================================================
;;; General purpose COMPOSE:
;;;
;;; ((compose f1 f2 ... fk) a1 a2 ... an) ; 1 =< k
;;;
;;; => (f1 (f2 ... (fk a1 a2 ... an)...)) ; 0 =< n
;;; =================================================================
;;; Functional composition: ((compose f1 ... fk) . args) normalizes to
;;; (f1 (f2 ... (fk . args))).  With no functions at all it behaves as
;;; the identity on the whole argument rail; with one, it is that
;;; function itself.
(define COMPOSE
(lambda funs
(cond [(null funs) (lambda args args)]
[(null (rest funs)) (first funs)]
[$true
(lambda args
((first funs)
((compose . (rest funs)) . args)))])))
;;; CONSTANT
;;; ========
;;; (CONSTANT c) designates a function that ignores its arguments and
;;; always returns c.
(define CONSTANT
(lambda [constant]
(lambda args constant)))

;;; ===============================================================
;;; STRING MANIPULATION ROUTINES
;;; ===============================================================
;;; True iff string is empty.
(define STRING-NULL
(lambda [string] (= string "")))
;;; First character of string (NTH-CHAR is 1-indexed).
(define STRING-FIRST
(lambda [string] (nth-char 1 string)))
;;; All of string except its first character.
(define STRING-REST
(lambda [string] (substring 2 (string-length string) string)))
;;; (EVERY-CHARACTER predicate string) True just in case predicate
;;; ================================== is true of each character in
;;; string.
;;; True just in case predicate holds of every character of string
;;; (vacuously true for the empty string).
(define EVERY-CHARACTER
(lambda [predicate string]
(cond [(string-null string) $true]
[(predicate (string-first string))
(every-character predicate (string-rest string))]
[$true $false])))

;;; ===============================================================
;;; STRANGE CONTROL STRUCTURES
;;; ===============================================================
;;; DYNAMIC-CATCH
;;; DYNAMIC-THROW
;;; =============
;;; Reflective procedure: normalizes its single argument with ID as the
;;; continuation, so a DYNAMIC-THROW inside the body returns its value
;;; directly through this catch via CONT.
(define DYNAMIC-CATCH
(rlambda [call env esc cont]
(cont (normalize (arg 1 call) env esc id))))
;;; Reflective procedure: normalizes its argument but DISCARDS the
;;; current continuation CONT, so control unwinds to the ID continuation
;;; installed by the dynamically-enclosing DYNAMIC-CATCH.
(define DYNAMIC-THROW
(rlambda [call env esc cont]
(normalize (arg 1 call) env esc id)))
;;; =================================================================
;;; OBJECTS:
;;; =================================================================
;;;
;;; General form:
;;;
;;; (object [init-var-1 ... init-var-k]
;;; [[var-1 init-1] ... [var-n init-n]]
;;; [<mess1> <fun1>]
;;; [<mess2> <fun2>]
;;; ...
;;; [<messk> <funk>])
;;; =================================================================
;;; OBJECT: a macro (MLAMBDA) implementing message-passing objects.
;;; The expansion (1) globally DEFINEs each message name as a forwarder
;;; so that (message obj . args) sends ,↑name to the object's
;;; dispatcher, and (2) yields a constructor closing over the state
;;; variables; the dispatcher compares the incoming message against
;;; each handle ,↑(first pair) and returns the matching method closure.
;;; NOTE(review): ’(... ,x ...) is used as backquote/unquote here, and
;;; (acons) generates a fresh atom per method for the LETREC names --
;;; confirm these conventions against the 3-LISP manual.
(define OBJECT
(letrec [[define-message
(lambda [name]
’(define ,name
(lambda args
(((first args) ,↑name) . (rest args)))))]]
(mlambda [call]
(letseq [[state-vars (arg 1 call)]
[inited-vars (arg 2 call)]
[pairs (tail 2 (pargs call))]
[fun-names (map (lambda [pair] (acons)) pairs)]]
’(begin
,(map (lambda [pair]
(define-message (first pair)))
pairs)
(lambda ,state-vars
(let ,inited-vars
(letrec ,(map (lambda [pair fun-name]
’[,fun-name ,(second pair)])
pairs
fun-names)
(lambda [message]
(cond . ,(map (lambda [pair fun-name]
’[(= message ,↑(first pair))
,fun-name])
pairs
fun-names)))))))))))
Appendix B: The Lexical Analyser:

;;; Code stored as: {turing}<3-lisp.problem-sets>lexical-analyser.3-lisp
;;; {phylum}<3-lisp>course>problem-sets>lexical-analyser.3-lisp

;;; ========================
;;; 3-LISP Lexical Analyser:
;;; ========================

;;; ===============================================================
;;; PUBLIC ROUTINES
;;; ===============================================================
;;; (INIT-LEXAN) Initializes all tables.
;;; ============
;;;
;;; (TOKEN-LIST string) A type of object that embodies the tokenization of a
;;; =================== given string. Supports the following messages:
;;;
;;; (CURRENT-TOKEN token-list) Returns the token currently being scanned.
;;; ==========================
;;;
;;; (GET-TOKEN token-list) Also returns the current token being scanned but
;;; ====================== moves past the token in the input. This is the
;;; normal method to be used in accessing tokens. It
;;; should be called once upon setting up a token-list object to initialize the
;;; lookahead token (next-tok).
;;;
;;; (NEXT-TOKEN token-list) Returns the ’look-ahead’ token, the token just
;;; ======================= about to be scanned. Useful for dispatching without
;;; actually reading the token.
;;;
;;; (TOKENIZE string) Designates the sequence of tokens into which string is
;;; ================= lexically analyzed. For example (tokenize "a (b)") would
;;; return ["a" "(" "b" ")"].
;;; Initializes the lexer's global character tables.  *special-chars*
;;; are single-character tokens in their own right; *delimiters*
;;; (white space plus specials) terminate a multi-character token;
;;; comments run from #; through end of line (cr).
;;; NOTE(review): [ # cr ] should be the space charat plus CR --
;;; confirm against the original listing; OCR may have lost characters.
(define INIT-LEXAN
(lambda []
(set *special-chars* [ #( #) #[ #] #$ #’ #, #. #↑ #\ #{ #} ])
(set *white-space-chars* [ # cr ])
(set *delimiters* (append *white-space-chars* *special-chars*))
(set *digits* [ #0 #1 #2 #3 #4 #5 #6 #7 #8 #9 ])
(set *comment-begin* #;)
(set *comment-end* cr)
"ok"))
;;; Constructor for token-list objects over STRING.  Each instance
;;; holds the unconsumed string plus current and look-ahead tokens;
;;; GET-TOKEN shifts the look-ahead into current-tok and scans the
;;; next token from the string.  The single (get-token tok-list) call
;;; below primes the look-ahead before the object is handed back, as
;;; the protocol documented above requires -- callers need not (and
;;; should not) prime it again.
(define TOKEN-LIST
(let [[create-token-list
(object [string]
[[current-tok ""]
[next-tok ""]]
[GET-TOKEN
(lambda []
(let [[token-info (global-get-token string)]]
(set string (second token-info))
(set current-tok next-tok)
(set next-tok (first token-info))
current-tok))]
[CURRENT-TOKEN (lambda [] current-tok)]
[NEXT-TOKEN (lambda [] next-tok)])]]
(lambda [string]
(let [[tok-list (create-token-list string)]]
(get-token tok-list)
tok-list))))
;;; Not dealt with by this code:
;;;
;;; -- strings
;;; -- charats
;;; -- character quotation (%)
;;; TOKENIZE:
;;; =========
;;; TOKENIZE Designates the sequence of tokens into which STRING is
;;; ======== lexically analysed, e.g. (tokenize "a (b)") => ["a" "(" "b" ")"].
;;;
;;; Fix: TOKEN-LIST's constructor already calls GET-TOKEN once to prime
;;; the look-ahead, so the extra priming (get-token a) the original had
;;; here shifted past -- and silently dropped -- the first real token
;;; of the string.
(define tokenize
(lambda [string]
(init-lexan)
(let [[a (token-list string)]]
(letrec [[collect (lambda []
(let [[tok (get-token a)]]
(if (= tok ’eot)
[]
(cons tok (collect)))))]]
(collect)))))

;;; ===============================================================
;;; INTERNAL ROUTINES
;;; ===============================================================
;;; GLOBAL-GET-TOKEN takes a STRING and returns a rail of:
;;; ================ a) the prefix of the string corresponding to the
;;; first token in the string
;;; b) the rest of the string after the prefix
;;; In case there are no tokens left in the string, returns the
;;; handle ’eot (end of tokens).
;;;
;;; Used by GET-TOKEN (within INIT-LEXAN)
;;; Returns a rail [token rest-of-string]; the token is the handle ’eot
;;; when the whitespace- and comment-stripped string is exhausted.
(define GLOBAL-GET-TOKEN
(lambda [string]
(add-to-token "" (strip-leading-whitespace string))))
;;; ADD-TO-TOKEN auxiliary function for GLOBAL-GET-TOKEN which
;;; ============ adds characters to a token found so far (SO-FAR)
;;; taking characters from a string STRING.
;;;
;;; Uses information from global-variables: *special-chars*
;;; defined by INIT-LEXAN: *delimiters*
;;; Accumulates the next token into SO-FAR.  Cases, in order: string
;;; exhausted (yield the token so far, or ’eot if there is none); a
;;; special character at the start stands alone as its own token; any
;;; delimiter terminates the token without being consumed; otherwise
;;; move one more character from STRING onto SO-FAR and recurse.
(define ADD-TO-TOKEN
(lambda [so-far string]
(cond [(string-null string)
[(if (string-null so-far) ’eot so-far) string]]
[(and (string-null so-far)
(member (string-first string) *special-chars*))
[(substring 1 1 string) (string-rest string)]]
[(member (string-first string) *delimiters*)
[so-far string]]
[$true
(add-to-token (string-append so-far
(substring 1 1 string))
(string-rest string))])))
;;; STRIP-LEADING-WHITESPACE removes all leading white-space and comments
;;; ======================== from a string. White-space is defined as
;;; any character in the global variable
;;; *white-space-chars* (defined by INIT-LEXAN).
;;; Comments are any string of characters starting with the character
;;; bound to *comment-begin*, up through the character *comment-end*
;;; Repeatedly drops leading white space and whole comments from
;;; STRING until a token character (or the end) is reached.
(define STRIP-LEADING-WHITESPACE
(lambda [string]
(cond [(string-null string) string]
[(member (string-first string) *white-space-chars*)
(strip-leading-whitespace (string-rest string))]
[(= (string-first string) *comment-begin*)
(strip-leading-whitespace (strip-comment string))]
[$true
string])))
;;; STRIP-COMMENT Removes one full comment prefix from a string. A comment
;;; ============= is defined as any string of characters starting with the
;;; globally bound *comment-begin* character, up through and
;;; including *comment-end* (both defined by INIT-LEXAN).
;;; Drops one comment prefix: everything up to and including the
;;; *comment-end* character, or the whole string if it never appears.
(define STRIP-COMMENT
(lambda [string]
(cond [(string-null string) string]
[(= (string-first string) *comment-end*)
(string-rest string)]
[$true
(strip-comment (string-rest string))])))
Appendix C: A "Doubly Implicit" 3-LISP Recognizer:

;;; Code stored as: {turing}<3-lisp.problem-sets>3-lisp-recognizer.3-lisp
;;; {phylum}<3-lisp>course>problem-sets>3-lisp-recognizer.3-lisp

;;;==========================================================================
;;; Simple 3-LISP Expression Recognizer
;;;==========================================================================
;;;
;;; Recognizes 3-LISP expressions including rails, pairs and simple symbols.
;;; Does not handle numerals, handles, strings, or booleans.
;;; Built on top of the 3-LISP Lexical Analyser
;;; -------------------------------------------
;;; Assumed grammar: (<token> is the primitive lexical item)
;;;
;;; <expression> ::= <rail-exp> | <pair-exp> | <identifier>
;;;
;;; <rail-exp> ::= "[" <expression>* "]"
;;; <pair-exp> ::= "(" <expression>+ ")"
;;; <identifier> ::= <alphanumeric token>
;;; RECOGNIZE Main recognition routine. Returns a message
;;; ========= identifying whether or not the STRING encodes a single
;;; well-formed 3-LISP expression. This version ignores
;;; the information signaled by an error.
;;; Accepts STRING iff exactly one whole well-formed expression was
;;; read: the DYNAMIC-CATCH yields $false when an error is thrown by
;;; ERROR-IN-RECOGNITION, and the look-ahead must be ’eot so that no
;;; input remains after the expression.
(define RECOGNIZE
(lambda [string]
(init-lexan)
(letseq [[s (token-list string)]
[answer (dynamic-catch (recognize-expression s))]]
(if (and answer (= (next-token s) ’eot))
"Accepted"
"Rejected"))))

;;; Ancillary recognition functions:
;;;
;;; All the following read functions are responsible for reading just one
;;; type of 3-LISP expression. They use GET-TOKEN and other such methods,
;;; defined in the lexical-analyser package, on TOKEN-LIST objects. They
;;; call the procedure ERROR-IN-RECOGNITION immediately upon finding
;;; a grammatical error of any sort, which does not return (the procedures
;;; below are not designed to do anything sensible about continuing from
;;; errors), but instead should error or do a DYNAMIC-THROW (q.v.).

;;; RECOGNIZE-EXPRESSION Attempts to recognize an arbitrary 3-LISP
;;; ==================== expression.
;;; Dispatches on the look-ahead token WITHOUT consuming it; the
;;; chosen routine is responsible for reading its own tokens.
(define RECOGNIZE-EXPRESSION
(lambda [s]
(select (next-token s)
["[" (recognize-rail s)]
["(" (recognize-pair s)]
[$true (recognize-identifier s)])))

;;; RECOGNIZE-RAIL attempts to recognize a 3-LISP rail expression
;;; ==============
;;; Recognizes "[" <expression>* "]".
(define RECOGNIZE-RAIL
(lambda [s]
(get-token s) ; skip the open bracket
(recognize-exp-list s "]")
(get-token s) ; skip the close bracket
$true))

;;; RECOGNIZE-EXP-LIST attempts to recognize a list of 3-LISP expressions
;;; ================== delimited by (but not including) the token
;;; ENDING-TOKEN.
;;; Recognizes expressions until the look-ahead equals ENDING-TOKEN,
;;; which is left unconsumed for the caller to skip.
(define RECOGNIZE-EXP-LIST
(lambda [s ending-token]
(if (= (next-token s) ending-token)
$true
(begin (recognize-expression s)
(recognize-exp-list s ending-token)))))

;;; RECOGNIZE-PAIR attempts to recognize a 3-LISP pair expression
;;; ==============
;;; Recognizes "(" <expression>+ ")".
(define RECOGNIZE-PAIR
(lambda [s]
(get-token s) ; skip the open parenthesis
(recognize-expression s) ; there must be at least one exp.
(recognize-exp-list s ")")
(get-token s) ; skip the close parenthesis
$true))

;;; RECOGNIZE-IDENTIFIER attempts to recognize a 3-LISP identifier
;;; ====================
;;; Consumes one token; errors on end-of-tokens, brackets, and
;;; parentheses, and on any token containing a special character.
(define RECOGNIZE-IDENTIFIER
(lambda [s]
(let [[tok (get-token s)]]
(cond [(member tok [’eot "[" "]" "(" ")"])
(error-in-recognition tok)]
[(every-character
(lambda [ch] (not (member ch *special-chars*)))
tok)
$true]
[$true (error-in-recognition tok)]))))
;;; ERROR-IN-RECOGNITION Signals an error.
;;; ====================
;;; Aborts recognition by throwing $false to the DYNAMIC-CATCH in
;;; RECOGNIZE; the offending token is accepted but not otherwise used.
(define ERROR-IN-RECOGNITION
(lambda [string]
(dynamic-throw $false)))
Appendix D: A "Doubly Implicit" 3-LISP Internalizer:

;;; Code stored as: {turing}<3-lisp.problem-sets>3-lisp-internalizer.3-lisp
;;; {phylum}<3-lisp>course>problem-sets>3-lisp-internalizer.3-lisp

;;;==========================================================================
;;; Simple 3-LISP Expression Internalizer
;;;==========================================================================
;;;
;;; Internalizes 3-LISP expressions including rails, pairs and simple symbols.
;;; Does not handle numerals, handles, strings, or booleans. Builds internal
;;; structure for the expression.
;;; Assumed grammar: (<token> is the primitive lexical item)
;;;
;;; <expression> ::= <rail-exp> | <pair-exp> | <identifier>
;;;
;;; <rail-exp> ::= "[" <expression>* "]"
;;; <pair-exp> ::= "(" <expression>+ ")"
;;; <identifier> ::= <alphanumeric token>
;;; (INTERNALIZATION string) Builds the 3-LISP internal structure corresponding to string.
;;; ========================
;;; (INTERNALIZATION string) Builds the 3-LISP internal structure
;;; ======================== corresponding to STRING, or "Rejected" on
;;; a grammatical error (thrown as $false) or on trailing tokens.
;;; Fix: NEXT-TOKEN requires the token-list object as its argument
;;; (cf. RECOGNIZE in Appendix C); the original called (next-token)
;;; with no argument.
(define INTERNALIZATION
(lambda [string]
(init-lexan)
(letseq [[s (token-list string)]
[answer (dynamic-catch (internalize-expression s))]]
(if (and (not (= answer $false))
(= (next-token s) ’eot))
answer
"Rejected"))))

;;; Ancillary internalize functions:
;;;
;;; All the following internalize functions are responsible for internalizing
;;; just one type of 3-LISP expression. They use GET-TOKEN and other such methods,
;;; defined in the lexical-analyser package, on TOKEN-LIST objects. They
;;; call the procedure ERROR-IN-INTERNALIZATION immediately upon finding
;;; a grammatical error of any sort, which should not return (the procedures
;;; below are not designed to do anything sensible about continuing from
;;; errors), but instead should error or do a DYNAMIC-THROW (q.v.).
;;;
;;; All of the procedures return handles to the 3-LISP structure built.

;;; INTERNALIZE-EXPRESSION Attempts to internalize an arbitrary 3-LISP
;;; ====================== expression, returning the corresponding structure.
;;; Dispatches on the look-ahead token WITHOUT consuming it, returning
;;; the structure built by the chosen routine.
(define INTERNALIZE-EXPRESSION
(lambda [s]
(select (next-token s)
["[" (internalize-rail s)]
["(" (internalize-pair s)]
[$true (internalize-identifier s)])))

;;; INTERNALIZE-RAIL attempts to internalize a 3-LISP rail expression
;;; ================
;;; Internalizes "[" <expression>* "]": RCONS spreads the sequence of
;;; internalized elements to construct the rail.
(define INTERNALIZE-RAIL
(lambda [s]
(get-token s) ; skip the open bracket
(let [[answer (internalize-exp-list s "]")]]
(get-token s) ; skip the close bracket
(rcons . answer))))

;;; INTERNALIZE-PAIR attempts to internalize a 3-LISP pair expression
;;; ================
;;; Internalizes "(" <expression>+ ")": the first element becomes the
;;; CAR of the constructed pair, the remainder its CDR rail.
(define INTERNALIZE-PAIR
(lambda [s]
(get-token s) ; skip the open parenthesis
(let [[answer (internalize-exp-list s ")")]]
(get-token s) ; skip the close parenthesis
(pcons (first answer) (rcons . (rest answer))))))

;;; INTERNALIZE-EXP-LIST attempts to internalize a list of 3-LISP expressions
;;; ==================== delimited by (but not including) the token
;;; ENDING-TOKEN, returning a rail of handles
;;; designating the structures notated by the various elements in the
;;; list read. (Whew.)
;;; Sequence of the internalizations of each expression up to (but not
;;; consuming) ENDING-TOKEN.
(define INTERNALIZE-EXP-LIST
(lambda [s ending-token]
(if (= (next-token s) ending-token)
[]
(cons (internalize-expression s)
(internalize-exp-list s ending-token)))))

;;; INTERNALIZE-IDENTIFIER attempts to internalize a 3-LISP identifier
;;; ======================
;;; INTERNALIZE-IDENTIFIER attempts to internalize a 3-LISP identifier,
;;; ====================== returning the atom the token notates.
;;; Fix: ERROR-IN-INTERNALIZATION binds one parameter [string]; the
;;; original called it with no argument.  Pass the offending token,
;;; matching the recognizer's (error-in-recognition tok).
(define INTERNALIZE-IDENTIFIER
(lambda [s]
(let [[tok (get-token s)]]
(if (member tok [’eot "[" "]" "(" ")"])
(error-in-internalization tok)
(atom-notated tok)))))
;;; ERROR-IN-INTERNALIZATION Signals an error.
;;; ========================
;;; Aborts internalization by throwing $false to the DYNAMIC-CATCH in
;;; INTERNALIZATION; the offending token is accepted but unused.
(define ERROR-IN-INTERNALIZATION
(lambda [string]
(dynamic-throw $false)))
Appendix E: A Simple Context-Free Recognizer with Explicit Grammars:

;;; Code stored as: {turing}<3-lisp.problem-sets>general-recognizer.3-lisp
;;; {phylum}<3-lisp>course>problem-sets>general-recognizer.3-lisp

;;; =======================================================================
;;;
;;; RECURSIVE DESCENT RECOGNIZER
;;;
;;; =======================================================================

;;; Grammars are to be represented as a sequence of rules, where each rule is
;;; a sequence of a left-hand side and one or more right-hand side symbols.
;;; For example:
;;;
;;; (set *sample* [[’s ’np ’vp]
;;; [’vp ’v ’np]
;;; [’vp ’v]
;;; [’v "slept"]
;;; [’v "loved"]
;;; [’v "wielded"]
;;; [’np "arthur"]
;;; [’np "gwen"]
;;; [’np "excalibur"]])
;;;
;;; then: (recognize "arthur slept" ’s *sample*) => "accepted"
;;; (recognize "arthur wielded excalibur" ’s *sample*) => "accepted"
;;; (recognize "arthur gwen" ’s *sample*) => "rejected"
;;; Driver: accepted iff some way of recognizing START-SYMBOL consumes
;;; all of EXPRESSION.  NOTE(review): the code below treats EXPRESSION
;;; as a sequence of words, while the comment examples above pass raw
;;; strings -- presumably they are tokenized first; confirm.
(define RECOGNIZE
(lambda [expression start-symbol grammar]
(if (full-recognize (recognize-symbol expression start-symbol grammar))
"accepted"
"rejected")))
;;; True iff some remaining-input tail is empty, i.e. some derivation
;;; consumed the entire input.
(define FULL-RECOGNIZE
(lambda [list-of-tails]
(some null list-of-tails)))
;;; Sequence of possible remaining-input tails after recognizing
;;; SYMBOL at the front of EXPRESSION: a direct lexical match yields a
;;; single tail; otherwise every expansion of SYMBOL's right-hand
;;; sides contributes its tails.
(define RECOGNIZE-SYMBOL
(lambda [expression symbol grammar]
(if (matches symbol (first expression) grammar)
[(rest expression)]
(mappend (lambda [rhs] (recognize-frontier expression rhs grammar))
(rhsides symbol grammar)))))
;;; Recognizes the symbols of FRONTIER, in order, against EXPRESSION.
;;; Empty frontier: trivial success with the input unconsumed; empty
;;; input with symbols remaining: failure (no tails).
;;; NOTE(review): $t is presumably the boolean $true used elsewhere in
;;; this file -- confirm the notation.
(define RECOGNIZE-FRONTIER
(lambda [expression frontier grammar]
(cond [(null frontier) [expression]]
[(null expression) []]
[$t (recognize-all (recognize-symbol expression (first frontier) grammar)
(rest frontier)
grammar)])))
;;; Continues recognizing FRONTIER from each alternative input tail in
;;; FRAGMENTS, appending all outcomes.
(define RECOGNIZE-ALL
(lambda [fragments frontier grammar]
(mappend (lambda [fragment]
(recognize-frontier fragment frontier grammar))
fragments)))
;;; True iff [nonterm word] is itself a (lexical) rule of grammar.
(define MATCHES
(lambda [nonterm word grammar]
(member [nonterm word] grammar)))
;;; The right-hand sides (rule tails) of all rules whose left-hand
;;; side is nonterm.
(define RHSIDES
(lambda [nonterm grammar]
(collect (lambda [rule] (= nonterm (first rule)))
rest
grammar)))
Appendix F: A Simple Context-Free Parser with Explicit Grammars:

;;; Code stored as: {turing}<3-lisp.problem-sets>general-parser.3-lisp
;;; {phylum}<3-lisp>course>problem-sets>general-parser.3-lisp

;;; =======================================================================
;;;
;;; RECURSIVE DESCENT PARSER
;;;
;;; =======================================================================
;;; Similar to Recognizer, but returns the parse:
;;;
;;; Basic idea is to have all the parsing routines return a sequence of
;;; parse/word-sequence pairs, where the parse is the parse so far and
;;; the word-sequence is the postfix of the input sequence after the part
;;; that has been parsed so far.
;;; (PARSE expression start-symbol grammar) -- the sequence of complete
;;; parses of EXPRESSION, or "rejected" when there are none.  A "good"
;;; parse is one whose remaining-input tail (SECOND) is empty; its
;;; parse tree (FIRST) is what we keep.
;;; Fix: FILTER (Appendix A) takes only [predicate sequence]; selecting
;;; with a separate selector function is the three-argument COLLECT, as
;;; the parallel transducer code in Appendix G uses.
(define PARSE
(lambda [expression start-symbol grammar]
(let [[good-parses
(collect (compose null second)
first
(parse-symbol expression start-symbol grammar))]]
(if (null good-parses)
"rejected"
good-parses))))
;;; Sequence of [parse remaining-input] analyses of SYMBOL at the head
;;; of EXPRESSION: a direct lexical match yields one analysis pairing
;;; [symbol word] with the rest of the input; otherwise every analysis
;;; over each of SYMBOL's right-hand sides.
(define PARSE-SYMBOL
(lambda [expression symbol grammar]
(if (matches symbol (first expression) grammar)
[[[symbol (first expression)] (rest expression)]]
(mappend (lambda [rhs]
(parse-frontier symbol expression rhs grammar))
(rhsides symbol grammar)))))
;;; Parses the symbols of FRONTIER against EXPRESSION, wrapping each
;;; completed constituent under the label SYMBOL (note: via PARSE-ALL
;;; the SYMBOL position also carries partial parses).  Empty frontier:
;;; success with input unconsumed; empty input with work left: failure.
;;; NOTE(review): $t is presumably the boolean $true -- confirm.
(define PARSE-FRONTIER
(lambda [symbol expression frontier grammar]
(cond [(null frontier) [[symbol expression]]]
[(null expression) []]
[$t (map (lambda [parse]
[[symbol (first parse)]
(second parse)])
(parse-all (parse-symbol expression
(first frontier)
grammar)
(rest frontier)
grammar))])))
;;; Continues parsing FRONTIER from each partial analysis in FRAGMENTS
;;; (each a [parse-so-far remaining-input] pair), appending results.
(define PARSE-ALL
(lambda [fragments frontier grammar]
(mappend (lambda [fragment]
(parse-frontier (first fragment)
(second fragment)
frontier
grammar))
fragments)))
;;; Other routines are as in Appendix E.
Appendix G: A Context-Free Transducer with Explicit Grammars and Augments:

;;; Code stored as: {turing}<3-lisp.problem-sets>general-transducer.3-lisp
;;; {phylum}<3-lisp>course>problem-sets>general-transducer.3-lisp

;;; =======================================================================
;;;
;;; RECURSIVE DESCENT TRANSDUCER with AUGMENTS
;;;
;;; =======================================================================
;;; Modified version takes functional augments and applies them to constituents
;;; as they are found.
;;; An analysis is a rail of the form: [transduction tail]
;;; A transduction is a function to be applied to the transductions
;;; of the remaining constituents to be found, or (in the base case)
;;; any 3-LISP entity. The transductions are gotten from the grammar.
;;; The grammar format is a rail of rules, each rule of the form:
;;;
;;; [lhs transduction rhs]
;;; Transducing-parser driver: tokenizes STRING and parses from
;;; START-SYMBOL with the identity as the initial transduction, keeping
;;; the transduction (FIRST) of every analysis whose remaining input
;;; (SECOND) is empty; "rejected" if no analysis consumes everything.
(define PARSE
(lambda [string start-symbol grammar]
(let [[good-parses
(collect (compose null second)
first
(parse-symbol id
(tokenize string)
start-symbol
grammar))]]
(if (null good-parses)
"rejected"
good-parses))))
;;; Sequence of [transduction remaining-input] analyses of SYMBOL at
;;; the head of TAIL.  On a direct lexical match, TRANS is applied to
;;; the word itself; otherwise each rule's own transduction (first rhs)
;;; drives the parse of its body, and TRANS is applied to each
;;; completed constituent transduction on the way out.
(define PARSE-SYMBOL
(lambda [trans tail symbol grammar]
(if (matches symbol (first tail)) ; If direct match, return single
[[(trans (first tail)) (rest tail)]] ; good parse, transduced
(map (lambda [analysis]
[(trans (first analysis))
(second analysis)])
(mappend (lambda [rhs] ; otherwise trans analyses
(parse-frontier (first rhs) ; obtainable by the recursive
tail ; parsing calls
(rest rhs)
grammar))
(rhsides symbol grammar))))))
;;; Parses the symbols of FRONTIER in order, threading TRANS through
;;; as the transduction-so-far; empty frontier yields the single
;;; analysis [[trans tail]], empty input with work left yields none.
(define PARSE-FRONTIER
(lambda [trans tail frontier grammar]
(cond [(null frontier) [[trans tail]]]
[(null tail) []]
[$true (parse-all (parse-symbol trans tail (first frontier) grammar)
(rest frontier)
grammar)])))
;;; Continues parsing FRONTIER from each analysis; each analysis's
;;; transduction becomes the TRANS for the rest of the frontier.
(define PARSE-ALL
(lambda [analyses frontier grammar]
(mappend (lambda [analysis]
(parse-frontier (first analysis)
(second analysis)
frontier
grammar))
analyses)))
;;; A word matches the pseudo-terminal ’atom-expression via the lexical
;;; test, or a string terminal by simple equality.
;;; NOTE(review): (string word) is presumably a string type predicate,
;;; and $T/$F presumably the booleans $true/$false -- confirm notation.
(define MATCHES
(lambda [nonterm word]
(cond [(= nonterm ’atom-expression) (atom-expression word)]
[(string word) (= nonterm word)]
[$T $F])))
;;; True of a token that would notate an atom: it contains no special
;;; characters and is not composed entirely of digits (a numeral).
(define ATOM-EXPRESSION
(lambda [word]
(and (every-character
(lambda [ch] (not (member ch *special-chars*)))
word)
(not (every-character
(lambda [ch] (member ch *digits*))
word)))))
;;; The tails of all rules for nonterm; for augmented rules each tail
;;; is [transduction rhs-symbol ...].
(define RHSIDES
(lambda [nonterm grammar]
(collect (lambda [rule] (= nonterm (first rule)))
rest
grammar)))
Appendix H: Some Test Grammars:

;;; Code stored as: {turing}<3-lisp.problem-sets>sample-grammars.3-lisp
;;; {phylum}<3-lisp>course>problem-sets>sample-grammars.3-lisp

;;; =======================================================================
;;; Sample grammars:
;;; Toy English grammar with curried tree-building transductions: each
;;; rule's function absorbs one constituent transduction at a time and
;;; finally yields a labelled tree such as [’s np vp].
(set *G*
[[’s (lambda [np] (lambda [vp] [’s np vp])) ’np ’vp]
[’vp (lambda [v] (lambda [np] [’vp v np])) ’v ’np]
[’vp (lambda [v] [’vp v]) ’v]
[’v (lambda [lex] [’v lex]) "slept"]
[’v (lambda [lex] [’v lex]) "loved"]
[’v (lambda [lex] [’v lex]) "wielded"]
[’np (lambda [lex] [’np lex]) "arthur"]
[’np (lambda [lex] [’np lex]) "gwen"]
[’np (lambda [lex] [’np lex]) "excalibur"]
])
;;; The same surface grammar transducing into applicative "semantic"
;;; structures (Montague-style): lexical items become constants and
;;; S/VP rules build pairs via PCONS.
;;; NOTE(review): the trailing / in ’sleep/ etc. may be an artifact of
;;; the original listing -- confirm the intended atom names.
(set montague-gram
[[’s (lambda [np] (lambda [vp] (pcons vp np))) ’np ’vp]
[’vp (lambda [v] (lambda [np] (pcons v np))) ’v ’np]
[’vp (lambda [v] v) ’v]
[’v (lambda [lex] ’sleep/ ) "slept"]
[’v (lambda [lex] ’love/ ) "loved"]
[’v (lambda [lex] ’wield/ ) "wielded"]
[’np (lambda [lex] ’arthur/ ) "arthur"]
[’np (lambda [lex] ’gwen/ ) "gwen"]
[’np (lambda [lex] ’excalibur/ ) "excalibur"]
])
;;; Unaugmented grammar (for the Appendix E recognizer) separating
;;; intransitive ’v0 from transitive ’v1 verbs; terminals are atoms.
(set *G2*
[[’s ’np ’vp]
[’vp ’v1 ’np]
[’vp ’v0]
[’v0 ’slept]
[’v0 ’loved]
[’v1 ’loved]
[’v1 ’wielded]
[’np ’arthur]
[’np ’gwen]
[’np ’excalibur]])
;;; Grammar whose transductions build labelled description trees
;;; (’expr, ’pair, ’rail, ’atom, ’numeral) for 3-LISP surface syntax;
;;; the ’pair-args / ’rail-args rules accumulate constituents until
;;; the closing ")" or "]".
(set description-grammar
[[’expr (lambda [pair] [’expr pair])
’pair]
[’expr (lambda [rail] [’expr rail])
’rail]
[’expr (lambda [atom] [’expr [’atom atom]])
’atom-expression]
[’expr (lambda [numexpr] [’expr [’numeral numexpr]])
’numeral-expression]
[’pair (lambda [paren]
(lambda [fun]
(lambda [args] [’pair fun (cons ’pair-args args)])))
"(" ’expr ’pair-args]
[’pair-args (lambda [paren] [])
")"]
[’pair-args (lambda [first]
(lambda [rest] (cons first rest)))
’expr ’pair-args]
[’rail (lambda [bracket]
(lambda [elements] (cons ’rail elements)))
"[" ’rail-args]
[’rail-args (lambda [bracket] [])
"]"]
[’rail-args (lambda [first]
(lambda [rest] (cons first rest)))
’expr ’rail-args]])
;;; Grammar whose transductions all return $true: parsing is used for
;;; recognition only.
(set recognition-grammar
[[’expr id ’pair]
[’expr id ’rail]
[’expr id ’atom-expression]
[’pair (lambda [pren] (lambda [fun] (lambda [args] $true)))
"(" ’expr ’pair-args]
[’pair-args (lambda [pren] $true) ")"]
[’pair-args (lambda [first] (lambda [rest] $true))
’expr ’pair-args]
[’rail (lambda [brak] (lambda [elements] $true))
"[" ’rail-args]
[’rail-args (lambda [brak] $true) "]"]
[’rail-args (lambda [first] (lambda [rest] $true))
’expr ’rail-args]])
;;; Grammar whose transductions construct the internal 3-LISP
;;; structures themselves: atoms via ATOM-NOTATED, pairs via PCONS,
;;; rails via RCONS.
;;; NOTE(review): the original listing was garbled here by stray prose
;;; fused into the rail; the [’pair-args ... ")"] base-case rule is
;;; restored by analogy with recognition-grammar and description-grammar
;;; above -- confirm against the original source.
(set structure-grammar
[[’expr id ’pair]
[’expr id ’rail]
[’expr (lambda [tok] (atom-notated tok)) ’atom-expression]
[’pair (lambda [pren] (lambda [fun] (lambda [args]
(pcons fun (rcons . args)))))
"(" ’expr ’pair-args]
[’pair-args (lambda [pren] []) ")"]
[’pair-args (lambda [first] (lambda [rest] (cons first rest)))
’expr ’pair-args]
[’rail (lambda [brak] (lambda [elements] (rcons . elements)))
"[" ’rail-args]
[’rail-args (lambda [brak] []) "]"]
[’rail-args (lambda [first] (lambda [rest] (cons first rest)))
’expr ’rail-args]])
;;; Normalizes EXP in the global environment with the standard escape
;;; and the identity continuation; used by process-grammar below.
(define NORM
(lambda [exp]
(normalize exp global standard-escape id)))
;;; Grammar whose transductions PROCESS the input as it is parsed:
;;; identifiers look up their global bindings, numerals are interned,
;;; and constructed pairs and rails are normalized via NORM -- in
;;; effect a parser-driven evaluator.
(set process-grammar
[[’expr id ’pair]
[’expr id ’rail]
[’expr
(lambda [tok] (binding (atom-notated tok) global))
’atom-expression]
[’expr
(lambda [numexp] (intern numexp))
’numeral-expression]
[’pair
(lambda [paren]
(lambda [fun]
(lambda [args]
(norm (pcons fun (rcons . args))))))
"(" ’expr ’pair-args]
[’pair-args
(lambda [paren] [])
")"]
[’pair-args
(lambda [first]
(lambda [rest]
(cons first rest)))
’expr ’pair-args]
[’rail
(lambda [bracket]
(lambda [elements]
(norm (rcons . elements))))
"[" ’rail-args]
[’rail-args
(lambda [bracket] [])
"]"]
[’rail-args
(lambda [first]
(lambda [rest]
(cons first rest)))
’expr ’rail-args]])
Test Cases:
;;; Test Cases:
;;; NOTE(review): these calls mix the Appendix F and Appendix G
;;; argument conventions for PARSE-SYMBOL, and G3 is not defined in
;;; these appendices (presumably one of the grammars above) -- confirm
;;; before running.
(parse-symbol [(lambda [x] x) [’arthur ’slept] ] ’s montague-gram)
(parse "arthur slept" ’s *g*)
(parse "arthur wielded excalibur" ’s montague-gram)
(parse-symbol [(lambda [x] x) [’arthur ’slept] ] ’s *g*)
(parse "(lambda [a b] c d)" ’expr g3)
;;; The following example can be run using the recognition-grammar,
;;; the structure grammar, or the description grammar. However, the
;;; engendered computation barely fits in 3-LISP (on the Dorado).
;;; Be forewarned.
;;; Stress test: the argument is one multi-line string containing
;;; comments and blank lines, exercising the lexer's comment stripping
;;; together with the full transducing parser.
(parse "

;;; This is a test of the top-down
;;; recursive descent parser

(lambda [x y]
(x [y]))

;;; end of test


"

’expr description-grammar)
;;; =======================================================================