Compare commits


1 commit

@ -1,57 +0,0 @@
name: CI
on: [push, pull_request]
jobs:
run:
name: "Build using Racket '${{ matrix.racket-version }}' (${{ matrix.racket-variant }})"
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
racket-version: ["6.6", "6.7", "6.8", "6.9", "6.10.1", "6.11", "6.12", "7.0", "7.1", "7.2", "7.3", "7.4", "7.5", "7.6", "7.7", "7.8", "7.9", "8.0", "8.1", "8.2", "8.3", "current"]
racket-variant: ["BC", "CS"]
# CS builds are only provided for versions 7.4 and up so avoid
# running the job for prior versions.
exclude:
- {racket-version: "6.6", racket-variant: "CS"}
- {racket-version: "6.7", racket-variant: "CS"}
- {racket-version: "6.8", racket-variant: "CS"}
- {racket-version: "6.9", racket-variant: "CS"}
- {racket-version: "6.10.1", racket-variant: "CS"}
- {racket-version: "6.11", racket-variant: "CS"}
- {racket-version: "6.12", racket-variant: "CS"}
- {racket-version: "7.0", racket-variant: "CS"}
- {racket-version: "7.1", racket-variant: "CS"}
- {racket-version: "7.2", racket-variant: "CS"}
- {racket-version: "7.3", racket-variant: "CS"}
steps:
- name: Checkout
uses: actions/checkout@master
- uses: Bogdanp/setup-racket@v0.11
with:
distribution: 'full'
version: ${{ matrix.racket-version }}
variant: ${{ matrix.racket-variant }}
- name: Install BR parser tools
run: raco pkg install --deps search-auto https://github.com/mbutterick/br-parser-tools.git?path=br-parser-tools-lib
- name: Run the br-parser-tools tests
run: xvfb-run raco test -p br-parser-tools-lib
- name: Install brag-lib
run: raco pkg install --deps search-auto https://github.com/mbutterick/brag.git?path=brag-lib
- name: Run the brag-lib tests
run: xvfb-run raco test -p brag-lib
- name: Install brag
run: raco pkg install --deps search-auto https://github.com/mbutterick/brag.git?path=brag
- name: Run the brag tests
run: xvfb-run raco test -p brag

.gitignore

@ -14,6 +14,3 @@ Icon
# Files that might appear on external disk
.Spotlight-V100
.Trashes
brag/*.html
brag/*.css
brag/*.js

@ -0,0 +1,50 @@
# adapted from
# https://github.com/greghendershott/travis-racket/blob/master/.travis.yml
# Thanks Greg!
language: c
sudo: false
env:
global:
- RACKET_DIR=~/racket
matrix:
# - RACKET_VERSION=6.0
# - RACKET_VERSION=6.1
# - RACKET_VERSION=6.2
- RACKET_VERSION=6.3
- RACKET_VERSION=6.4
- RACKET_VERSION=6.5
- RACKET_VERSION=6.6
- RACKET_VERSION=6.7
- RACKET_VERSION=6.8
- RACKET_VERSION=6.9
- RACKET_VERSION=6.10
- RACKET_VERSION=6.11
- RACKET_VERSION=6.12
- RACKET_VERSION=HEAD
# You may want to test against certain versions of Racket, without
# having them count against the overall success/failure.
matrix:
allow_failures:
#- env: RACKET_VERSION=HEAD
# Fast finish: Overall build result is determined as soon as any of
# its rows have failed, or, all of its rows that aren't allowed to
# fail have succeeded.
fast_finish: true
before_install:
- "export DISPLAY=:99.0" # needed for testing with `racket/gui`
- "sh -e /etc/init.d/xvfb start" # needed for testing with `racket/gui`
- git clone https://github.com/mbutterick/travis-racket.git
- cat travis-racket/install-racket.sh | bash # pipe to bash not sh!
- export PATH="${RACKET_DIR}/bin:${PATH}" #install-racket.sh can't set for us
script:
- cd .. # Travis did a cd into the dir. Back up, for the next:
# don't rely on package server
- travis_retry raco pkg install --deps search-auto https://github.com/mbutterick/br-parser-tools.git?path=br-parser-tools-lib
- raco test -p br-parser-tools-lib
- travis_retry raco pkg install --deps search-auto https://github.com/mbutterick/brag.git?path=brag
- raco test -p brag

@ -0,0 +1,165 @@
GNU LESSER GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
This version of the GNU Lesser General Public License incorporates
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.
0. Additional Definitions.
As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.
"The Library" refers to a covered work governed by this License,
other than an Application or a Combined Work as defined below.
An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.
A "Combined Work" is a work produced by combining or linking an
Application with the Library. The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".
The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.
The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.
1. Exception to Section 3 of the GNU GPL.
You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.
2. Conveying Modified Versions.
If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:
a) under this License, provided that you make a good faith effort to
ensure that, in the event an Application does not supply the
function or data, the facility still operates, and performs
whatever part of its purpose remains meaningful, or
b) under the GNU GPL, with none of the additional permissions of
this License applicable to that copy.
3. Object Code Incorporating Material from Library Header Files.
The object code form of an Application may incorporate material from
a header file that is part of the Library. You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:
a) Give prominent notice with each copy of the object code that the
Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the object code with a copy of the GNU GPL and this license
document.
4. Combined Works.
You may convey a Combined Work under terms of your choice that,
taken together, effectively do not restrict modification of the
portions of the Library contained in the Combined Work and reverse
engineering for debugging such modifications, if you also do each of
the following:
a) Give prominent notice with each copy of the Combined Work that
the Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the Combined Work with a copy of the GNU GPL and this license
document.
c) For a Combined Work that displays copyright notices during
execution, include the copyright notice for the Library among
these notices, as well as a reference directing the user to the
copies of the GNU GPL and this license document.
d) Do one of the following:
0) Convey the Minimal Corresponding Source under the terms of this
License, and the Corresponding Application Code in a form
suitable for, and under terms that permit, the user to
recombine or relink the Application with a modified version of
the Linked Version to produce a modified Combined Work, in the
manner specified by section 6 of the GNU GPL for conveying
Corresponding Source.
1) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (a) uses at run time
a copy of the Library already present on the user's computer
system, and (b) will operate properly with a modified version
of the Library that is interface-compatible with the Linked
Version.
e) Provide Installation Information, but only if you would otherwise
be required to provide such information under section 6 of the
GNU GPL, and only to the extent that such information is
necessary to install and execute a modified version of the
Combined Work produced by recombining or relinking the
Application with a modified version of the Linked Version. (If
you use option 4d0, the Installation Information must accompany
the Minimal Corresponding Source and Corresponding Application
Code. If you use option 4d1, you must provide the Installation
Information in the manner specified by section 6 of the GNU GPL
for conveying Corresponding Source.)
5. Combined Libraries.
You may place library facilities that are a work based on the
Library side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:
a) Accompany the combined library with a copy of the same work based
on the Library, uncombined with any other library facilities,
conveyed under the terms of this License.
b) Give prominent notice with the combined library that part of it
is a work based on the Library, and explaining where to find the
accompanying uncombined form of the same work.
6. Revised Versions of the GNU Lesser General Public License.
The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the
Library as you received it specifies that a certain numbered version
of the GNU Lesser General Public License "or any later version"
applies to it, you have the option of following the terms and
conditions either of that published version or of any later version
published by the Free Software Foundation. If the Library as you
received it does not specify a version number of the GNU Lesser
General Public License, you may choose any version of the GNU Lesser
General Public License ever published by the Free Software Foundation.
If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.

@ -1,9 +0,0 @@
MIT License for `brag` (code only)
© 2017-2020 Matthew Butterick
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

@ -1,24 +1,19 @@
## brag ![Build Status](https://github.com/mbutterick/brag/workflows/CI/badge.svg)
brag [![Build Status](https://travis-ci.org/mbutterick/brag.svg?branch=master)](https://travis-ci.org/mbutterick/brag)
=
Racket DSL for generating parsers from BNF grammars.
## Install
Licensed under the LGPL. See `LICENSE`.
`raco pkg install brag`
## Documentation
http://docs.racket-lang.org/brag/
Install
-
## License
MIT. See `LICENSE.md`
`raco pkg install brag`
## Project status
Complete. I will maintain the code but no major updates are planned.
Documentation
-
http://docs.racket-lang.org/brag/

@ -1,12 +0,0 @@
`brag` contains substantial portions of the software [`ragg`](https://github.com/jbclements/ragg)
MIT License for `ragg`
© 2012-2013 Danny Yoo
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

@ -1,6 +0,0 @@
#lang brag
start: A c def hello-world
A : "\"\101\\" ; A
c : '\'\U0063\\' ; c
def : "*\u64\\\"\\\x65f\"" ; de
hello-world : "\150\145\154\154\157\40\167\157\162\154\144"

@ -1,3 +0,0 @@
#lang brag
start : ( (X | X Y) A* )*

@ -1,4 +0,0 @@
#lang brag
start: next
next: "0"

@ -1,3 +0,0 @@
#lang brag
/top : sub
sub : "x"

@ -1,3 +0,0 @@
#lang brag
/top : sub
/sub : "x"

@ -1,3 +0,0 @@
#lang brag
/top : sub
@sub : "x"

@ -1,4 +0,0 @@
#lang info
(define test-omit-paths '("examples/simple-line-drawing/examples/letter-i.rkt"))
(define compile-omit-paths '("test" "examples"))

@ -1,39 +0,0 @@
#lang racket/base
(require brag/support)
(provide current-source
current-parser-error-handler
current-tokenizer-error-handler)
;; During parsing, we should define the source of the input.
(define current-source (make-parameter 'unknown))
;; When a parse error happens, we call the current-parser-error-handler:
(define current-parser-error-handler
(make-parameter
(lambda (tok-name tok-value offset line col span)
(raise (exn:fail:parsing
(format "Encountered parsing error near ~e (token ~e) while parsing ~e [line=~a, column=~a, offset=~a]"
tok-value tok-name
(current-source)
line col offset)
(current-continuation-marks)
(list (srcloc (current-source) line col offset span)))))))
;; When a tokenization error happens, we call the current-tokenizer-error-handler.
(define current-tokenizer-error-handler
(make-parameter
(lambda (tok-type tok-value offset line column span)
(raise (exn:fail:parsing
(string-append
(format "Encountered unexpected token of type ~e (value ~e) while parsing"
(if (memq tok-type (map string->symbol '("\n" "\t" "\r")))
(format "~a" tok-type)
tok-type) tok-value)
(if (or (current-source) line column offset)
(format " ~e [line=~a, column=~a, offset=~a]" (current-source) line column offset)
""))
(current-continuation-marks)
(list (srcloc (current-source) line column offset span)))))))

@ -1,177 +0,0 @@
#lang racket/base
(require (for-syntax racket/base "parser.rkt"))
(require br-parser-tools/lex
(prefix-in : br-parser-tools/lex-sre)
"parser.rkt"
"rule-structs.rkt"
(only-in brag/support from/to)
racket/string
syntax-color/racket-lexer
racket/match)
(provide lex/1 tokenize)
(module+ lex-abbrevs
(provide hide-char splice-char id-char letter digit NL id))
;; A newline can be any one of the following.
(define-lex-abbrev NL (:or "\r\n" "\r" "\n"))
;; reserved-chars = chars used for quantifiers & parse-tree filtering
(define-for-syntax quantifiers "+:*?{}") ; colon is reserved to separate rules and productions
(define-lex-trans reserved-chars
(λ(stx) #`(char-set #,(format "~a~a~a" quantifiers hide-char splice-char))))
(define-lex-trans hide-char-trans (λ(stx) #`(char-set #,(format "~a" hide-char))))
(define-lex-trans splice-char-trans (λ(stx) #`(char-set #,(format "~a" splice-char))))
(define-lex-abbrevs
[letter (:or (:/ "a" "z") (:/ #\A #\Z))]
[digit (:/ #\0 #\9)]
[id-char (:or letter digit (:& (char-set "+:*@!-.$%&/=?^_~<>") (char-complement (reserved-chars))))]
[hide-char (hide-char-trans)]
[splice-char (splice-char-trans)]
)
(define-lex-abbrev id (:& (complement (:+ digit)) (:+ id-char)))
(define-lex-abbrev id-separator (:or ":" "::="))
(define (unescape-double-quoted-lexeme lexeme start-pos end-pos)
;; use `read` so brag strings have all the notational semantics of Racket strings
(with-handlers ([exn:fail:read?
(λ (e) ((current-parser-error-handler)
#f
'error
lexeme
(position->pos start-pos)
(position->pos end-pos)))])
(list->string `(#\" ,@(string->list (read (open-input-string lexeme))) #\"))))
(define (convert-to-double-quoted lexeme)
;; brag supports single-quoted strings, for some reason
;; (Racket does not. A single quote denotes a datum)
;; let's convert a single-quoted string into standard double-quoted style
;; so we can use Racket's `read` function on it.
;; and thereby support all the standard Racket string elements:
;; https://docs.racket-lang.org/reference/reader.html#%28part._parse-string%29
(define outside-quotes-removed (string-trim lexeme "'"))
(define single-quotes-unescaped (string-replace outside-quotes-removed "\\'" "'"))
(define double-quotes-escaped (string-replace single-quotes-unescaped "\"" "\\\""))
(define double-quotes-on-ends (string-append "\"" double-quotes-escaped "\""))
double-quotes-on-ends)
(define-lex-abbrev backslash "\\")
(define-lex-abbrev single-quote "'")
(define-lex-abbrev escaped-single-quote (:: backslash single-quote))
(define-lex-abbrev double-quote "\"")
(define-lex-abbrev escaped-double-quote (:: backslash double-quote))
(define-lex-abbrev escaped-backslash (:: backslash backslash))
(define brag-lex
(lexer-src-pos
;; we delegate lexing of double-quoted strings to the Racket lexer (see below)
;; single-quoted string has to be handled manually (see lex/1 for details)
[(:: single-quote
(intersection
(:* (:or escaped-single-quote escaped-backslash (:~ single-quote)))
(complement (:: any-string backslash escaped-single-quote any-string)))
single-quote)
(token-LIT (unescape-double-quoted-lexeme (convert-to-double-quoted lexeme) start-pos end-pos))]
[(:or "()" "Ø" "")
(token-EMPTY lexeme)]
["("
(token-LPAREN lexeme)]
["["
(token-LBRACKET lexeme)]
[")"
(token-RPAREN lexeme)]
["]"
(token-RBRACKET lexeme)]
[hide-char
(token-HIDE lexeme)]
[splice-char
(token-SPLICE lexeme)]
["|"
(token-PIPE lexeme)]
[(:or "+" "*" "?"
(:: "{" (:* digit) (:? (:: "," (:* digit))) "}"))
(token-REPEAT lexeme)]
;; Skip whitespace
[whitespace
(return-without-pos (lex/1 input-port))]
;; skip multiline comments
[(from/to "(*" "*)") (return-without-pos (lex/1 input-port))]
;; Skip comments up to end of line
[(:: (:or "#" ";")
(complement (:: (:* any-char) NL (:* any-char)))
(:or NL ""))
(return-without-pos (lex/1 input-port))]
;; skip commas (concatenation is implied)
["," (return-without-pos (lex/1 input-port))]
[(eof)
(token-EOF lexeme)]
[(:: id (:* whitespace) id-separator)
(token-RULE_HEAD lexeme)]
[(:: hide-char id (:* whitespace) id-separator)
(token-RULE_HEAD_HIDDEN lexeme)]
[(:: splice-char id (:* whitespace) id-separator)
(token-RULE_HEAD_SPLICED lexeme)]
[id
(token-ID lexeme)]
;; We call the error handler for everything else:
[(:: any-char)
(let-values ([(rest-of-text end-pos-2)
(lex-nonwhitespace input-port)])
((current-parser-error-handler)
#f
'error
(string-append lexeme rest-of-text)
(position->pos start-pos)
(position->pos end-pos-2)))]))
(define (lex/1-with-racket-lexer ip [conversion-proc values])
;; delegate lexing of strings to the default Racket lexer
(define-values (line-start col-start pos-start) (port-next-location ip))
(define str (read ip))
(define-values (line-end col-end pos-end) (port-next-location ip))
(make-position-token (token-LIT (string-append "\"" str "\""))
(make-position pos-start line-start col-start)
(make-position pos-end line-end col-end)))
(define (lex/1 ip)
(match (peek-bytes 1 0 ip)
[#"\"" (lex/1-with-racket-lexer ip)]
;; it would be nice to also handle single-quoted strings with the Racket lexer
;; but we can only change the opening delimiter with the readtable.
;; for whatever reason, the closing delimiter still has to be a double quote.
;; "mapping a character to the same action as a " means that the character starts a string, but the string is still terminated with a closing ". "
;; https://docs.racket-lang.org/reference/readtables.html#%28def._%28%28quote._~23~25kernel%29._make-readtable%29%29
#;[#"'" (parameterize ([current-readtable (make-readtable (current-readtable)
#\' #\" #f)])
'lex-single-quoted-string
(lex/1-with-racket-lexer ip convert-to-double-quoted))]
[_ (brag-lex ip)]))
;; This is the helper for the error production.
(define lex-nonwhitespace
(lexer
[(:+ (char-complement whitespace))
(values lexeme end-pos)]
[any-char
(values lexeme end-pos)]
[(eof)
(values "" end-pos)]))
;; position->pos: position -> pos
;; Converts position structures from br-parser-tools/lex to our own pos structures.
(define (position->pos a-pos)
(pos (position-offset a-pos)
(position-line a-pos)
(position-col a-pos)))
;; tokenize: input-port -> (-> token)
(define (tokenize ip #:source [source (object-name ip)])
(λ () (parameterize ([file-path source])
(lex/1 ip))))

@ -1,10 +0,0 @@
#lang racket/base
(require brag/examples/codepoints
rackunit)
(check-equal? (parse-to-datum '("\"A\\" "'c\\" "*d\\\"\\ef\"" "hello world"))
'(start (A "\"A\\")
(c "'c\\")
(def "*d\\\"\\ef\"")
(hello-world "hello world")))

@ -1,17 +0,0 @@
#lang racket/base
(require rackunit
brag/support
brag/examples/subrule)
(define parse-next (make-rule-parser next))
(define parse-start (make-rule-parser start))
(check-equal? (syntax->datum (parse #f "0")) '(start (next "0")))
(check-equal? (syntax->datum (parse #f "0")) (syntax->datum (parse "0")))
(check-equal? (syntax->datum (parse-start #f "0")) '(start (next "0")))
(check-equal? (syntax->datum (parse-start #f "0")) (syntax->datum (parse-start "0")))
(check-equal? (syntax->datum (parse-next #f "0")) '(next "0"))
(check-equal? (syntax->datum (parse-next #f "0")) (syntax->datum (parse-next "0")))

@ -1,9 +0,0 @@
#lang racket/base
(require brag/examples/nested-repeats
rackunit)
(check-equal?
(syntax->datum (parse (list "X" "Y" "X")))
'(start "X" "Y" "X"))

@ -1,11 +0,0 @@
#lang racket/base
(require (prefix-in 1: brag/examples/top-level-cut-1)
(prefix-in 2: brag/examples/top-level-cut-2)
(prefix-in 3: brag/examples/top-level-cut-3)
brag/support
rackunit)
(check-equal? (1:parse-to-datum "x") '((sub "x")))
(check-equal? (2:parse-to-datum "x") '(("x")))
(check-equal? (3:parse-to-datum "x") '("x"))

@ -1,10 +0,0 @@
#lang info
(define collection 'multi)
(define deps '(["base" #:version "6.3"]
"br-parser-tools-lib"
"rackunit-lib"
"syntax-color-lib"))
(define implies '("br-parser-tools-lib"))

@ -29,8 +29,6 @@
@title{brag: a better Racket AST generator}
@author["Danny Yoo (95%)" "Matthew Butterick (5%)"]
@defmodulelang[brag]
@ -538,24 +536,16 @@ Here's the definition for
brag/examples/simple-line-drawing/semantics
#:read my-read
#:read-syntax my-read-syntax
#:info my-get-info
#:whole-body-readers? #t
(require brag/examples/simple-line-drawing/lexer
brag/examples/simple-line-drawing/grammar)
brag/examples/simple-line-drawing/grammar)
(define (my-read in)
(syntax->datum (my-read-syntax #f in)))
(syntax->datum (my-read-syntax #f in)))
(define (my-read-syntax src ip)
(list (parse src (tokenize ip))))
(define (my-get-info key default default-filter)
(case key
[(color-lexer)
(dynamic-require 'syntax-color/default-lexer 'default-lexer)]
[else
(default-filter key default)]))
(list (parse src (tokenize ip))))
}|
}
@ -572,43 +562,43 @@ compilation:
(require (for-syntax racket/base syntax/parse))
(provide #%module-begin
;; We reuse Racket's treatment of raw datums, specifically
;; for strings and numbers:
#%datum
;; And otherwise, we provide definitions of these three forms.
;; During compilation, Racket uses these definitions to
;; rewrite into for loops, displays, and newlines.
drawing rows chunk)
;; We reuse Racket's treatment of raw datums, specifically
;; for strings and numbers:
#%datum
;; And otherwise, we provide definitions of these three forms.
;; During compilation, Racket uses these definitions to
;; rewrite into for loops, displays, and newlines.
drawing rows chunk)
;; Define a few compile-time functions to do the syntax rewriting:
(begin-for-syntax
(define (compile-drawing drawing-stx)
(syntax-parse drawing-stx
[({~literal drawing} row-stxs ...)
(define (compile-drawing drawing-stx)
(syntax-parse drawing-stx
[({~literal drawing} rows-stxs ...)
(syntax/loc drawing-stx
(begin row-stxs ...))]))
(syntax/loc drawing-stx
(begin rows-stxs ...))]))
(define (compile-rows row-stx)
(syntax-parse row-stx
[({~literal rows}
({~literal repeat} repeat-number)
chunks ...
";")
(define (compile-rows rows-stx)
(syntax-parse rows-stx
[({~literal rows}
({~literal repeat} repeat-number)
chunks ...
";")
(syntax/loc row-stx
(for ([i repeat-number])
chunks ...
(newline)))]))
(syntax/loc rows-stx
(for ([i repeat-number])
chunks ...
(newline)))]))
(define (compile-chunk chunk-stx)
(syntax-parse chunk-stx
[({~literal chunk} chunk-size chunk-string)
(define (compile-chunk chunk-stx)
(syntax-parse chunk-stx
[({~literal chunk} chunk-size chunk-string)
(syntax/loc chunk-stx
(for ([k chunk-size])
(display chunk-string)))])))
(syntax/loc chunk-stx
(for ([k chunk-size])
(display chunk-string)))])))
;; Wire up the use of "drawing", "rows", and "chunk" to these
@ -675,7 +665,7 @@ generates.
@subsection[#:tag "brag-syntax"]{Syntax and terminology}
A program in the @tt{brag} language consists of the language line
@litchar{#lang brag}, followed by a collection of @tech{rule}s and
possibly @tech{line comment}s or @tech{multiline comment}s.
@tech{line comment}s.
A @deftech{rule} is a sequence consisting of: a @tech{rule identifier}, a separator (either @litchar{":"} or @litchar{"::="}), and a @tech{pattern}.
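A minimal sketch of such a program (rule and token names invented for illustration), showing both separators and a line comment:

#lang brag
; a date is a two-digit month, a separator, and a two-digit day
date : month "-" day
month ::= DIGIT DIGIT
day : DIGIT DIGIT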
@ -686,15 +676,14 @@ A @deftech{symbolic token identifier} is an @tech{identifier} that is in upper c
A @deftech{line comment} begins with either @litchar{#} or @litchar{;} and
continues till the end of the line.
A @deftech{multiline comment} begins with @litchar{(*} and ends with @litchar{*)}.
An @deftech{identifier} is a sequence of letters, numbers, or
characters in the set @racket["-.!$%&/<=>^_~@"]. It must not contain
@litchar{*}, @litchar{+}, @litchar{?}, or @litchar|{{}| and @litchar|{}}|, as those characters are used to denote quantification.
characters in the set @racket["-.!$%&/<=>?^_~@"]. It must not contain
@litchar{*}, @litchar{+}, or @litchar|{{}| and @litchar|{}}|, as those characters are used to denote quantification.
A @deftech{pattern} is one of the following:
@itemize[
@item{an implicit sequence of @tech{pattern}s separated by whitespace or commas.}
@item{an implicit sequence of @tech{pattern}s separated by whitespace.}
@item{a @deftech{terminal}: either a literal string or a @tech{symbolic token identifier}.
@ -702,7 +691,7 @@ A @deftech{pattern} is one of the following:
A literal string can match the string itself, or a @racket[token] structure whose type field contains that string (or its symbol form). So @racket["FOO"] in a rule pattern would match the tokens @racket["FOO"], @racket[(token "FOO" "bar")], or @racket[(token 'FOO "bar")].
A symbolic token identifier can also match the string version of the identifier, or a @racket[token] whose type field is the symbol or string form of the identifier. So @racket[FOO] in a rule pattern would @emph{also} match the tokens @racket["FOO"], @racket[(token 'FOO "bar")], or @racket[(token "FOO" "bar")]. (In every case, the value of a token, like @racket["bar"], can be anything, and may or may not be the same as the symbolic token identifier.)
A symbolic token identifier can also match the string version of the identifier, or a @racket[token] whose type field is the symbol or string form of the identifier. So @racket[FOO] in a rule pattern would @emph{also} match the tokens @racket["FOO"], @racket[(token 'FOO "bar")], or @racket[(token "FOO" "bar")]. (In every case, the value of a token, like @racket["bar"], can be anything, and may or may not be the same as its type.)
Because their underlying meanings are the same, the symbolic token identifier ends up being a notational convenience for readability inside a rule pattern. Typically, the literal string @racket["FOO"] is used to connote ``match the string @racket["FOO"] exactly'' and the symbolic token identifier @racket[FOO] specially connotes ``match a token of type @racket['FOO]''.
@ -771,7 +760,7 @@ More examples:
}
]
@subsection[#:tag "cuts-and-splices"]{Cuts & splices}
@subsection{Cuts & splices}
By default, every matched token shows up in the parse tree. But sometimes that means that the parse tree ends up holding a bunch of tokens that were only needed to complete the parsing. Once they've served their purpose, it's sometimes useful to filter them out (for instance, to simplify the implementation of a language expander). To help with this kind of housekeeping, @racket[brag] supports @emph{cuts} and @emph{splices}.
@ -826,7 +815,7 @@ term : factor (/'*' factor)*
| "8" | "9")+
}|
This time, the rule name disappears from the parse tree, but its nodes and elements remain:
This time, the rule name disppears from the parse tree, but its nodes and elements remain:
@racketblock['(expr (term ("1")) (term ("2") ("3")))]
@ -869,8 +858,6 @@ This time, all the appearances of @racket[term] nodes in the parse tree will hav
As a convenience, when a grammar element is spliced, or a rule name is cut, @racket[brag] preserves the rule name by adding it as a syntax property to the residual elements, using the rule name as a key, and the original syntax object representing the rule name as the value.
Caveat for the top-level rule: though the rule name can have a cut, it cannot have a splice — once you're at the top level, there's nothing above to splice into.
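The top-level-cut example grammars elsewhere in this diff condense these cases; per test-top-level-cut.rkt, with /top : sub as the top rule, parsing "x" yields:

sub : "x"    ; plain rule   => '((sub "x"))
/sub : "x"   ; cut          => '(("x"))
@sub : "x"   ; spliced      => '("x")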
@subsection{Syntax errors}
@ -921,15 +908,14 @@ grammars.
A program written in @litchar{#lang brag} produces a module that provides a few
bindings. The most important of these is @racket[parse]:
@defproc[(parse [source-path any/c #f]
@defproc[(parse [source any/c #f]
[token-source (or/c (sequenceof token)
(-> token))])
syntax?]{
Parses a series of @tech{tokens} according to the rules in the grammar, using the
first rule of the grammar for the initial production. The parse must completely consume
@racket[token-source]. The optional @racket[source-path] argument is used to enrich the
syntax-location fields.
Parses the sequence of @tech{tokens} according to the rules in the grammar, using the
first rule as the start production. The parse must completely consume
@racket[token-source].
The @deftech{token source} can either be a sequence, or a 0-arity function that
produces @tech{tokens}.
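A minimal usage sketch, assuming a grammar module named my-grammar (hypothetical) whose tokens are plain strings, as in the test files elsewhere in this diff:

(require my-grammar)                            ; a #lang brag module provides parse
(syntax->datum (parse (list "x" "y")))          ; token source given as a sequence
(syntax->datum (parse 'my-src (list "x" "y")))  ; with a source value for the srclocs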
@ -1050,10 +1036,10 @@ In addition to the exports shown below, the @racketmodname[brag/support] module
@defproc[(token [type (or/c string? symbol?)]
[val any/c #f]
[#:line line (or/c exact-positive-integer? #f) #f]
[#:column column (or/c exact-nonnegative-integer? #f) #f]
[#:position position (or/c exact-positive-integer? #f) #f]
[#:span span (or/c exact-nonnegative-integer? #f) #f]
[#:line line (or/c positive-integer? #f) #f]
[#:column column (or/c natural-number? #f) #f]
[#:position position (or/c positive-integer? #f) #f]
[#:span span (or/c natural-number? #f) #f]
[#:skip? skip? boolean? #f]
)
token-struct?]{
@ -1068,10 +1054,10 @@ In addition to the exports shown below, the @racketmodname[brag/support] module
@defstruct[token-struct ([type symbol?]
[val any/c]
[position (or/c exact-positive-integer? #f)]
[line (or/c exact-nonnegative-integer? #f)]
[column (or/c exact-positive-integer? #f)]
[span (or/c exact-nonnegative-integer? #f)]
[position (or/c positive-integer? #f)]
[line (or/c natural-number? #f)]
[column (or/c positive-integer? #f)]
[span (or/c natural-number? #f)]
[skip? boolean?])
#:transparent]{
The token structure type.
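A brief usage sketch of the token constructor documented above (all values arbitrary):

(require brag/support)
(token 'INT "42" #:position 1 #:line 1 #:column 0 #:span 2)
(token 'WS " " #:skip? #t)  ; conventionally used for input the parser should ignore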
@ -1094,23 +1080,19 @@ In addition to the exports shown below, the @racketmodname[brag/support] module
DrRacket should highlight the offending locations in the source.}
@defproc[(apply-port-proc [proc procedure?]
[port (or/c string? input-port?) (current-input-port)])
list?]{
Repeatedly apply @racket[proc] to @racket[port], gathering the results into a list. @racket[port] can be an input port or a string (which is converted to a string port). Useful for testing or debugging a lexer or tokenizer.
}
@defproc[(apply-lexer [lexer procedure?]
[port (or/c string? input-port?) (current-input-port)])
@defproc[(apply-tokenizer-maker [tokenizer-maker procedure?]
[source (or/c string?
input-port?)])
list?]{
Alias for @racket[apply-port-proc].
Repeatedly apply @racket[tokenizer-maker] to @racket[source], gathering the resulting tokens into a list. @racket[source] can be a string or an input port. Useful for testing or debugging a tokenizer.
}
@defproc[(apply-tokenizer-maker [tokenizer-maker procedure?]
[port (or/c string? input-port?) (current-input-port)])
@defproc[(apply-lexer [lexer procedure?]
[source (or/c string?
input-port?)])
list?]{
Repeatedly apply @racket[tokenizer-maker] to @racket[port], gathering the resulting tokens into a list. @racket[port] can be an input port or a string (which is converted to a string port).
Repeatedly apply @racket[lexer] to @racket[source], gathering the resulting tokens into a list. @racket[source] can be a string or an input port. Useful for testing or debugging a lexer.
}
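A usage sketch for these helpers, assuming a tokenizer maker make-tokenizer and a lexer my-lexer defined elsewhere (both names hypothetical):

(require brag/support)
(apply-tokenizer-maker make-tokenizer "1 + 2")  ; list of the generated tokens
(apply-lexer my-lexer "1 + 2")                  ; list of the lexer's results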
@ -1184,28 +1166,11 @@ In other words, this matches @emph{all} the @racket[re]s in order, whereas @rack
@defform[(from/to open close)]{
A string that is bounded by literal tokens @racket[open] and @racket[close]. Matching is non-greedy (meaning, it stops at the first occurrence of @racket[close]). The resulting lexeme includes @racket[open] and @racket[close]. To remove them, see @racket[trim-ends].}
A string that is bounded by @racket[open] and @racket[close]. Matching is non-greedy (meaning, it stops at the first occurrence of @racket[close]). The resulting lexeme includes @racket[open] and @racket[close]. To remove them, see @racket[trim-ends].}
@defform[(from/stop-before open close)]{
Like @racket[from/to], a string that is bounded by literal tokens @racket[open] and @racket[close], except that @racket[close] is not included in the resulting lexeme. Matching is non-greedy (meaning, it stops at the first occurrence of @racket[close]).}
@subsection{Differences with @tt{ragg}}
This package is a fork of @link["https://docs.racket-lang.org/ragg"]{@racket[ragg]}. The most salient additions:
@itemize[
@item{@seclink["cuts-and-splices"]{Cuts & splices}.}
@item{Improved @seclink["brag-syntax"]{syntax} for grammars, including comments and and quantified patterns.}
@item{Support for REPL interactions.}
@item{Some new conveniences, like @racket[parse-to-datum], @racket[apply-lexer], @racket[trim-ends], @racket[from/to], and @racket[from/stop-before].
}
Like @racket[from/to], a string that is bounded by @racket[open] and @racket[close], except that @racket[close] is not included in the resulting lexeme. Matching is non-greedy (meaning, it stops at the first occurrence of @racket[close]).}
]
@close-eval[my-eval]
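A hedged lexer sketch using from/to, modeled on the syntax colorer elsewhere in this diff (and assuming brag/support re-exports the lexer form and abbreviations from br-parser-tools/lex):

(require brag/support)
(define quoted-string-lexer
  (lexer
   [(from/to "\"" "\"") (token 'LIT lexeme)]  ; lexeme keeps the quotes; see trim-ends
   [any-char (token 'OTHER lexeme)]))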

@ -1,4 +0,0 @@
#lang info
(define scribblings '(("brag.scrbl")))

@ -2,7 +2,6 @@
(require (for-syntax racket/base
racket/list
"codegen.rkt"
"runtime.rkt"
"flatten.rkt")
br-parser-tools/lex
br-parser-tools/cfg-parser
@ -25,7 +24,7 @@
(syntax-case rules-stx ()
[(_) (raise-syntax-error 'brag
(format "The grammar does not appear to have any rules")
(syntax-source rules-stx))]
'brag-module)]
[(_ . RULES)
(let ([rules (syntax->list #'RULES)]) ;; (listof stx)
@ -71,38 +70,33 @@
(cons eof token-EOF)
(cons 'TOKEN-TYPE TOKEN-TYPE-CONSTRUCTOR) ...)))
(define-syntax (MAKE-RULE-PARSER stx)
(syntax-case stx ()
[(_ START-RULE-ID)
(and (identifier? #'START-RULE-ID) (member (syntax-e #'START-RULE-ID) 'RULE-IDS))
(define-syntax (MAKE-RULE-PARSER rule-id-stx)
(syntax-case rule-id-stx ()
[(_ start-rule)
(and (identifier? #'start-rule)
(member (syntax-e #'start-rule) 'RULE-IDS))
;; The cfg-parser depends on the start-rule provided in (start ...) to have the same
;; context as the rest of this body. Hence RECOLORED-START-RULE
(with-syntax ([RECOLORED-START-RULE (datum->syntax #'RULES-STX (syntax-e #'START-RULE-ID))])
#'(let ()
(define (rule-parser tokenizer)
(define rule-grammar (cfg-parser (tokens enumerated-tokens)
(src-pos)
(start RECOLORED-START-RULE)
(end EOF)
(error the-error-handler)
(grammar . GENERATED-RULE-CODES)))
(define next-token (make-permissive-tokenizer tokenizer all-tokens-hash/mutable))
;; here's how we support grammar "cuts" on top rule name
(define parse-tree-stx (rule-grammar next-token))
(syntax-case parse-tree-stx ()
[(TOP-RULE-NAME . _)
(if (eq? (syntax-property #'TOP-RULE-NAME 'hide-or-splice?) 'hide)
(remove-rule-name parse-tree-stx) ; use `remove-rule-name` so we get the same housekeeping
parse-tree-stx)]
[_ (error 'malformed-parse-tree)]))
(case-lambda [(tokenizer) (rule-parser tokenizer)]
[(source tokenizer)
(parameterize ([current-source source])
(rule-parser tokenizer))])))]
(with-syntax ([RECOLORED-START-RULE (datum->syntax #'RULES-STX (syntax-e #'start-rule))])
#'(let ([THE-GRAMMAR (cfg-parser (tokens enumerated-tokens)
(src-pos)
(start RECOLORED-START-RULE)
(end EOF)
(error THE-ERROR-HANDLER)
(grammar . GENERATED-RULE-CODES))])
(procedure-rename
(case-lambda [(tokenizer)
(define next-token
(make-permissive-tokenizer tokenizer all-tokens-hash/mutable))
(THE-GRAMMAR next-token)]
[(source tokenizer)
(parameterize ([current-source source])
(PARSE tokenizer))])
(string->symbol (format "~a-rule-parser" 'start-rule)))))]
[(_ not-a-rule-id)
(raise-syntax-error #f
(format "Rule ~a is not defined in the grammar" (syntax-e #'not-a-rule-id))
stx)]))
rule-id-stx)]))
;; start-id has to be a value, not an expr, because make-rule-parser is a macro
(define PARSE (procedure-rename (MAKE-RULE-PARSER START-ID) 'PARSE))
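For context, the MAKE-RULE-PARSER macro above backs the make-rule-parser binding exercised by test-subrule.rkt earlier in this diff; a condensed usage sketch:

(require brag/examples/subrule)
(define parse-next (make-rule-parser next))
(syntax->datum (parse-next "0"))  ; => '(next "0")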

@ -6,33 +6,38 @@
flatten-rules
prim-rule)
(define (make-fresh-name)
(let ([n 0])
(λ ()
(lambda ()
(set! n (add1 n))
(string->symbol (format "%rule~a" n)))))
(define default-fresh-name (make-fresh-name))
(define default-fresh-name
(make-fresh-name))
;; Translates rules to lists of primitive rules.
(define (flatten-rules rules #:fresh-name [fresh-name default-fresh-name])
(define ht (make-hasheq))
(apply append (for/list ([a-rule (in-list rules)])
(flatten-rule a-rule
#:ht ht
#:fresh-name fresh-name))))
(define ht (make-hash))
(apply append (map (lambda (a-rule) (flatten-rule a-rule
#:ht ht
#:fresh-name fresh-name))
rules)))
;; flatten-rule: rule -> (listof primitive-rule)
(define (flatten-rule a-rule
#:fresh-name [fresh-name default-fresh-name]
;; ht: (hashtableof pattern-hash-key pat)
#:ht [ht (make-hasheq)])
#:ht [ht (make-hash)])
(let recur ([a-rule a-rule] [inferred? #f])
(let recur ([a-rule a-rule]
[inferred? #f])
;; lift-nonprimitive-pattern: pattern -> (values (listof primitive-rule) pattern)
;; Turns non-primitive patterns into primitive patterns, and produces a set of
@ -93,14 +98,6 @@
(append (list #'(HEAD ORIGIN NAME [SUB-PAT ...] ...))
(apply append (reverse inferred-ruless/rev)))))]
[(repeat 0 0 SUB-PAT)
;; repeat from 0 to 0 (is a no-op)
(recur #'(rule NAME (seq)) #f)]
[(repeat 0 MAYBE-MAX SUB-PAT)
;; repeat from 0 (as a maybe rule)
(recur #'(rule NAME (maybe (repeat 1 MAYBE-MAX SUB-PAT))) #f)]
[(repeat MIN #f SUB-PAT)
;; indefinite repeat
(begin
@ -118,15 +115,10 @@
(begin
(define min (syntax-e #'MIN))
(define max (syntax-e #'MAX))
(unless (<= min max)
(raise-syntax-error #f (format "minimum repeat count cannot be larger than maximum, got {~a,~a}" min max) a-rule))
;; has to keep the same rule NAME to work correctly
(define new-rule-stx
(if (= min max)
(with-syntax ([MIN-SUBPATS (make-list min #'SUB-PAT)])
#'(rule NAME (seq . MIN-SUBPATS)))
(with-syntax ([REPEATS-REMAINING (- max min)]) ; REPEATS-REMAINING is a positive integer
#'(rule NAME (seq (repeat MIN MIN SUB-PAT) (repeat 0 REPEATS-REMAINING SUB-PAT))))))
(define new-rule-stx (with-syntax ([(MIN-SUBPAT ...) (make-list min #'SUB-PAT)]
[(EXTRA-SUBPAT ...) (make-list (- max min) #'SUB-PAT)])
;; has to keep the same name to work correctly
#'(rule NAME (seq MIN-SUBPAT ... (maybe EXTRA-SUBPAT) ...))))
(recur new-rule-stx #f))]
[(maybe SUB-PAT)
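For context, the bounded-repeat clauses above implement brag's {min,max} quantifier (lexed as a REPEAT token by both lexers in this diff); a hypothetical grammar sketch:

#lang brag
word : LETTER{2,4}   ; between two and four LETTER tokens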

@ -1,4 +1,5 @@
#lang racket/base
(require racket/match
racket/list
racket/generator
@ -7,19 +8,18 @@
brag/private/internal-support)
(provide the-error-handler
(provide THE-ERROR-HANDLER
make-permissive-tokenizer
atomic-datum->syntax
positions->srcloc
rule-components->syntax
remove-rule-name)
rule-components->syntax)
;; The level of indirection here is necessary since the yacc grammar wants a
;; function value for the error handler up front. We want to delay that decision
;; till parse time.
(define (the-error-handler tok-ok? tok-name tok-value start-pos end-pos)
(define (THE-ERROR-HANDLER tok-ok? tok-name tok-value start-pos end-pos)
(match (positions->srcloc start-pos end-pos)
[(list src line col offset span)
((current-parser-error-handler) tok-name
@ -171,56 +171,61 @@ This would be the place to check a syntax property for hiding.
(datum->syntax #f d (positions->srcloc start-pos end-pos) stx-with-original?-property))
(define (remove-rule-name component-stx #:splice? [splice #f])
;; when removing a rule name, we apply it as a syntax property to the remaining elements
;; for possible later usage (aka, why throw away information)
(define (apply-name-property name-stx stxs)
(for/list ([stx (in-list (syntax->list stxs))])
(syntax-property stx (syntax->datum name-stx) name-stx)))
(define (splice-stx component-stx)
;; when splicing, we apply rule name as a syntax property to the remaining elements
(syntax-case component-stx ()
[(name . subcomponents)
(syntax-property #'name 'rule-id) ; name has not been removed (recognized by presence of 'rule-id)
(apply-name-property #'name #'subcomponents)]
[subcomponents ; name has been removed, but it is stored in 'generating-rule property
(let* ([name-datum (syntax-property #'subcomponents 'generating-rule)]
[name-stx (syntax-property #'subcomponents name-datum)])
(apply-name-property name-stx #'subcomponents))]))
(define (remove-rule-name component-stx)
(syntax-case component-stx ()
[(name . subcomponents)
(let ([name-datum (syntax->datum #'name)])
;; two properties: 'rule returns name-datum, and name-datum returns original #'name stx
(define (annotate-name stx) (syntax-property (syntax-property stx name-datum #'name) 'rule name-datum))
(if splice
;; when splicing, returned list is a regular list, with each element having the property.
(map annotate-name (syntax->list #'subcomponents))
;; when hiding, returned list should be a syntaxed list with the property
;; iow, basically the same as `component-stx`, minus the name
(annotate-name (datum->syntax component-stx #'subcomponents component-stx component-stx))))]
[_ (raise-syntax-error 'remove-rule-name "component has no name" component-stx)]))
(define (preprocess-component component-stx)
(cond
;; test splice first in case both hiding and splicing are set, for instance:
;; /rule : thing @rule
;; otherwise the hide prevents the splice from being expressed
[(or (eq? (syntax-property component-stx 'hide-or-splice) 'splice)
(syntax-property component-stx 'splice-rh-id))
(remove-rule-name component-stx #:splice? #t)] ; spliced version is lifted out of the sublist
[(eq? (syntax-property component-stx 'hide-or-splice) 'hide)
(list (remove-rule-name component-stx))] ; hidden version still wrapped in a sublist
[else (list component-stx)]))
(define (preprocess-component-lists component-stxss)
; "preprocess" means splicing and rule-name-hiding where indicated
;; inside `component-stx` is a rule name followed by subcomponents
(syntax-property
(syntax-property
(datum->syntax component-stx #'subcomponents component-stx component-stx)
name-datum #'name)
'generating-rule name-datum))]))
(define (splice-component-lists component-lists)
;; each `component-list` is a list that's either empty, or contains component-stx objects
;; inside `component-stx` is a name followed by subcomponents
(append*
(for*/list ([component-stxs (in-list component-stxss)]
[component-stx (in-list component-stxs)])
(preprocess-component component-stx))))
(for*/list ([component-list (in-list component-lists)]
#:unless (empty? component-list)
[component-stx (in-list component-list)])
(if (or (eq? (syntax-property component-stx 'hide-or-splice) 'splice)
(syntax-property component-stx 'splice-rh-id))
(splice-stx component-stx) ; spliced version is lifted out of the sublist
(list component-stx))))) ; otherwise left inside sublist
;; rule-components->syntax: (U symbol false) (listof stx) ... #:srcloc (U #f (list src line column offset span)) -> stx
;; Creates an stx out of the rule name and its components.
;; The location information of the rule spans that of its components.
(define (rule-components->syntax rule-name/false #:srcloc [srcloc #f] #:hide-or-splice? [hide-or-splice #f] . component-lists)
(define new-rule-name
;; stash the hide property on rule names so we can use it later if we want
(syntax-property (datum->syntax #f rule-name/false srcloc stx-with-original?-property) 'hide-or-splice? hide-or-splice))
(define new-rule-components (preprocess-component-lists component-lists))
(define rule-result (cons new-rule-name new-rule-components))
(define new-rule-name (syntax-property
(datum->syntax #f rule-name/false srcloc stx-with-original?-property)
'rule-id #t))
(define rule-result (cons new-rule-name (splice-component-lists component-lists)))
(define syntaxed-rule-result (datum->syntax #f rule-result srcloc stx-with-original?-property))
;; not 'hide-or-splice-lhs-id, because this will now become a (right-hand) component in a different (left-hand) rule
;; actual splicing happens when the parent rule is processed (with procedure above)
(syntax-property syntaxed-rule-result 'hide-or-splice hide-or-splice))
;; not 'hide-or-splice-lhs-id, because this will now become
;; a (right-hand) component in a different (left-hand) rule
;; actual splicing happens when the parent rule is processed (with `splice-component-lists`)
(syntax-property ((if (eq? hide-or-splice 'hide)
remove-rule-name
values) syntaxed-rule-result)
'hide-or-splice hide-or-splice))

@ -0,0 +1,2 @@
#lang brag
/top : "x"

@ -1,13 +1,7 @@
#lang info
(define name "brag")
(define collection 'multi)
(define deps '(["base" #:version "6.3"]
"brag-lib"))
(define build-deps '("at-exp-lib"
"br-parser-tools-doc"
"racket-doc"
"scribble-lib"))
(define implies '("brag-lib"))
(define scribblings '(("brag.scrbl")))
(define blurb '("brag: the Beautiful Racket AST Generator. A fork of Danny Yoo's ragg. A design goal is to be easy for beginners to use. Given a grammar in EBNF, brag produces a parser that generates Racket's native syntax objects with full source location."))
(define deps (list))
(define test-omit-paths '("examples/simple-line-drawing/examples/letter-i.rkt"))

@ -12,8 +12,7 @@
(from/to "'" "'")
(from/to "\"" "\"")) (token 'LIT lexeme)]
[(:or "()" "Ø" "") (token 'NO-COLOR lexeme)] ; empty set symbols
[(:or (char-set "()[]{}|+*:?") hide-char splice-char "::=") (token 'MISC lexeme)]
[(from/to "(*" "*)") (token 'COMMENT lexeme)]
[(:or (char-set "()[]{}|+*:?") hide-char splice-char) (token 'MISC lexeme)]
[(:seq (:or "#" ";") (complement (:seq (:* any-char) NL (:* any-char))) (:or NL "")) (token 'COMMENT lexeme)]
[id (token 'ID lexeme)]
[any-char (token 'OTHER lexeme)]))

@ -22,11 +22,11 @@
(define this-line (line tbox posn))
(cond
[(not prev-line) #f]
[(eqv? (line-first-visible-char tbox this-line) #\|)
[(char=? (line-first-visible-char tbox this-line) #\|)
(define start (send tbox line-start-position prev-line))
(define end (send tbox line-end-position prev-line))
(for*/first ([pos (in-range start end)]
[c (in-value (send tbox get-character pos))]
#:when (memv c '(#\: #\|)))
#:when (or (char=? c #\:) (char=? c #\|)))
(- pos start))]
[else #f]))

@ -0,0 +1,36 @@
#lang racket/base
(require brag/support)
(provide current-source
current-parser-error-handler
current-tokenizer-error-handler)
;; During parsing, we should define the source of the input.
(define current-source (make-parameter #f))
;; When a parse error happens, we call the current-parser-error-handler:
(define current-parser-error-handler
(make-parameter
(lambda (tok-name tok-value offset line col span)
(raise (exn:fail:parsing
(format "Encountered parsing error near ~e (token ~e) while parsing ~e [line=~a, column=~a, offset=~a]"
tok-value tok-name
(current-source)
line col offset)
(current-continuation-marks)
(list (srcloc (current-source) line col offset span)))))))
;; When a tokenization error happens, we call the current-tokenizer-error-handler.
(define current-tokenizer-error-handler
(make-parameter
(lambda (tok-type tok-value offset line column span)
(raise (exn:fail:parsing
(format "Encountered unexpected token ~e (~e) while parsing ~e [line=~a, column=~a, offset=~a]"
tok-type
tok-value
(current-source)
line column offset)
(current-continuation-marks)
(list (srcloc (current-source) line column offset span)))))))

@ -0,0 +1,132 @@
#lang racket/base
(require (for-syntax racket/base "parser.rkt"))
(require br-parser-tools/lex
(prefix-in : br-parser-tools/lex-sre)
"parser.rkt"
"rule-structs.rkt"
racket/string)
(provide lex/1 tokenize)
(module+ lex-abbrevs
(provide hide-char splice-char id-char letter digit NL id))
;; A newline can be any one of the following.
(define-lex-abbrev NL (:or "\r\n" "\r" "\n"))
;; reserved-chars = chars used for quantifiers & parse-tree filtering
(define-for-syntax quantifiers "+:*?{}") ; colon is reserved to separate rules and productions
(define-lex-trans reserved-chars
(λ(stx) #`(char-set #,(format "~a~a~a" quantifiers hide-char splice-char))))
(define-lex-trans hide-char-trans (λ(stx) #`(char-set #,(format "~a" hide-char))))
(define-lex-trans splice-char-trans (λ(stx) #`(char-set #,(format "~a" splice-char))))
(define-lex-abbrevs
[letter (:or (:/ "a" "z") (:/ #\A #\Z))]
[digit (:/ #\0 #\9)]
[id-char (:or letter digit (:& (char-set "+:*@!-.$%&/=?^_~<>") (char-complement (reserved-chars))))]
[hide-char (hide-char-trans)]
[splice-char (splice-char-trans)]
)
(define-lex-abbrev id (:& (complement (:+ digit)) (:+ id-char)))
(define-lex-abbrev id-separator (:or ":" "::="))
(define-lex-abbrev esc-chars (union "\\a" "\\b" "\\t" "\\n" "\\v" "\\f" "\\r" "\\e"))
(define (unescape-lexeme lexeme quote-char)
;; convert the literal string representation back into an escape char with lookup table
(define unescapes (hash "a" 7 "b" 8 "t" 9 "n" 10 "v" 11 "f" 12 "r" 13 "e" 27 "\"" 34 "'" 39 "\\" 92))
(define pat (regexp (format "(?<=^~a\\\\).(?=~a$)" quote-char quote-char)))
(cond
[(regexp-match pat lexeme)
=> (λ (m) (string quote-char (integer->char (hash-ref unescapes (car m))) quote-char))]
[else lexeme]))
(define lex/1
(lexer-src-pos
;; handle whitespace & escape chars within quotes as literal tokens: "\n" "\t" '\n' '\t'
;; match the escaped version, and then unescape them before they become token-LITs
[(:: "'"
(:or (:* (:or "\\'" esc-chars (:~ "'" "\\"))) "\\\\")
"'")
(token-LIT (unescape-lexeme lexeme #\'))]
[(:: "\""
(:or (:* (:or "\\\"" esc-chars (:~ "\"" "\\"))) "\\\\")
"\"")
(token-LIT (unescape-lexeme lexeme #\"))]
[(:or "()" "Ø" "") (token-EMPTY lexeme)]
["("
(token-LPAREN lexeme)]
["["
(token-LBRACKET lexeme)]
[")"
(token-RPAREN lexeme)]
["]"
(token-RBRACKET lexeme)]
[hide-char
(token-HIDE lexeme)]
[splice-char
(token-SPLICE lexeme)]
["|"
(token-PIPE lexeme)]
[(:or "+" "*" "?"
(:: "{" (:* digit) (:? (:: "," (:* digit))) "}"))
(token-REPEAT lexeme)]
[whitespace
;; Skip whitespace
(return-without-pos (lex/1 input-port))]
;; Skip comments up to end of line
[(:: (:or "#" ";")
(complement (:: (:* any-char) NL (:* any-char)))
(:or NL ""))
(return-without-pos (lex/1 input-port))]
[(eof)
(token-EOF lexeme)]
[(:: id (:* whitespace) id-separator)
(token-RULE_HEAD lexeme)]
[(:: hide-char id (:* whitespace) id-separator)
(token-RULE_HEAD_HIDDEN lexeme)]
[(:: splice-char id (:* whitespace) id-separator)
(token-RULE_HEAD_SPLICED lexeme)]
[id
(token-ID lexeme)]
;; We call the error handler for everything else:
[(:: any-char)
(let-values ([(rest-of-text end-pos-2)
(lex-nonwhitespace input-port)])
((current-parser-error-handler)
#f
'error
(string-append lexeme rest-of-text)
(position->pos start-pos)
(position->pos end-pos-2)))]))
;; This is the helper for the error production.
(define lex-nonwhitespace
(lexer
[(:+ (char-complement whitespace))
(values lexeme end-pos)]
[any-char
(values lexeme end-pos)]
[(eof)
(values "" end-pos)]))
;; position->pos: position -> pos
;; Converts position structures from br-parser-tools/lex to our own pos structures.
(define (position->pos a-pos)
(pos (position-offset a-pos)
(position-line a-pos)
(position-col a-pos)))
;; tokenize: input-port -> (-> token)
(define (tokenize ip #:source [source (object-name ip)])
(λ () (parameterize ([file-path source])
(lex/1 ip))))

@ -0,0 +1,34 @@
#lang racket/base
(require br-parser-tools/lex)
(provide (all-defined-out))
;; During parsing, we should define the source of the input.
(define current-source (make-parameter #f))
;; When bad things happen, we need to emit errors with source location.
(struct exn:fail:parse-grammar exn:fail (srclocs)
#:transparent
#:property prop:exn:srclocs (lambda (instance)
(exn:fail:parse-grammar-srclocs instance)))
(define current-parser-error-handler
(make-parameter
(lambda (tok-ok? tok-name tok-value start-pos end-pos)
(raise (exn:fail:parse-grammar
(format "Error while parsing grammar near: ~e [line=~a, column~a, position=~a]"
tok-value
(position-line start-pos)
(position-col start-pos)
(position-offset start-pos))
(current-continuation-marks)
(list (srcloc (current-source)
(position-line start-pos)
(position-col start-pos)
(position-offset start-pos)
(if (and (number? (position-offset end-pos))
(number? (position-offset start-pos)))
(- (position-offset end-pos)
(position-offset start-pos))
#f))))))))

@ -60,22 +60,16 @@
(exn:fail:parsing-srclocs instance)))
(define (open-input-string-with-locs str)
(parameterize ([port-count-lines-enabled #t])
(open-input-string str)))
(provide (rename-out [apply-port-proc apply-lexer])
apply-port-proc)
(define (apply-port-proc proc [val (current-input-port)])
(for/list ([t (in-port proc (if (string? val) (open-input-string-with-locs val) val))])
(provide apply-lexer)
(define (apply-lexer lexer val)
(for/list ([t (in-port lexer (if (string? val) (open-input-string val) val))])
t))
(provide apply-tokenizer-maker
(rename-out [apply-tokenizer-maker apply-tokenizer]))
(define (apply-tokenizer-maker tokenize [in (current-input-port)])
(define (apply-tokenizer-maker tokenize in)
(define input-port (if (string? in)
(open-input-string-with-locs in)
(open-input-string in)
in))
(define token-producer (tokenize input-port))
(for/list ([token (in-producer token-producer (λ(tok)

@ -46,5 +46,5 @@
(check-exn exn:fail:parsing?
(lambda () (parse '("zero" "one" "zero"))))
(check-exn (regexp (regexp-quote
"Encountered unexpected token of type \"zero\" (value \"zero\") while parsing"))
"Encountered unexpected token \"zero\" (\"zero\") while parsing"))
(lambda () (parse '("zero" "one" "zero"))))

@ -11,15 +11,14 @@
"test-errors.rkt"
"test-flatten.rkt"
"test-hide-and-splice.rkt"
"test-hide-top.rkt"
"test-lexer.rkt"
"test-nested-repeats.rkt"
"test-old-token.rkt"
"test-parser.rkt"
"test-quotation-marks-and-backslashes.rkt"
"test-simple-arithmetic-grammar.rkt"
"test-simple-line-drawing.rkt"
"test-start-and-atok.rkt"
"test-top-level-cut.rkt"
"test-weird-grammar.rkt"
"test-whitespace.rkt"
"test-wordy.rkt"

@ -33,18 +33,18 @@
(check-equal? (map syntax->datum
(flatten-rule #'(rule expr (seq (lit "1") (seq (lit "2") (lit "3"))))))
'((prim-rule seq expr
[(lit "1") (lit "2") (lit "3")])))
[(lit "1") (lit "2") (lit "3")])))
(check-equal? (map syntax->datum
(flatten-rule #'(rule expr (seq (seq (lit "1") (lit "2")) (lit "3")))))
'((prim-rule seq expr
[(lit "1") (lit "2") (lit "3")])))
[(lit "1") (lit "2") (lit "3")])))
(check-equal? (map syntax->datum
(flatten-rule #'(rule expr (seq (seq (lit "1")) (seq (lit "2") (lit "3"))))))
'((prim-rule seq expr
[(lit "1") (lit "2") (lit "3")])))
[(lit "1") (lit "2") (lit "3")])))
@ -96,16 +96,14 @@
;; repeat
(check-equal? (map syntax->datum
(flatten-rule #'(rule rule-2+ (repeat 0 #f (id rule-2)))))
'((prim-rule maybe rule-2+ ((inferred-id %rule1 repeat)) ())
(inferred-prim-rule repeat %rule1
((inferred-id %rule1 repeat) (id rule-2))
((id rule-2)))))
'((prim-rule repeat rule-2+
[(inferred-id rule-2+ repeat) (id rule-2)]
[])))
(check-equal? (map syntax->datum
(flatten-rule #'(rule rule-2+ (repeat 0 #f (seq (lit "+") (id rule-2))))))
'((prim-rule maybe rule-2+ ((inferred-id %rule2 repeat)) ())
(inferred-prim-rule repeat %rule2
((inferred-id %rule2 repeat) (lit "+") (id rule-2))
((lit "+") (id rule-2)))))
'((prim-rule repeat rule-2+
[(inferred-id rule-2+ repeat) (lit "+") (id rule-2)]
[])))
(check-equal? (map syntax->datum
(flatten-rule #'(rule rule-2+ (repeat 1 #f (id rule-2)))))
@ -134,8 +132,8 @@
[(lit "x")]
[(inferred-id r1 maybe)])
(inferred-prim-rule maybe r1
[(lit "y")]
[])))
[(lit "y")]
[])))
;; choice, maybe, repeat
(check-equal? (map syntax->datum
(flatten-rule #'(rule sexp (choice (lit "x")
@ -145,11 +143,11 @@
[(lit "x")]
[(inferred-id r1 maybe)])
(inferred-prim-rule maybe r1
[(inferred-id r2 repeat)]
[])
[(inferred-id r2 repeat)]
[])
(inferred-prim-rule repeat r2
[(inferred-id r2 repeat) (lit "y")]
[(lit "y")])))
[(inferred-id r2 repeat) (lit "y")]
[(lit "y")])))
;; choice, seq
(check-equal? (map syntax->datum
(flatten-rule #'(rule sexp (choice (seq (lit "x") (lit "y"))
@ -176,11 +174,8 @@
(check-equal? (map syntax->datum
(flatten-rule #'(rule expr (seq (id term) (repeat 0 #f (seq (lit "+") (id term)))))
#:fresh-name (make-fresh-name)))
'((prim-rule seq expr ((id term) (inferred-id r1 repeat)))
(prim-rule maybe r1 ((inferred-id r2 repeat)) ())
(inferred-prim-rule repeat r2
((inferred-id r2 repeat) (lit "+") (id term))
((lit "+") (id term)))))
'((prim-rule seq expr [(id term) (inferred-id r1 repeat)])
(inferred-prim-rule repeat r1 [(inferred-id r1 repeat) (lit "+") (id term)] [])))
;; larger example: simple arithmetic
@ -191,14 +186,8 @@
(rule factor (token INT))))
#:fresh-name (make-fresh-name)))
'((prim-rule seq expr ((id term) (inferred-id r1 repeat)))
(prim-rule maybe r1 ((inferred-id r2 repeat)) ())
(inferred-prim-rule repeat r2
((inferred-id r2 repeat) (lit "+") (id term))
((lit "+") (id term)))
(prim-rule seq term ((id factor) (inferred-id r3 repeat)))
(prim-rule maybe r3 ((inferred-id r4 repeat)) ())
(inferred-prim-rule repeat r4
((inferred-id r4 repeat) (lit "*") (id factor))
((lit "*") (id factor)))
(prim-rule token factor ((token INT)))))
'((prim-rule seq expr [(id term) (inferred-id r1 repeat)])
(inferred-prim-rule repeat r1 [(inferred-id r1 repeat) (lit "+") (id term)] [])
(prim-rule seq term [(id factor) (inferred-id r2 repeat)])
(inferred-prim-rule repeat r2 [(inferred-id r2 repeat) (lit "*") (id factor)] [])
(prim-rule token factor [(token INT)])))

@ -0,0 +1,8 @@
#lang racket/base
(require brag/examples/hide-top
brag/support
rackunit)
;; check that the top rule name can be cut (hidden)
(check-equal? (parse-to-datum "x") '("x"))

@ -51,13 +51,11 @@
(check-equal? (l "]")
'(RBRACKET "]" 1 2))
;; 220111: lexer now converts single-quoted lexemes
;; to standard Racket-style double-quoted string literal
(check-equal? (l "'hello'")
'(LIT "\"hello\"" 1 8))
'(LIT "'hello'" 1 8))
(check-equal? (l "'he\\'llo'")
'(LIT "\"he'llo\"" 1 10))
'(LIT "'he\\'llo'" 1 10))
(check-equal? (l "/")
'(HIDE "/" 1 2))

@ -0,0 +1,13 @@
#lang info
(define version "1.0")
(define collection 'multi)
(define deps '("base"
"br-parser-tools-lib"
"rackunit-lib"))
(define build-deps '("at-exp-lib"
"br-parser-tools-doc"
"racket-doc"
"scribble-lib"))
(define update-implies '("br-parser-tools-lib"))