;;
;; REPP tokenization builds on a collection of rule sets, each in a file of its
;; own.  these are called modules (or at times just REPPs), and all are loaded
;; into the processor.  a specific configuration is then obtained by picking
;; one REPP module as the top-level entry point and determining which named
;; group calls (to other modules) should be honored when invoked.  the following
;; is the global set of available modules.
;;
repp-modules := 
  tokenizer xml latex ascii html wiki lgt gml robustness quotes ptb lkb.
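
;;
;; for orientation, each module is a plain-text rule file (conventionally with
;; an '.rpp' extension).  the following lines are an illustrative sketch only,
;; not an excerpt from any of the actual modules: a line starting with '!' is
;; a rewrite rule, giving a (PCRE-style) pattern and a replacement separated
;; by tabs; a line starting with '>' calls a named group, honored only when
;; that group is activated (see 'repp-calls' below); and a line starting with
;; ':' gives the pattern at which the final string is broken into tokens.
;;
;;   !([^ ])(\.)$	\1 \2
;;   >xml
;;   :[ \t]+
;;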

;;
;; the REPP module to provide the top-level entry point.
;;
repp-tokenizer := tokenizer.

;;
;; REPP modules can be parameterized in terms of external named groups, which
;; conceptually resemble sub-routines and can be activated or deactivated; the
;; following is the default list of groups to activate (and may be overridden
;; in individual REPP configurations).
;;
repp-calls := xml ascii lgt wiki quotes html gml.
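
;;
;; as an illustration of the interaction with group calls: with the defaults
;; above, a '>latex' call inside one of the loaded modules would simply be
;; skipped, because 'latex' is not listed; a configuration aimed at text
;; extracted from LaTeX sources might instead use something like
;;
;;   repp-calls := xml latex ascii lgt wiki quotes html gml.
;;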

;;
;; when requested to output the results of tokenization, REPP can write in a
;; number of different formats: the 'classic' YY format ('yy'), the more recent
;; XML feature structure chart ('fsc'), or a plain string, using whitespace to
;; mark token boundaries.  this setting is also relevant when parsing under
;; [incr tsdb()] control, where YY is clearly the best-supported format.
;;
tokenizer-output := yy.
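
;;
;; for orientation, a single token in YY format is a parenthesized tuple of
;; (roughly) a token identifier, start and end chart vertices, the character
;; span, path membership, the token string, a few additional token-level
;; fields, and optionally part-of-speech tags with probabilities; the
;; following line is an illustrative sketch of that general shape, not
;; actual output:
;;
;;   (1, 0, 1, <0:3>, 1, "the", 0, "null", "DT" 1.0)
;;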