[m-rev.] diff: style fixes for extras/lex
Julien Fischer
juliensf at csse.unimelb.edu.au
Mon Jan 10 15:32:06 AEDT 2011
extras/lex/*.m:
extras/lex/README*:
Formatting and style fixes. There are no changes to any algorithms.
Julien.
Index: README.lex
===================================================================
RCS file: /home/mercury/mercury1/repository/mercury/extras/lex/README.lex,v
retrieving revision 1.1
diff -u -r1.1 README.lex
--- README.lex 3 Dec 2002 04:48:31 -0000 1.1
+++ README.lex 10 Jan 2011 04:30:45 -0000
@@ -38,7 +38,7 @@
; rpar
; comment.
-Lexer = lex__init([
+Lexer = lex.init([
( identifier -> func(Id) = id(Id) ),
( signed_int -> func(Int) = int(Int) ),
( real -> func(Float) = float(Float) ),
@@ -50,28 +50,28 @@
The combinator return/2 is defined s.t. return(X) = ( func(_) = X ),
that is, it simply discards the matched string and returns X.
-(There is also lex__init/3 which takes an extra argument, namely a
+(There is also lex.init/3 which takes an extra argument, namely a
predicate which is used to silently ignore certain tokens such as
whitespace, say.)
-A lexer is activated by calling lex__start/2, which returns a (unique)
+A lexer is activated by calling lex.start/2, which returns a (unique)
lexer state:
- !:LexerState = lex__start(Lexer, !.IO)
+ !:LexerState = lex.start(Lexer, !.IO)
-The lex__read/3 predicate searches for the next, longest match in the
+The lex.read/3 predicate searches for the next, longest match in the
input stream and returns the corresponding token (or an error message
if there is no immediate match in the input stream):
- lex__read(Result, !LexerState),
+ lex.read(Result, !LexerState),
( Result = eof, ...
; Result = ok(Token), ...
; Result = error(Message, Offset), ...
)
When lexical analysis is complete, the input source may be obtained
-by calling lex__stop/1:
+by calling lex.stop/1:
- !:IO = lex__stop(!.LexerState)
+ !:IO = lex.stop(!.LexerState)
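
(An aside for reviewers, not part of the diff: the calling sequence the README describes fits together roughly as in the sketch below. The module name, the token type, and the whitespace-skipping lexeme built from any/1 and ignore/1 are illustrative assumptions in the style of samples/lex_demo.m further down, not code from this patch.)

    :- module sum_ints.
    :- interface.

    :- import_module io.

    :- pred main(io::di, io::uo) is det.

    :- implementation.

    :- import_module lex.

    :- import_module int.
    :- import_module list.
    :- import_module string.

        % A hypothetical token type: integers plus ignorable whitespace.
        %
    :- type token
        --->    int(int)
        ;       ws.

    :- func lexemes = list(lexeme(token)).

    lexemes = [
        ( signed_int   -> (func(Match) = int(string.det_to_int(Match))) ),
        ( any(" \t\n") -> return(ws) )
    ].

    main(!IO) :-
        % Build the lexer, attach it to stdin, and discard whitespace tokens.
        Lexer = lex.init(lexemes, lex.read_from_stdin, ignore(ws)),
        LS0 = lex.start(Lexer, !.IO),
        sum_tokens(0, Total, LS0, LS),
        !:IO = lex.stop(LS),
        io.format("total = %d\n", [i(Total)], !IO).

        % Read tokens until eof (or an error), summing the integers seen.
        %
    :- pred sum_tokens(int::in, int::out,
        lexer_state(token, io)::di, lexer_state(token, io)::uo) is det.

    sum_tokens(!Total, !LS) :-
        lex.read(Result, !LS),
        (
            Result = ok(int(N)),
            !:Total = !.Total + N,
            sum_tokens(!Total, !LS)
        ;
            Result = ok(ws),
            sum_tokens(!Total, !LS)
        ;
            Result = eof
        ;
            Result = error(_Msg, _Offset)
        ).

The lexer state is unique, so it has to be threaded with di/uo modes, which is the same pattern the rewritten tokenise_stdin in samples/lex_demo.m uses below.
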
Index: lex.automata.m
===================================================================
RCS file: /home/mercury/mercury1/repository/mercury/extras/lex/lex.automata.m,v
retrieving revision 1.3
diff -u -r1.3 lex.automata.m
--- lex.automata.m 5 Aug 2010 06:55:43 -0000 1.3
+++ lex.automata.m 10 Jan 2011 04:30:45 -0000
@@ -18,18 +18,18 @@
%
%-----------------------------------------------------------------------------%
-:- module lex__automata.
-
+:- module lex.automata.
:- interface.
-:- import_module set, list, char.
-
-
+:- import_module char.
+:- import_module list.
+:- import_module set.
+%-----------------------------------------------------------------------------%
+
% States are labelled with non-negative integers.
%
-:- type state_no
- == int.
+:- type state_no == int.
:- type state_mc
---> state_mc(
@@ -41,8 +41,7 @@
:- inst null_transition_free_state_mc
== bound(state_mc(ground, ground, atom_transitions)).
-:- type transitions
- == list(transition).
+:- type transitions == list(transition).
:- inst atom_transitions == list_skel(atom_transition).
:- inst null_transitions == list_skel(null_transition).
@@ -55,4 +54,5 @@
:- inst null_transition == bound(null(ground, ground)).
%-----------------------------------------------------------------------------%
+:- end_module lex.automata.
%-----------------------------------------------------------------------------%
Index: lex.buf.m
===================================================================
RCS file: /home/mercury/mercury1/repository/mercury/extras/lex/lex.buf.m,v
retrieving revision 1.4
diff -u -r1.4 lex.buf.m
--- lex.buf.m 5 Aug 2010 06:55:43 -0000 1.4
+++ lex.buf.m 10 Jan 2011 04:30:45 -0000
@@ -92,13 +92,15 @@
%
%-----------------------------------------------------------------------------%
-:- module lex__buf.
-
+:- module lex.buf.
:- interface.
-:- import_module array, char, bool, string.
-
+:- import_module array.
+:- import_module bool.
+:- import_module char.
+:- import_module string.
+%-----------------------------------------------------------------------------%
% XXX We need a char and/or byte array datatype;
% array(char) uses one word for each char, which is
@@ -107,7 +109,7 @@
:- type buf
== array(char).
- % T is the type of the input source (typically io__state or string);
+ % T is the type of the input source (typically io.state or string);
% the user must initialise the buffer by specifying an appropriate
% read predicate.
%
@@ -197,7 +199,7 @@
init(BufReadPred, BufState, Buf) :-
BufState = buf_state(0, 0, 0, 0, initial_buf_size, no, BufReadPred),
- Buf = array__init(initial_buf_size, ('@')).
+ Buf = array.init(initial_buf_size, ('@')).
%-----------------------------------------------------------------------------%
@@ -213,7 +215,7 @@
( if Cursor < End then
- Result = ok(array__lookup(Buf0, Cursor - Origin)),
+ Result = ok(array.lookup(Buf0, Cursor - Origin)),
BufState = ( BufState0 ^ buf_cursor := Cursor + 1 ),
Buf = Buf0,
Src = Src0
@@ -231,7 +233,7 @@
( if Result = ok(Char) then
- Buf = array__set(Buf0, End - Origin, Char),
+ Buf = array.set(Buf0, End - Origin, Char),
BufState = (( BufState0
^ buf_cursor := Cursor + 1 )
^ buf_end := End + 1 )
@@ -264,10 +266,10 @@
adjust_buf(GarbageLength, ExtraLength, Buf0, Buf) :-
- Size0 = array__size(Buf0),
+ Size0 = array.size(Buf0),
( if GarbageLength < low_water_mark then /* We need to grow the buffer */
- array__init(Size0 + low_water_mark, ('@'), Buf1),
+ array.init(Size0 + low_water_mark, ('@'), Buf1),
ExtraLength = low_water_mark
else
Buf1 = Buf0,
@@ -284,7 +286,7 @@
shift_buf(I, Hi, Disp, Src, Tgt) =
( if I < Hi then
shift_buf(I + 1, Hi, Disp, Src,
- array__set(Tgt, I, array__lookup(Src, I + Disp)))
+ array.set(Tgt, I, array.lookup(Src, I + Disp)))
else
Tgt
).
@@ -311,11 +313,12 @@
From = BufState ^ buf_start - BufState ^ buf_origin,
Length = (BufState ^ buf_cursor - 1 - BufState ^ buf_start),
To = From + Length,
- String = string__from_char_list(array__fetch_items(Buf, From, To)).
+ String = string.from_char_list(array.fetch_items(Buf, From, To)).
%-----------------------------------------------------------------------------%
commit(BufState) = ( BufState ^ buf_start := BufState ^ buf_cursor ).
%-----------------------------------------------------------------------------%
+:- end_module lex.buf.
%-----------------------------------------------------------------------------%
Index: lex.convert_NFA_to_DFA.m
===================================================================
RCS file: /home/mercury/mercury1/repository/mercury/extras/lex/lex.convert_NFA_to_DFA.m,v
retrieving revision 1.4
diff -u -r1.4 lex.convert_NFA_to_DFA.m
--- lex.convert_NFA_to_DFA.m 5 Aug 2010 06:55:43 -0000 1.4
+++ lex.convert_NFA_to_DFA.m 10 Jan 2011 04:30:45 -0000
@@ -15,13 +15,10 @@
%
%-----------------------------------------------------------------------------%
-:- module lex__convert_NFA_to_DFA.
-
+:- module lex.convert_NFA_to_DFA.
:- interface.
-:- import_module lex__automata.
-
-
+:- import_module lex.automata.
:- func convert_NFA_to_DFA(state_mc) = state_mc.
:- mode convert_NFA_to_DFA(in(null_transition_free_state_mc)) =
@@ -32,9 +29,14 @@
:- implementation.
-:- import_module counter, list, set, map, char, int.
-
+:- import_module char.
+:- import_module counter.
+:- import_module int.
+:- import_module list.
+:- import_module map.
+:- import_module set.
+%-----------------------------------------------------------------------------%
:- type state_sets
== set(state_set).
@@ -67,8 +69,8 @@
%
NFAStopStates = NFA ^ smc_stop_states,
NFATransitions = NFA ^ smc_state_transitions,
- DFAStartStateSet = set__make_singleton_set(NFA ^ smc_start_state),
- DFAStartStateSets = set__make_singleton_set(DFAStartStateSet),
+ DFAStartStateSet = set.make_singleton_set(NFA ^ smc_start_state),
+ DFAStartStateSets = set.make_singleton_set(DFAStartStateSet),
% Calculate the powerset version of the DFA from the NFA.
%
@@ -82,12 +84,12 @@
% Replace the powerset state_no identifiers with numbers.
%
DFAStateNos = number_state_sets(DFAStateSets),
- map__lookup(DFAStateNos, DFAStartStateSet, DFAStartState),
- DFAStopStates0 = list__map(
- map__lookup(DFAStateNos),
- set__to_sorted_list(DFAStopStateSets)
+ map.lookup(DFAStateNos, DFAStartStateSet, DFAStartState),
+ DFAStopStates0 = list.map(
+ map.lookup(DFAStateNos),
+ set.to_sorted_list(DFAStopStateSets)
),
- DFAStopStates = set__list_to_set(DFAStopStates0),
+ DFAStopStates = set.list_to_set(DFAStopStates0),
DFATransitions = map_state_set_transitions_to_numbers(
DFAStateNos,
DFAStateSetTransitions
@@ -113,7 +115,7 @@
compute_DFA_state_sets_and_transitions(Ts, NewSs0, Ss0, Ss, STs0, STs) :-
- ( if set__empty(NewSs0) then
+ ( if set.empty(NewSs0) then
Ss = Ss0,
STs0 = STs
@@ -121,17 +123,17 @@
else
NewSTs =
- list__condense(
- list__map(state_set_transitions(Ts),set__to_sorted_list(NewSs0))
+ list.condense(
+ list.map(state_set_transitions(Ts),set.to_sorted_list(NewSs0))
),
- STs1 = list__append(NewSTs, STs0),
+ STs1 = list.append(NewSTs, STs0),
TargetSs =
- set__list_to_set(
- list__map(( func(trans(_, _, S)) = S ), NewSTs)
+ set.list_to_set(
+ list.map(( func(trans(_, _, S)) = S ), NewSTs)
),
- NewSs = TargetSs `set__difference` Ss0,
- Ss1 = NewSs `set__union` Ss0,
+ NewSs = TargetSs `set.difference` Ss0,
+ Ss1 = NewSs `set.union` Ss0,
compute_DFA_state_sets_and_transitions(Ts, NewSs, Ss1, Ss, STs1, STs)
).
@@ -148,7 +150,7 @@
state_set_transitions(Ts, S) = STs :-
TCs = transition_chars(Ts, S),
- STs = list__map(state_set_transition(Ts, S), TCs).
+ STs = list.map(state_set_transition(Ts, S), TCs).
%-----------------------------------------------------------------------------%
@@ -159,9 +161,9 @@
:- func transition_chars(transitions, state_set) = list(char).
transition_chars(Ts, S) =
- list__sort_and_remove_dups(
- list__condense(
- list__map(transition_chars_for_state(Ts), set__to_sorted_list(S))
+ list.sort_and_remove_dups(
+ list.condense(
+ list.map(transition_chars_for_state(Ts), set.to_sorted_list(S))
)
).
@@ -171,7 +173,7 @@
:- mode transition_chars_for_state(in, in) = out is det.
transition_chars_for_state(Ts, X) =
- list__filter_map(transition_char_for_state(X), Ts).
+ list.filter_map(transition_char_for_state(X), Ts).
%-----------------------------------------------------------------------------%
@@ -200,14 +202,14 @@
:- func target_state_set(transitions, state_set, char) = state_set.
target_state_set(Ts, S, C) =
- set__power_union(set__map(target_state_set_0(Ts, C), S)).
+ set.power_union(set.map(target_state_set_0(Ts, C), S)).
%-----------------------------------------------------------------------------%
:- func target_state_set_0(transitions, char, state_no) = state_set.
target_state_set_0(Ts, C, X) =
- set__list_to_set(list__filter_map(target_state(X, C), Ts)).
+ set.list_to_set(list.filter_map(target_state(X, C), Ts)).
%-----------------------------------------------------------------------------%
@@ -221,7 +223,7 @@
:- func compute_DFA_stop_state_sets(state_set, state_sets) = state_sets.
compute_DFA_stop_state_sets(StopStates, StateSets) =
- set__filter_map(stop_state_set(StopStates), StateSets).
+ set.filter_map(stop_state_set(StopStates), StateSets).
%-----------------------------------------------------------------------------%
@@ -229,20 +231,20 @@
:- mode stop_state_set(in, in) = out is semidet.
stop_state_set(StopStates, StateSet) = StateSet :-
- not set__empty(StopStates `set__intersect` StateSet).
+ not set.empty(StopStates `set.intersect` StateSet).
%-----------------------------------------------------------------------------%
:- func number_state_sets(state_sets) = state_set_no_map.
number_state_sets(Ss) = StateNos :-
- list__foldl2(
+ list.foldl2(
( pred(S::in, N::in, (N + 1)::out, Map0::in, Map::out) is det :-
- Map = map__set(Map0, S, N)
+ Map = map.set(Map0, S, N)
),
- set__to_sorted_list(Ss),
+ set.to_sorted_list(Ss),
0, _,
- map__init, StateNos
+ map.init, StateNos
).
%-----------------------------------------------------------------------------%
@@ -254,13 +256,14 @@
out(atom_transitions).
map_state_set_transitions_to_numbers(Map, STs) =
- list__map(
+ list.map(
( func(trans(SX, C, SY)) = trans(X, C, Y) :-
- X = map__lookup(Map, SX),
- Y = map__lookup(Map, SY)
+ X = map.lookup(Map, SX),
+ Y = map.lookup(Map, SY)
),
STs
).
%-----------------------------------------------------------------------------%
+:- end_module lex.convert_NFA_to_DFA.
%-----------------------------------------------------------------------------%
Index: lex.lexeme.m
===================================================================
RCS file: /home/mercury/mercury1/repository/mercury/extras/lex/lex.lexeme.m,v
retrieving revision 1.5
diff -u -r1.5 lex.lexeme.m
--- lex.lexeme.m 5 Aug 2010 06:55:43 -0000 1.5
+++ lex.lexeme.m 10 Jan 2011 04:30:45 -0000
@@ -22,11 +22,15 @@
%
%-----------------------------------------------------------------------------%
-:- module lex__lexeme.
-
+:- module lex.lexeme.
:- interface.
-:- import_module bool, char, array, bitmap.
+:- import_module array.
+:- import_module bool.
+:- import_module bitmap.
+:- import_module char.
+
+%-----------------------------------------------------------------------------%
:- type compiled_lexeme(T)
---> compiled_lexeme(
@@ -34,6 +38,7 @@
state :: state_no,
transition_map :: transition_map
).
+
:- inst compiled_lexeme
---> compiled_lexeme(token_creator, ground, ground).
@@ -45,22 +50,18 @@
% A transition row is an array of byte_transitions.
%
-:- type row
- == array(byte_transition).
+:- type row == array(byte_transition).
% A byte_transition encodes a target state_no no. in its upper bits
% and the char byte value in its lower eight bits for which the
% transition is valid.
%
-:- type byte_transition
- == int.
+:- type byte_transition == int.
:- func byte_transition(int, state_no) = byte_transition.
:- func btr_byte(byte_transition) = int.
:- func btr_state(byte_transition) = state_no.
-
-
:- func compile_lexeme(lexeme(T)) = compiled_lexeme(T).
% next_state(CLXM, CurrentState, Char, NextState, IsAccepting)
@@ -81,8 +82,12 @@
:- implementation.
-:- import_module list, set.
-:- import_module lex__automata, lex__convert_NFA_to_DFA, lex__regexp.
+:- import_module lex.automata.
+:- import_module lex.regexp.
+:- import_module lex.convert_NFA_to_DFA.
+
+:- import_module list.
+:- import_module set.
%-----------------------------------------------------------------------------%
@@ -94,7 +99,7 @@
StopStates = DFA ^ smc_stop_states,
Transitions = DFA ^ smc_state_transitions,
N = 1 + find_top_state(Transitions),
- Accepting = set_accepting_states(StopStates, bitmap__new(N, no)),
+ Accepting = set_accepting_states(StopStates, bitmap.new(N, no)),
Rows = array(set_up_rows(0, N, Transitions)),
TransitionMap = transition_map(Accepting, Rows),
CompiledLexeme = compiled_lexeme(TokenCreator, StartState, TransitionMap).
@@ -113,7 +118,7 @@
:- mode set_accepting_states(in, bitmap_di) = bitmap_uo is det.
set_accepting_states(States, Bitmap0) =
- set_accepting_states_0(set__to_sorted_list(States), Bitmap0).
+ set_accepting_states_0(set.to_sorted_list(States), Bitmap0).
@@ -123,7 +128,7 @@
set_accepting_states_0([], Bitmap) = Bitmap.
set_accepting_states_0([St | States], Bitmap) =
- set_accepting_states_0(States, bitmap__set(Bitmap, St)).
+ set_accepting_states_0(States, bitmap.set(Bitmap, St)).
%-----------------------------------------------------------------------------%
@@ -150,7 +155,7 @@
compile_transitions_for_state(
I,
( if T = trans(I, C, Y)
- then [byte_transition(char__to_int(C), Y) | IBTs]
+ then [byte_transition(char.to_int(C), Y) | IBTs]
else IBTs
),
Ts
@@ -169,7 +174,7 @@
next_state(CLXM, CurrentState, Char, NextState, IsAccepting) :-
Rows = CLXM ^ transition_map ^ rows,
AcceptingStates = CLXM ^ transition_map ^ accepting_states,
- find_next_state(char__to_int(Char), Rows ^ elem(CurrentState), NextState),
+ find_next_state(char.to_int(Char), Rows ^ elem(CurrentState), NextState),
IsAccepting = AcceptingStates ^ bit(NextState).
%-----------------------------------------------------------------------------%
@@ -178,12 +183,10 @@
:- mode find_next_state(in, in, out) is semidet.
find_next_state(Byte, ByteTransitions, State) :-
- Lo = array__min(ByteTransitions),
- Hi = array__max(ByteTransitions),
+ Lo = array.min(ByteTransitions),
+ Hi = array.max(ByteTransitions),
find_next_state_0(Lo, Hi, Byte, ByteTransitions, State).
-
-
:- pred find_next_state_0(int, int, int, array(byte_transition), state_no).
:- mode find_next_state_0(in, in, in, in, out) is semidet.
@@ -198,9 +201,10 @@
%-----------------------------------------------------------------------------%
in_accepting_state(CLXM) :-
- bitmap__is_set(
+ bitmap.is_set(
CLXM ^ transition_map ^ accepting_states, CLXM ^ state
).
%-----------------------------------------------------------------------------%
+:- end_module lex.lexeme.
%-----------------------------------------------------------------------------%
Index: lex.m
===================================================================
RCS file: /home/mercury/mercury1/repository/mercury/extras/lex/lex.m,v
retrieving revision 1.8
diff -u -r1.8 lex.m
--- lex.m 5 Aug 2010 06:55:43 -0000 1.8
+++ lex.m 10 Jan 2011 04:30:45 -0000
@@ -31,6 +31,8 @@
:- import_module pair.
:- import_module string.
+%-----------------------------------------------------------------------------%
+
:- type token_creator(Token)
== (func(string) = Token).
:- inst token_creator
@@ -169,7 +171,7 @@
% Handy read predicates.
%
-:- pred read_from_stdin(offset, read_result, io__state, io__state).
+:- pred read_from_stdin(offset, read_result, io.state, io.state).
:- mode read_from_stdin(in, out, di, uo) is det.
:- pred read_from_string(offset, read_result, string, string).
@@ -199,7 +201,7 @@
% lexeme that matches any unexpected char at the end of the
% list of lexemes.
%
-:- pred read(io__read_result(Tok),
+:- pred read(io.read_result(Tok),
lexer_state(Tok, Src), lexer_state(Tok, Src)).
:- mode read(out, di, uo) is det.
@@ -216,7 +218,7 @@
:- func stop(lexer_state(_Tok, Src)) = Src.
:- mode stop(di) = uo is det.
- % Sometimes (e.g. when lexing the io__io) you want access to the
+ % Sometimes (e.g. when lexing the io.io) you want access to the
% input stream without interrupting the lexing process. This pred
% provides that sort of access.
%
@@ -235,17 +237,26 @@
:- implementation.
-:- include_module lex__automata.
-:- include_module lex__buf.
-:- include_module lex__convert_NFA_to_DFA.
-:- include_module lex__lexeme.
-:- include_module lex__regexp.
-
-:- import_module map, char, bool, int, exception, array.
-:- import_module lex__regexp, lex__automata, lex__convert_NFA_to_DFA.
-:- import_module lex__lexeme, lex__buf.
-
+:- include_module lex.automata.
+:- include_module lex.buf.
+:- include_module lex.convert_NFA_to_DFA.
+:- include_module lex.lexeme.
+:- include_module lex.regexp.
+
+:- import_module array.
+:- import_module bool.
+:- import_module char.
+:- import_module exception.
+:- import_module int.
+:- import_module map.
+
+:- import_module lex.automata.
+:- import_module lex.buf.
+:- import_module lex.convert_NFA_to_DFA.
+:- import_module lex.lexeme.
+:- import_module lex.regexp.
+%-----------------------------------------------------------------------------%
:- type lexer(Token, Source)
---> lexer(
@@ -273,7 +284,7 @@
init_winner_func,
live_lexeme_list,
winner,
- buf__buf_state,
+ buf.buf_state,
ignore_pred
).
@@ -282,7 +293,7 @@
:- inst live_lexeme
== compiled_lexeme.
:- inst live_lexeme_list
- == list__list_skel(live_lexeme).
+ == list.list_skel(live_lexeme).
:- type init_winner_func(Token)
== ( func(offset) = winner(Token) ).
@@ -317,7 +328,7 @@
init(Lexemes, BufReadPred, IgnorePred) =
lexer(CompiledLexemes, IgnorePred, BufReadPred)
:-
- CompiledLexemes = list__map(compile_lexeme, Lexemes).
+ CompiledLexemes = list.map(compile_lexeme, Lexemes).
%-----------------------------------------------------------------------------%
@@ -342,7 +353,7 @@
:- mode init_lexer_instance(in(lexer), out(lexer_instance), array_uo) is det.
init_lexer_instance(Lexer, Instance, Buf) :-
- buf__init(Lexer ^ lex_buf_read_pred, BufState, Buf),
+ buf.init(Lexer ^ lex_buf_read_pred, BufState, Buf),
Start = BufState ^ start_offset,
InitWinnerFunc = initial_winner_func(InitLexemes),
InitLexemes = Lexer ^ lex_compiled_lexemes,
@@ -395,7 +406,7 @@
-:- pred read_2(io__read_result(Tok),
+:- pred read_2(io.read_result(Tok),
lexer_instance(Tok, Src), lexer_instance(Tok, Src),
buf, buf, Src, Src).
:- mode read_2(out,
@@ -409,7 +420,7 @@
BufState0 = !.Instance ^ buf_state,
- buf__read(BufReadResult, BufState0, BufState, !Buf, !Src),
+ buf.read(BufReadResult, BufState0, BufState, !Buf, !Src),
(
BufReadResult = ok(Char),
process_char(Result, Char, !Instance, BufState, !Buf, !Src)
@@ -420,7 +431,7 @@
%-----------------------------------------------------------------------------%
-:- pred process_char(io__read_result(Tok), char,
+:- pred process_char(io.read_result(Tok), char,
lexer_instance(Tok, Src), lexer_instance(Tok, Src),
buf_state(Src), buf, buf, Src, Src).
:- mode process_char(out, in, in(lexer_instance), out(lexer_instance),
@@ -448,7 +459,7 @@
%-----------------------------------------------------------------------------%
-:- pred process_any_winner(io__read_result(Tok), winner(Tok),
+:- pred process_any_winner(io.read_result(Tok), winner(Tok),
lexer_instance(Tok, Src), lexer_instance(Tok, Src),
buf_state(Src), buf, buf, Src, Src).
:- mode process_any_winner(out, in(winner),
@@ -476,7 +487,7 @@
% so it must be reported as an error.
%
( if String = "" then
- buf__read(BufResult, BufState1, BufState, Buf0, Buf, Src0, Src),
+ buf.read(BufResult, BufState1, BufState, Buf0, Buf, Src0, Src),
(
BufResult = ok(_),
Result = error("input not matched by any regexp", Offset)
@@ -509,7 +520,7 @@
%-----------------------------------------------------------------------------%
-:- pred process_eof(io__read_result(Tok),
+:- pred process_eof(io.read_result(Tok),
lexer_instance(Tok, Src), lexer_instance(Tok, Src),
buf_state(Src), buf).
:- mode process_eof(out, in(lexer_instance), out(lexer_instance),
@@ -634,7 +645,7 @@
lexer_state_args(!.State, Instance0, Buf0, Src0),
BufState0 = Instance0 ^ buf_state,
- buf__read(Result, BufState0, BufState, Buf0, Buf, Src0, Src),
+ buf.read(Result, BufState0, BufState, Buf0, Buf, Src0, Src),
Instance = ( Instance0 ^ buf_state := commit(BufState) ),
!:State = args_lexer_state(Instance, Buf, Src).
@@ -642,7 +653,7 @@
%-----------------------------------------------------------------------------%
read_from_stdin(_Offset, Result) -->
- io__read_char(IOResult),
+ io.read_char(IOResult),
{ IOResult = ok(Char), Result = ok(Char)
; IOResult = eof, Result = eof
; IOResult = error(_E), throw(IOResult)
@@ -655,8 +666,8 @@
% a char.
%
read_from_string(Offset, Result, String, unsafe_promise_unique(String)) :-
- ( if Offset < string__length(String)
- then Result = ok(string__unsafe_index(String, Offset))
+ ( if Offset < string.length(String)
+ then Result = ok(string.unsafe_index(String, Offset))
else Result = eof
).
@@ -685,8 +696,8 @@
( if S = "" then
R = null
else
- L = string__length(S),
- C = string__index_det(S, L - 1),
+ L = string.length(S),
+ C = string.index_det(S, L - 1),
R = str_foldr(func(Cx, Rx) = (Cx ++ Rx), S, re(C), L - 2)
)
].
@@ -707,17 +718,17 @@
( if S = "" then
R = null
else
- L = string__length(S),
- C = string__index_det(S, L - 1),
+ L = string.length(S),
+ C = string.index_det(S, L - 1),
R = str_foldr(func(Cx, Rx) = (Cx or Rx), S, re(C), L - 2)
).
anybut(S0) = R :-
- S = string__from_char_list(
- list__filter_map(
+ S = string.from_char_list(
+ list.filter_map(
( func(X) = C is semidet :-
- char__to_int(C, X),
- not string__contains_char(S0, C)
+ char.to_int(C, X),
+ not string.contains_char(S0, C)
),
0x01 `..` 0xff
)
@@ -728,7 +739,7 @@
str_foldr(Fn, S, X, I) =
( if I < 0 then X
- else str_foldr(Fn, S, Fn(string__index_det(S, I), X), I - 1)
+ else str_foldr(Fn, S, Fn(string.index_det(S, I), X), I - 1)
).
?(R) = (R or null).
Index: lex.regexp.m
===================================================================
RCS file: /home/mercury/mercury1/repository/mercury/extras/lex/lex.regexp.m,v
retrieving revision 1.4
diff -u -r1.4 lex.regexp.m
--- lex.regexp.m 5 Aug 2010 06:55:43 -0000 1.4
+++ lex.regexp.m 10 Jan 2011 04:30:45 -0000
@@ -21,13 +21,10 @@
%
%-----------------------------------------------------------------------------%
-:- module lex__regexp.
-
+:- module lex.regexp.
:- interface.
-:- import_module lex__automata.
-
-
+:- import_module lex.automata.
% Turn a regexp into an NFA.
%
@@ -43,16 +40,22 @@
:- implementation.
-:- import_module counter, map, assoc_list, std_util, list, set, string.
+:- import_module assoc_list.
+:- import_module counter.
+:- import_module list.
+:- import_module map.
+:- import_module set.
+:- import_module std_util.
+:- import_module string.
%-----------------------------------------------------------------------------%
regexp_to_NFA(R) = NFA :-
- C0 = counter__init(0),
- counter__allocate(Start, C0, C1),
- counter__allocate(Stop, C1, C),
+ C0 = counter.init(0),
+ counter.allocate(Start, C0, C1),
+ counter.allocate(Stop, C1, C),
compile(Start, R, Stop, Transitions, C, _),
- NFA = state_mc(Start, set__make_singleton_set(Stop), Transitions).
+ NFA = state_mc(Start, set.make_singleton_set(Stop), Transitions).
%-----------------------------------------------------------------------------%
@@ -66,7 +69,7 @@
compile(X, atom(C), Y, [trans(X, C, Y)]) --> [].
compile(X, conc(RA,RB), Y, TsA ++ TsB) -->
- counter__allocate(Z),
+ counter.allocate(Z),
compile(X, RA, Z, TsA),
compile(Z, RB, Y, TsB).
@@ -91,18 +94,18 @@
Ts = NFA0 ^ smc_state_transitions,
split_transitions(Ts, NullTs, CharTs),
- trans_closure(NullTs, map__init, _Ins, map__init, Outs),
+ trans_closure(NullTs, map.init, _Ins, map.init, Outs),
NullFreeTs = add_atom_transitions(Outs, CharTs),
StopStates0 = NFA0 ^ smc_stop_states,
StopStates1 =
- set__list_to_set(
- list__filter_map(
+ set.list_to_set(
+ list.filter_map(
nulls_to_stop_state(Outs, NFA0 ^ smc_stop_states),
NullTs
)
),
- StopStates = StopStates0 `set__union` StopStates1,
+ StopStates = StopStates0 `set.union` StopStates1,
NFA = (( NFA0
^ smc_state_transitions := NullFreeTs )
@@ -129,7 +132,7 @@
:- mode trans_closure(in(null_transitions), in, out, in, out) is det.
trans_closure(Ts, Ins0, Ins, Outs0, Outs) :-
- list__foldl2(add_edge, Ts, Ins0, Ins, Outs0, Outs).
+ list.foldl2(add_edge, Ts, Ins0, Ins, Outs0, Outs).
%-----------------------------------------------------------------------------%
@@ -137,20 +140,20 @@
:- mode add_edge(in(null_transition), in, out, in, out) is det.
add_edge(null(X, Y), Ins0, Ins, Outs0, Outs) :-
- XInAndX = set__insert(null_map_lookup(X, Ins0), X),
- YOutAndY = set__insert(null_map_lookup(Y, Outs0), Y),
- Xs = set__to_sorted_list(XInAndX),
- Ys = set__to_sorted_list(YOutAndY),
- Outs = list__foldl(add_to_null_mapping(YOutAndY), Xs, Outs0),
- Ins = list__foldl(add_to_null_mapping(XInAndX), Ys, Ins0).
+ XInAndX = set.insert(null_map_lookup(X, Ins0), X),
+ YOutAndY = set.insert(null_map_lookup(Y, Outs0), Y),
+ Xs = set.to_sorted_list(XInAndX),
+ Ys = set.to_sorted_list(YOutAndY),
+ Outs = list.foldl(add_to_null_mapping(YOutAndY), Xs, Outs0),
+ Ins = list.foldl(add_to_null_mapping(XInAndX), Ys, Ins0).
%-----------------------------------------------------------------------------%
:- func null_map_lookup(state_no, null_map) = set(state_no).
null_map_lookup(X, Map) =
- ( if map__search(Map, X, Ys) then Ys
- else set__init
+ ( if map.search(Map, X, Ys) then Ys
+ else set.init
).
%-----------------------------------------------------------------------------%
@@ -158,7 +161,7 @@
:- func add_to_null_mapping(set(state_no), state_no, null_map) = null_map.
add_to_null_mapping(Xs, Y, Map) =
- map__set(Map, Y, Xs `set__union` null_map_lookup(Y, Map)).
+ map.set(Map, Y, Xs `set.union` null_map_lookup(Y, Map)).
%-----------------------------------------------------------------------------%
@@ -167,12 +170,12 @@
out(atom_transitions) is det.
add_atom_transitions(Outs, CTs) =
- list__sort_and_remove_dups(
- list__condense(
+ list.sort_and_remove_dups(
+ list.condense(
[ CTs
- | list__map(
+ | list.map(
add_atom_transitions_0(CTs),
- map__to_assoc_list(Outs)
+ map.to_assoc_list(Outs)
)
]
)
@@ -186,8 +189,8 @@
out(atom_transitions) is det.
add_atom_transitions_0(CTs, X - Ys) =
- list__condense(
- list__map(add_atom_transitions_1(CTs, X), set__to_sorted_list(Ys))
+ list.condense(
+ list.map(add_atom_transitions_1(CTs, X), set.to_sorted_list(Ys))
).
%-----------------------------------------------------------------------------%
@@ -197,7 +200,7 @@
out(atom_transitions) is det.
add_atom_transitions_1(CTs0, X, Y) = CTs :-
- list__filter_map(maybe_copy_transition(X, Y), CTs0, CTs).
+ list.filter_map(maybe_copy_transition(X, Y), CTs0, CTs).
%-----------------------------------------------------------------------------%
@@ -214,9 +217,10 @@
nulls_to_stop_state(Outs, StopStates, null(X, _Y)) = X :-
some [Z] (
- set__member(Z, map__lookup(Outs, X)),
- set__member(Z, StopStates)
+ set.member(Z, map.lookup(Outs, X)),
+ set.member(Z, StopStates)
).
%-----------------------------------------------------------------------------%
+:- end_module lex.regexp.
%-----------------------------------------------------------------------------%
Index: regex.m
===================================================================
RCS file: /home/mercury/mercury1/repository/mercury/extras/lex/regex.m,v
retrieving revision 1.4
diff -u -r1.4 regex.m
--- regex.m 5 Aug 2010 06:55:44 -0000 1.4
+++ regex.m 10 Jan 2011 04:30:45 -0000
@@ -31,12 +31,15 @@
%-----------------------------------------------------------------------------%
:- module regex.
-
:- interface.
-:- import_module string, list.
:- import_module lex.
+:- import_module list.
+:- import_module string.
+
+%-----------------------------------------------------------------------------%
+
% The type of (compiled) regular expressions.
%
:- type regex.
@@ -181,8 +184,15 @@
:- implementation.
-:- import_module int, char, bool, require, std_util, pair, io.
+:- import_module bool.
+:- import_module char.
+:- import_module int.
+:- import_module io.
+:- import_module pair.
+:- import_module require.
+:- import_module std_util.
+%-----------------------------------------------------------------------------%
:- type regex == lexer(string, string).
@@ -306,7 +316,7 @@
:- func int_to_char(int) = char.
int_to_char(X) =
- ( if char__to_int(C, X) then C else func_error("regex__int_to_char") ).
+ ( if char.to_int(C, X) then C else func_error("regex.int_to_char") ).
%-----------------------------------------------------------------------------%
@@ -423,8 +433,8 @@
extract_regex(re(R)) = R.
extract_regex(char(R)) = R.
-extract_regex(alt(_)) = func_error("regex__extract_regex").
-extract_regex(lpar) = func_error("regex__extract_regex").
+extract_regex(alt(_)) = func_error("regex.extract_regex").
+extract_regex(lpar) = func_error("regex.extract_regex").
%-----------------------------------------------------------------------------%
@@ -449,7 +459,7 @@
left_match(Regex, String, Substring, 0, length(Substring)) :-
State = start(Regex, unsafe_promise_unique(String)),
- lex__read(ok(Substring), State, _).
+ lex.read(ok(Substring), State, _).
%-----------------------------------------------------------------------------%
@@ -484,8 +494,8 @@
:- mode first_match_2(out, out, di ) is semidet.
first_match_2(Substring, Start, !.State) :-
- lex__offset_from_start(Start0, !State),
- lex__read(Result, !State),
+ lex.offset_from_start(Start0, !State),
+ lex.read(Result, !State),
(
Result = error(_, _),
first_match_2(Substring, Start, !.State)
@@ -498,7 +508,7 @@
exact_match(Regex, String) :-
State = start(Regex, unsafe_promise_unique(String)),
- lex__read(ok(String), State, _).
+ lex.read(ok(String), State, _).
%-----------------------------------------------------------------------------%
@@ -511,9 +521,9 @@
:- mode matches_2(in, in, di) = out is det.
matches_2(Length, LastEnd, State0) = Matches :-
- lex__offset_from_start(Start0, State0, State1),
- lex__read(Result, State1, State2),
- lex__offset_from_start(End, State2, State3),
+ lex.offset_from_start(Start0, State0, State1),
+ lex.read(Result, State1, State2),
+ lex.offset_from_start(End, State2, State3),
(
Result = eof,
Matches = []
@@ -543,7 +553,7 @@
% This is an empty match at the same point as the end
% of our last match. We have to ignore it and move on.
%
- ( if lex__read_char(ok(_), State3, State4)
+ ( if lex.read_char(ok(_), State3, State4)
then matches_2(Length, End, State4)
else []
)
@@ -554,7 +564,7 @@
( if End = Length then
[]
else if Count = 0 then
- ( if lex__read_char(ok(_), State3, State4)
+ ( if lex.read_char(ok(_), State3, State4)
then matches_2(Length, End, State4)
else []
)
@@ -606,4 +616,5 @@
| change_all_2(String, ChangeFn, Start + Count, Matches) ].
%-----------------------------------------------------------------------------%
+:- end_module regex.
%-----------------------------------------------------------------------------%
Index: samples/lex_demo.m
===================================================================
RCS file: /home/mercury/mercury1/repository/mercury/extras/lex/samples/lex_demo.m,v
retrieving revision 1.1
diff -u -r1.1 lex_demo.m
--- samples/lex_demo.m 3 Dec 2002 04:48:33 -0000 1.1
+++ samples/lex_demo.m 10 Jan 2011 04:30:45 -0000
@@ -1,4 +1,4 @@
-%----------------------------------------------------------------------------- %
+%-----------------------------------------------------------------------------%
% lex_demo.m
% Sun Aug 20 18:11:42 BST 2000
%
@@ -13,30 +13,33 @@
% This file may only be copied under the terms of the GNU Library General
% Public License - see the file COPYING.LIB in the Mercury distribution.
%
-%
-%----------------------------------------------------------------------------- %
+%-----------------------------------------------------------------------------%
:- module lex_demo.
-
:- interface.
:- import_module io.
:- pred main(io::di, io::uo) is det.
-%----------------------------------------------------------------------------- %
-%----------------------------------------------------------------------------- %
+%-----------------------------------------------------------------------------%
+%-----------------------------------------------------------------------------%
:- implementation.
-:- import_module string, int, float, exception, list.
:- import_module lex.
-%----------------------------------------------------------------------------- %
+:- import_module exception.
+:- import_module float.
+:- import_module int.
+:- import_module list.
+:- import_module string.
+
+%-----------------------------------------------------------------------------%
-main(IO0, IO) :-
+main(!IO) :-
- io__print("\
+ io.print("\
I recognise the following words:
""cat"", ""dog"", ""rat"", ""mat"", ""sat"", ""caught"", ""chased"",
@@ -46,30 +49,32 @@
Try me...
-", IO0, IO1),
+", !IO),
- Lexer = lex__init(lexemes, lex__read_from_stdin, ignore(space)),
- State0 = lex__start(Lexer, IO1),
+ Lexer = lex.init(lexemes, lex.read_from_stdin, ignore(space)),
+ State0 = lex.start(Lexer, !.IO),
tokenise_stdin(State0, State),
- IO = lex__stop(State).
+ !:IO = lex.stop(State).
-%----------------------------------------------------------------------------- %
+%-----------------------------------------------------------------------------%
-:- pred tokenise_stdin(lexer_state(token, io__state),
- lexer_state(token, io__state)).
-:- mode tokenise_stdin(di, uo) is det.
-
-tokenise_stdin -->
- lex__read(Result),
- lex__manipulate_source(io__print(Result)),
- lex__manipulate_source(io__nl),
- ( if { Result \= eof } then
- tokenise_stdin
- else
- []
+:- pred tokenise_stdin(lexer_state(token, io)::di, lexer_state(token, io)::uo)
+ is det.
+
+tokenise_stdin(!LS) :-
+ lex.read(Result, !LS),
+ lex.manipulate_source(io.print(Result), !LS),
+ lex.manipulate_source(io.nl, !LS),
+ (
+ Result = ok(_),
+ tokenise_stdin(!LS)
+ ;
+ ( Result = eof
+ ; Result = error(_, _)
+ )
).
-%----------------------------------------------------------------------------- %
+%-----------------------------------------------------------------------------%
:- type token
---> noun(string)
@@ -81,16 +86,15 @@
; prep(string)
; punc
; space
- ; unrecognised(string)
- .
+ ; unrecognised(string).
:- func lexemes = list(lexeme(token)).
lexemes = [
( "%" ++ junk -> (func(Match) = comment(Match)) ),
- ( signed_int -> (func(Match) = integer(string__det_to_int(Match))) ),
- ( real -> (func(Match) = real(det_string_to_float(Match))) ),
+ ( signed_int -> (func(Match) = integer(string.det_to_int(Match))) ),
+ ( real -> (func(Match) = real(string.det_to_float(Match))) ),
% Multiple regexps can match the same token constructor.
%
@@ -124,15 +128,6 @@
( dot -> func(Match) = unrecognised(Match) )
].
-
-
-:- func det_string_to_float(string) = float.
-
-det_string_to_float(String) =
- ( if string__to_float(String, Float)
- then Float
- else throw("error in float conversion")
- ).
-
-%----------------------------------------------------------------------------- %
-%----------------------------------------------------------------------------- %
+%-----------------------------------------------------------------------------%
+:- end_module lex_demo.
+%-----------------------------------------------------------------------------%
Index: samples/regex_demo.m
===================================================================
RCS file: /home/mercury/mercury1/repository/mercury/extras/lex/samples/regex_demo.m,v
retrieving revision 1.1
diff -u -r1.1 regex_demo.m
--- samples/regex_demo.m 3 Dec 2002 04:48:33 -0000 1.1
+++ samples/regex_demo.m 10 Jan 2011 04:30:45 -0000
@@ -13,13 +13,10 @@
%-----------------------------------------------------------------------------%
:- module regex_demo.
-
:- interface.
:- import_module io.
-
-
:- pred main(io::di, io::uo) is det.
%-----------------------------------------------------------------------------%
@@ -27,15 +24,19 @@
:- implementation.
-:- import_module string, list, exception.
-:- import_module lex, regex.
+:- import_module lex.
+:- import_module regex.
+
+:- import_module exception.
+:- import_module list.
+:- import_module string.
%-----------------------------------------------------------------------------%
main(!IO) :-
S = "([Ff][Oo][Oo])+",
M = change_all(regex(S), func(_) = "bar"),
- io__format("Replacing multiple \"foo\"s with a single \"bar\"...",
+ io.format("Replacing multiple \"foo\"s with a single \"bar\"...",
[], !IO),
loop(M, !IO).
@@ -45,8 +46,8 @@
:- mode loop(func(in) = out is det, di, uo) is det.
loop(M, !IO) :-
- io__format("\n> ", [], !IO),
- io__read_line_as_string(Res, !IO),
+ io.format("\n> ", [], !IO),
+ io.read_line_as_string(Res, !IO),
(
Res = eof
;
@@ -54,9 +55,10 @@
throw(Res)
;
Res = ok(S),
- io__format(" %s", [s(M(S))], !IO),
+ io.format(" %s", [s(M(S))], !IO),
loop(M, !IO)
).
%-----------------------------------------------------------------------------%
+:- end_module regex_demo.
%-----------------------------------------------------------------------------%