diff for review and inclusion in the build tonight
Fergus Henderson
fjh at cs.mu.oz.au
Thu Oct 9 23:12:30 AEST 1997
These changes both look fine. I will commit them.
Zoltan Somogyi, you wrote:
>
> This message has two diffs. The log message for the first:
>
> lexer.m:
> Change the representation of token lists to be a fat list
> instead of a list of pairs, since this reduces memory
> consumption, the number of calls to the memory allocator,
> and the amount of indirection required to access tokens.
>
> parser.m:
> Use the new token list representation.
>
> The second diff includes the first, since I am testing them together.
> You can look at ~zs/mer/ws3/Out.oct9 for the progress of the bootcheck.
> If it passes, and you approve of the diff, you should apply it
> before the build. (I am going home now, and can't wait for the bootcheck.)
> BTW, I started the bootcheck just after doing a cvs update to get Chris's
> changes.
>
> The second log message is:
>
> varset.m:
> Define a new predicate to create a new named variable. Previously
> 	this required two separate operations, creating a variable and
> giving it a name. Using the new predicate reduces memory consumption.
>
> parser.m:
> Use the new predicate in varset.m.
>
> Zoltan.
>
> ------------------------------------------------------------------------
>
> cvs diff: Diffing .
> Index: lexer.m
> ===================================================================
> RCS file: /home/mercury1/repository/mercury/library/lexer.m,v
> retrieving revision 1.26
> diff -u -r1.26 lexer.m
> --- lexer.m 1997/07/27 15:06:51 1.26
> +++ lexer.m 1997/10/09 08:49:44
> @@ -17,7 +17,7 @@
>
> :- module lexer.
> :- interface.
> -:- import_module list, std_util, io.
> +:- import_module io.
>
> :- type token
> ---> name(string)
> @@ -45,7 +45,9 @@
>
> :- type token_context == int. % line number
>
> -:- type token_list == list(pair(token, token_context)).
> +% This "fat list" representation is more efficient than a list of pairs.
> +:- type token_list ---> token_cons(token, token_context, token_list)
> + ; token_nil.
>
> :- pred lexer__get_token_list(token_list, io__state, io__state).
> :- mode lexer__get_token_list(out, di, uo) is det.
> @@ -70,7 +72,7 @@
> %-----------------------------------------------------------------------------%
>
> :- implementation.
> -:- import_module require, char, string, int.
> +:- import_module list, require, char, string, int.
>
> lexer__token_to_string(name(Name), String) :-
> string__append_list(["token '", Name, "'"], String).
> @@ -118,11 +120,11 @@
> lexer__get_token_list(Tokens) -->
> lexer__get_token(Token, Context),
> ( { Token = eof } ->
> - { Tokens = [] }
> + { Tokens = token_nil }
> ; { Token = end ; Token = error(_) ; Token = io_error(_) } ->
> - { Tokens = [Token - Context] }
> + { Tokens = token_cons(Token, Context, token_nil) }
> ;
> - { Tokens = [Token - Context | Tokens1] },
> + { Tokens = token_cons(Token, Context, Tokens1) },
> lexer__get_token_list(Tokens1)
> ).
>
> Index: parser.m
> ===================================================================
> RCS file: /home/mercury1/repository/mercury/library/parser.m,v
> retrieving revision 1.25
> diff -u -r1.25 parser.m
> --- parser.m 1997/09/06 17:39:58 1.25
> +++ parser.m 1997/10/09 08:49:44
> @@ -59,7 +59,7 @@
>
> parser__read_term(FileName, Result) -->
> lexer__get_token_list(Tokens),
> - ( { Tokens = [] } ->
> + ( { Tokens = token_nil } ->
> { Result = eof }
> ;
> { parser__init_state(FileName, Tokens, ParserState0) },
> @@ -85,7 +85,7 @@
> ;
> % find the token that caused the error
> (
> - ErrorTokens = [ErrorTok - ErrorTokLineNum| _]
> + ErrorTokens = token_cons(ErrorTok, ErrorTokLineNum, _)
> ->
> lexer__token_to_string(ErrorTok, TokString),
> string__append_list( ["Syntax error at ", TokString,
> @@ -93,7 +93,7 @@
> LineNum = ErrorTokLineNum
> ;
> (
> - Tokens = [_FirstTok - FirstTokLineNum | _]
> + Tokens = token_cons(_, FirstTokLineNum, _)
> ->
> LineNum = FirstTokLineNum
> ;
> @@ -110,7 +110,7 @@
> ->
> Result = error(Message, LineNum)
> ;
> - LeftOverTokens = [Token - LineNum | _]
> + LeftOverTokens = token_cons(Token, LineNum, _)
> ->
> lexer__token_to_string(Token, TokString),
> string__append("Syntax error: unexpected ", TokString,
> @@ -123,7 +123,8 @@
> :- pred parser__check_for_bad_token(token_list, string, int).
> :- mode parser__check_for_bad_token(in, out, out) is semidet.
>
> -parser__check_for_bad_token([Token - LineNum | Tokens], Message, LineNum) :-
> +parser__check_for_bad_token(token_cons(Token, LineNum, Tokens),
> + Message, LineNum) :-
> ( Token = io_error(IO_Error) ->
> io__error_message(IO_Error, IO_ErrorMessage),
> string__append("I/O error: ", IO_ErrorMessage, Message)
> @@ -665,7 +666,7 @@
> parser__get_token(Token, Context,
> parser__state(FileName, OpTable, VarSet, Tokens0, Names),
> parser__state(FileName, OpTable, VarSet, Tokens, Names)) :-
> - Tokens0 = [Token - Context | Tokens].
> + Tokens0 = token_cons(Token, Context, Tokens).
>
> :- pred parser__unget_token(token, token_context, parser__state, parser__state).
> :- mode parser__unget_token(in, in, in, out) is det.
> @@ -685,7 +686,7 @@
>
> parser__peek_token(Token, Context) -->
> =(parser__state(_, _, _, Tokens, _)),
> - { Tokens = [Token - Context | _] }.
> + { Tokens = token_cons(Token, Context, _) }.
>
> %-----------------------------------------------------------------------------%
>
> ------------------------------------------------------------------------
>
> cvs diff: Diffing .
> cvs diff: Diffing bindist
> cvs diff: Diffing boehm_gc
> cvs diff: Diffing boehm_gc/Mac_files
> cvs diff: Diffing boehm_gc/cord
> cvs diff: Diffing boehm_gc/cord/private
> cvs diff: Diffing boehm_gc/include
> cvs diff: Diffing boehm_gc/include/private
> cvs diff: Diffing bytecode
> cvs diff: Diffing bytecode/test
> cvs diff: Diffing compiler
> cvs diff: Diffing compiler/notes
> cvs diff: Diffing doc
> cvs diff: Diffing extras
> cvs diff: Diffing extras/cgi
> cvs diff: Diffing extras/complex_numbers
> cvs diff: Diffing extras/complex_numbers/samples
> cvs diff: Diffing extras/complex_numbers/tests
> cvs diff: Diffing extras/graphics
> cvs diff: Diffing extras/graphics/Togl-1.2
> cvs diff: Diffing extras/graphics/samples
> cvs diff: Diffing extras/graphics/samples/calc
> cvs diff: Diffing extras/graphics/samples/maze
> cvs diff: Diffing extras/odbc
> cvs diff: Diffing extras/trailed_update
> cvs diff: Diffing extras/trailed_update/samples
> cvs diff: Diffing library
> Index: library/lexer.m
> ===================================================================
> RCS file: /home/mercury1/repository/mercury/library/lexer.m,v
> retrieving revision 1.26
> diff -u -r1.26 lexer.m
> --- lexer.m 1997/07/27 15:06:51 1.26
> +++ lexer.m 1997/10/09 08:49:44
> @@ -17,7 +17,7 @@
>
> :- module lexer.
> :- interface.
> -:- import_module list, std_util, io.
> +:- import_module io.
>
> :- type token
> ---> name(string)
> @@ -45,7 +45,9 @@
>
> :- type token_context == int. % line number
>
> -:- type token_list == list(pair(token, token_context)).
> +% This "fat list" representation is more efficient than a list of pairs.
> +:- type token_list ---> token_cons(token, token_context, token_list)
> + ; token_nil.
>
> :- pred lexer__get_token_list(token_list, io__state, io__state).
> :- mode lexer__get_token_list(out, di, uo) is det.
> @@ -70,7 +72,7 @@
> %-----------------------------------------------------------------------------%
>
> :- implementation.
> -:- import_module require, char, string, int.
> +:- import_module list, require, char, string, int.
>
> lexer__token_to_string(name(Name), String) :-
> string__append_list(["token '", Name, "'"], String).
> @@ -118,11 +120,11 @@
> lexer__get_token_list(Tokens) -->
> lexer__get_token(Token, Context),
> ( { Token = eof } ->
> - { Tokens = [] }
> + { Tokens = token_nil }
> ; { Token = end ; Token = error(_) ; Token = io_error(_) } ->
> - { Tokens = [Token - Context] }
> + { Tokens = token_cons(Token, Context, token_nil) }
> ;
> - { Tokens = [Token - Context | Tokens1] },
> + { Tokens = token_cons(Token, Context, Tokens1) },
> lexer__get_token_list(Tokens1)
> ).
>
> Index: library/parser.m
> ===================================================================
> RCS file: /home/mercury1/repository/mercury/library/parser.m,v
> retrieving revision 1.25
> diff -u -r1.25 parser.m
> --- parser.m 1997/09/06 17:39:58 1.25
> +++ parser.m 1997/10/09 09:27:25
> @@ -59,7 +59,7 @@
>
> parser__read_term(FileName, Result) -->
> lexer__get_token_list(Tokens),
> - ( { Tokens = [] } ->
> + ( { Tokens = token_nil } ->
> { Result = eof }
> ;
> { parser__init_state(FileName, Tokens, ParserState0) },
> @@ -85,7 +85,7 @@
> ;
> % find the token that caused the error
> (
> - ErrorTokens = [ErrorTok - ErrorTokLineNum| _]
> + ErrorTokens = token_cons(ErrorTok, ErrorTokLineNum, _)
> ->
> lexer__token_to_string(ErrorTok, TokString),
> string__append_list( ["Syntax error at ", TokString,
> @@ -93,7 +93,7 @@
> LineNum = ErrorTokLineNum
> ;
> (
> - Tokens = [_FirstTok - FirstTokLineNum | _]
> + Tokens = token_cons(_, FirstTokLineNum, _)
> ->
> LineNum = FirstTokLineNum
> ;
> @@ -110,7 +110,7 @@
> ->
> Result = error(Message, LineNum)
> ;
> - LeftOverTokens = [Token - LineNum | _]
> + LeftOverTokens = token_cons(Token, LineNum, _)
> ->
> lexer__token_to_string(Token, TokString),
> string__append("Syntax error: unexpected ", TokString,
> @@ -123,7 +123,8 @@
> :- pred parser__check_for_bad_token(token_list, string, int).
> :- mode parser__check_for_bad_token(in, out, out) is semidet.
>
> -parser__check_for_bad_token([Token - LineNum | Tokens], Message, LineNum) :-
> +parser__check_for_bad_token(token_cons(Token, LineNum, Tokens),
> + Message, LineNum) :-
> ( Token = io_error(IO_Error) ->
> io__error_message(IO_Error, IO_ErrorMessage),
> string__append("I/O error: ", IO_ErrorMessage, Message)
> @@ -665,7 +666,7 @@
> parser__get_token(Token, Context,
> parser__state(FileName, OpTable, VarSet, Tokens0, Names),
> parser__state(FileName, OpTable, VarSet, Tokens, Names)) :-
> - Tokens0 = [Token - Context | Tokens].
> + Tokens0 = token_cons(Token, Context, Tokens).
>
> :- pred parser__unget_token(token, token_context, parser__state, parser__state).
> :- mode parser__unget_token(in, in, in, out) is det.
> @@ -685,7 +686,7 @@
>
> parser__peek_token(Token, Context) -->
> =(parser__state(_, _, _, Tokens, _)),
> - { Tokens = [Token - Context | _] }.
> + { Tokens = token_cons(Token, Context, _) }.
>
> %-----------------------------------------------------------------------------%
>
> @@ -703,8 +704,7 @@
> VarSet = VarSet0,
> Names = Names0
> ;
> - varset__new_var(VarSet0, Var, VarSet1),
> - varset__name_var(VarSet1, Var, VarName, VarSet),
> + varset__new_named_var(VarSet0, VarName, Var, VarSet),
> map__det_insert(Names0, VarName, Var, Names)
> ).
>
> Index: library/varset.m
> ===================================================================
> RCS file: /home/mercury1/repository/mercury/library/varset.m,v
> retrieving revision 1.54
> diff -u -r1.54 varset.m
> --- varset.m 1997/07/27 15:07:28 1.54
> +++ varset.m 1997/10/09 08:57:29
> @@ -44,6 +44,10 @@
> :- pred varset__new_var(varset, var, varset).
> :- mode varset__new_var(in, out, out) is det.
>
> + % create a new named variable
> +:- pred varset__new_named_var(varset, string, var, varset).
> +:- mode varset__new_named_var(in, in, out, out) is det.
> +
> % create multiple new variables
> :- pred varset__new_vars(varset, int, list(var), varset).
> :- mode varset__new_vars(in, in, out, out) is det.
> @@ -174,6 +178,11 @@
> varset(MaxId, Names, Vals)) :-
> term__create_var(MaxId0, Var, MaxId).
>
> +varset__new_named_var(varset(MaxId0, Names0, Vals), Name, Var,
> + varset(MaxId, Names, Vals)) :-
> + term__create_var(MaxId0, Var, MaxId),
> + varset__name_var_2(Names0, Var, Name, Names).
> +
> varset__new_vars(Varset0, NumVars, NewVars, Varset) :-
> varset__new_vars_2(Varset0, NumVars, [], NewVars, Varset).
>
> @@ -238,14 +247,20 @@
> % the same name costs O(N*N) time and space.
>
> varset__name_var(VarSet0, Id, Name, VarSet) :-
> + VarSet0 = varset(MaxId, Names0, Vals),
> + varset__name_var_2(Names0, Id, Name, Names),
> + VarSet = varset(MaxId, Names, Vals).
> +
> +:- pred varset__name_var_2(map(var, string), var, string, map(var, string)).
> +:- mode varset__name_var_2(in, in, in, out) is det.
> +
> +varset__name_var_2(Names0, Id, Name, Names) :-
> ( string__remove_suffix(Name, "'", _) ->
> error("varset__name_var: name is already primed")
> ;
> true
> ),
> - VarSet0 = varset(MaxId, Names0, Vals),
> - map__set(Names0, Id, Name, Names),
> - VarSet = varset(MaxId, Names, Vals).
> + map__set(Names0, Id, Name, Names).
>
> %-----------------------------------------------------------------------------%
>
> cvs diff: Diffing lp_solve
> cvs diff: Diffing lp_solve/lp_examples
> cvs diff: Diffing profiler
> cvs diff: Diffing runtime
> cvs diff: Diffing runtime/machdeps
> cvs diff: Diffing samples
> cvs diff: Diffing samples/c_interface
> cvs diff: Diffing samples/c_interface/c_calls_mercury
> cvs diff: Diffing samples/c_interface/cplusplus_calls_mercury
> cvs diff: Diffing samples/c_interface/mercury_calls_c
> cvs diff: Diffing samples/c_interface/mercury_calls_cplusplus
> cvs diff: Diffing samples/c_interface/simpler_c_calls_mercury
> cvs diff: Diffing samples/c_interface/simpler_cplusplus_calls_mercury
> cvs diff: Diffing samples/diff
> cvs diff: Diffing scripts
> cvs diff: Diffing tests
> cvs diff: Diffing tests/benchmarks
> cvs diff: Diffing tests/general
> cvs diff: Diffing tests/hard_coded
> cvs diff: Diffing tests/invalid
> cvs diff: Diffing tests/misc_tests
> cvs diff: Diffing tests/valid
> cvs diff: Diffing tests/warnings
> cvs diff: Diffing tools
> cvs diff: Diffing trial
> cvs diff: Diffing util
>
>
--
Fergus Henderson <fjh at cs.mu.oz.au> | "I have always known that the pursuit
WWW: <http://www.cs.mu.oz.au/~fjh> | of excellence is a lethal habit"
PGP: finger fjh at 128.250.37.3 | -- the last words of T. S. Garp.
More information about the developers
mailing list