@Preamble{
"\input bibnames.sty " #
"\input path.sty " #
"\def \TM {${}^{\sc TM}$} " #
"\hyphenation{
}"}
@String{ack-nhfb = "Nelson H. F. Beebe,
University of Utah,
Department of Mathematics, 110 LCB,
155 S 1400 E RM 233,
Salt Lake City, UT 84112-0090, USA,
Tel: +1 801 581 5254,
FAX: +1 801 581 4148,
e-mail: \path|beebe@math.utah.edu|,
\path|beebe@acm.org|,
\path|beebe@computer.org| (Internet),
URL: \path|http://www.math.utah.edu/~beebe/|"}
@String{j-LOPLAS = "ACM Letters on Programming Languages and
Systems"}
@Article{Briggs:1992:CRP,
author = "Preston Briggs and Keith D. Cooper and Linda Torczon",
title = "Coloring register pairs",
journal = j-LOPLAS,
volume = "1",
number = "1",
pages = "3--13",
month = mar,
year = "1992",
CODEN = "ALPSE8",
DOI = "https://doi.org/10.1145/130616.130617",
ISSN = "1057-4514 (print), 1557-7384 (electronic)",
ISSN-L = "1057-4514",
bibdate = "Thu May 30 15:54:54 MDT 1996",
bibsource = "http://www.acm.org/pubs/toc/;
http://www.math.utah.edu/pub/tex/bib/loplas.bib",
URL = "http://www.acm.org/pubs/toc/Abstracts/1057-4514/130617.html",
abstract = "Many architectures require that a program use pairs of
adjacent registers to hold double-precision
floating-point values. Register allocators based on
Chaitin's graph-coloring technique have trouble with
programs that contain both single-register values and
values that require adjacent pairs of registers. In
particular, Chaitin's algorithm often produces
excessive spilling on such programs. This results in
underuse of the register set; the extra loads and
stores inserted into the program for spilling also slow
execution.\par
An allocator based on an {\em optimistic} coloring
scheme naturally avoids this problem. Such allocators
delay the decision to spill a value until late in the
allocation process. This eliminates the over-spilling
provoked by adjacent register pairs in Chaitin's
scheme.\par
This paper discusses the representation of register
pairs in a graph coloring allocator. It explains the
problems that arise with Chaitin's allocator and shows
how the optimistic allocator avoids them. It provides a
rationale for determining how to add larger aggregates
to the interference graph.",
acknowledgement = ack-nhfb,
fjournal = "ACM Letters on Programming Languages and Systems
(LOPLAS)",
keywords = "algorithms, languages, performance",
subject = "{\bf D.3.4}: Software, PROGRAMMING LANGUAGES,
Processors, Optimization. {\bf D.3.4}: Software,
PROGRAMMING LANGUAGES, Processors, Compilers. {\bf
D.3.4}: Software, PROGRAMMING LANGUAGES, Processors,
Code generation.",
}
@Article{Burke:1992:PEI,
author = "Michael Burke and Jong-Deok Choi",
title = "Precise and efficient integration of interprocedural
alias information into data-flow analysis",
journal = j-LOPLAS,
volume = "1",
number = "1",
pages = "14--21",
month = mar,
year = "1992",
CODEN = "ALPSE8",
DOI = "https://doi.org/10.1145/130616.130618",
ISSN = "1057-4514 (print), 1557-7384 (electronic)",
ISSN-L = "1057-4514",
bibdate = "Thu May 30 15:54:54 MDT 1996",
bibsource = "http://www.acm.org/pubs/toc/;
http://www.math.utah.edu/pub/tex/bib/loplas.bib",
URL = "http://www.acm.org/pubs/toc/Abstracts/1057-4514/130618.html",
abstract = "Data-flow analysis is a basis for program optimization
and parallelizing transformations. The mechanism of
passing {\em reference} parameters at call sites
generates {\em interprocedural aliases} which
complicate this analysis. Solutions have been developed
for efficiently computing interprocedural aliases.
However, factoring the computed aliases into data-flow
information has been mostly overlooked, although
improper factoring results in imprecise (conservative)
data-flow information. In this document, we describe a
mechanism which, in factoring in interprocedural
aliases, computes data-flow information more precisely
and with less time and space overhead than previous
approaches.",
acknowledgement = ack-nhfb,
fjournal = "ACM Letters on Programming Languages and Systems
(LOPLAS)",
keywords = "algorithms, languages, performance",
subject = "{\bf D.3.4}: Software, PROGRAMMING LANGUAGES,
Processors, Optimization. {\bf D.3.4}: Software,
PROGRAMMING LANGUAGES, Processors, Code generation.
{\bf D.3.4}: Software, PROGRAMMING LANGUAGES,
Processors, Compilers.",
}
@Article{Cooper:1992:USE,
author = "Keith D. Cooper and Mary W. Hall and Linda Torczon",
title = "Unexpected side effects of inline substitution: a case
study",
journal = j-LOPLAS,
volume = "1",
number = "1",
pages = "22--32",
month = mar,
year = "1992",
CODEN = "ALPSE8",
DOI = "https://doi.org/10.1145/130616.130619",
ISSN = "1057-4514 (print), 1557-7384 (electronic)",
ISSN-L = "1057-4514",
bibdate = "Thu May 30 15:54:54 MDT 1996",
bibsource = "http://www.acm.org/pubs/toc/;
http://www.math.utah.edu/pub/tex/bib/loplas.bib",
URL = "http://www.acm.org/pubs/toc/Abstracts/1057-4514/130619.html",
abstract = "The structure of a program can encode implicit
information that changes both the shape and speed of
the generated code. Interprocedural transformations
like inlining often discard such information; using
interprocedural data-flow information as a basis for
optimization can have the same effect.\par
In the course of a study on inline substitution with
commercial FORTRAN compilers, we encountered unexpected
performance problems in one of the programs. This paper
describes the specific problem that we encountered,
explores its origins, and examines the ability of
several analytical techniques to help the compiler
avoid similar problems.",
acknowledgement = ack-nhfb,
fjournal = "ACM Letters on Programming Languages and Systems
(LOPLAS)",
keywords = "experimentation, languages, performance",
subject = "{\bf D.3.4}: Software, PROGRAMMING LANGUAGES,
Processors, Optimization. {\bf D.3.4}: Software,
PROGRAMMING LANGUAGES, Processors, Compilers. {\bf
D.3.3}: Software, PROGRAMMING LANGUAGES, Language
Constructs and Features.",
}
@Article{Dornic:1992:PTS,
author = "Vincent Dornic and Pierre Jouvelot and David K.
Gifford",
title = "Polymorphic time systems for estimating program
complexity",
journal = j-LOPLAS,
volume = "1",
number = "1",
pages = "33--45",
month = mar,
year = "1992",
CODEN = "ALPSE8",
DOI = "https://doi.org/10.1145/130616.130620",
ISSN = "1057-4514 (print), 1557-7384 (electronic)",
ISSN-L = "1057-4514",
bibdate = "Thu May 30 15:54:54 MDT 1996",
bibsource = "http://www.acm.org/pubs/toc/;
http://www.math.utah.edu/pub/tex/bib/loplas.bib",
URL = "http://www.acm.org/pubs/toc/Abstracts/1057-4514/130620.html",
abstract = "We present a new approach to static program analysis
that permits each expression in a program to be
assigned an execution time estimate. Our approach uses
a {\em time system} in conjunction with a conventional
type system to compute both the type and the time of an
expression. The {\em time} of an expression is either
an integer upper bound on the number of ticks the
expression will execute, or the distinguished element
long that indicates that the expression contains a
loop, and thus may run for an arbitrary length of time.
Every function type includes a {\em latent time} that
is used to communicate its expected execution time from
the point of its definition to the points of its use.
Unlike previous approaches, a time system works in the
presence of first-class functions and separate
compilation. In addition, {\em time polymorphism}
allows the time of a function to depend on the times of
any functions that it takes as arguments. Time
estimates are useful when compiling programs for
multiprocessors in order to balance the overhead of
initiating a concurrent computation against the
expected execution time of the computation. The
correctness of our time system is proven with respect
to a dynamic semantics.",
acknowledgement = ack-nhfb,
fjournal = "ACM Letters on Programming Languages and Systems
(LOPLAS)",
keywords = "languages, performance, verification",
subject = "{\bf D.3.4}: Software, PROGRAMMING LANGUAGES,
Processors, Compilers. {\bf D.1.3}: Software,
PROGRAMMING TECHNIQUES, Concurrent Programming. {\bf
D.3.2}: Software, PROGRAMMING LANGUAGES, Language
Classifications, Applicative languages. {\bf D.3.4}:
Software, PROGRAMMING LANGUAGES, Processors,
Optimization. {\bf F.1.3}: Theory of Computation,
COMPUTATION BY ABSTRACT DEVICES, Complexity Classes.",
}
@Article{Johnson:1992:RLR,
author = "Ralph E. Johnson",
title = "Reducing the latency of a real-time garbage
collector",
journal = j-LOPLAS,
volume = "1",
number = "1",
pages = "46--58",
month = mar,
year = "1992",
CODEN = "ALPSE8",
DOI = "https://doi.org/10.1145/130616.130621",
ISSN = "1057-4514 (print), 1557-7384 (electronic)",
ISSN-L = "1057-4514",
bibdate = "Thu May 30 15:54:54 MDT 1996",
bibsource = "http://www.acm.org/pubs/toc/;
http://www.math.utah.edu/pub/tex/bib/loplas.bib",
URL = "http://www.acm.org/pubs/toc/Abstracts/1057-4514/130621.html",
abstract = "This paper shows how to make the latency of scanning a
page in the Appel-Ellis-Li real-time garbage collector
be proportional only to the number of object references
on a page (the page size), instead of to the sum of the
sizes of the objects referenced by the page. This makes
the garbage collection algorithm much more suitable for
real-time systems.",
acknowledgement = ack-nhfb,
fjournal = "ACM Letters on Programming Languages and Systems
(LOPLAS)",
keywords = "algorithms, languages, measurement, performance",
subject = "{\bf D.3.3}: Software, PROGRAMMING LANGUAGES, Language
Constructs and Features, Dynamic storage management.
{\bf C.3}: Computer Systems Organization,
SPECIAL-PURPOSE AND APPLICATION-BASED SYSTEMS,
Real-time systems.",
}
@Article{McKenney:1992:GPC,
author = "Bruce McKenney and Boleslaw K. Szymanski",
title = "Generating parallel code for {SIMD} machines",
journal = j-LOPLAS,
volume = "1",
number = "1",
pages = "59--73",
month = mar,
year = "1992",
CODEN = "ALPSE8",
DOI = "https://doi.org/10.1145/130616.130622",
ISSN = "1057-4514 (print), 1557-7384 (electronic)",
ISSN-L = "1057-4514",
bibdate = "Thu May 30 15:54:54 MDT 1996",
bibsource = "http://www.acm.org/pubs/toc/;
http://www.math.utah.edu/pub/tex/bib/loplas.bib",
URL = "http://www.acm.org/pubs/toc/Abstracts/1057-4514/130622.html",
abstract = "Massively parallel SIMD machines rely on data
parallelism usually achieved by a careful hand coding
to support program efficiency. This paper describes
parallelization of code generated for SIMD machines by
the compiler for the Equational Programming Language,
EPL. The language supports architecture-independent
scientific programming by recurrent equations. The EPL
compiler serves as a programming aid for users of
parallel machines by automating data partitioning and
computation parallelization based on inherent data
dependencies. In support of a Connection Machine
architecture, the EPL compiler performs horizontal
partitioning of the program, a process that selects a
dimension of each data structure to be projected along
the processor array. Each processor then holds a single
instance of that structure and operations along the
projected dimension are done in parallel. The paper
describes horizontal partitioning, code generation in
MPL and efficiency of programs generated for Maspar
SIMD machine.",
acknowledgement = ack-nhfb,
fjournal = "ACM Letters on Programming Languages and Systems
(LOPLAS)",
keywords = "design, languages, performance",
subject = "{\bf D.3.4}: Software, PROGRAMMING LANGUAGES,
Processors, Code generation. {\bf C.1.2}: Computer
Systems Organization, PROCESSOR ARCHITECTURES, Multiple
Data Stream Architectures (Multiprocessors), Connection
machines. {\bf D.1.1}: Software, PROGRAMMING
TECHNIQUES, Applicative (Functional) Programming. {\bf
D.1.3}: Software, PROGRAMMING TECHNIQUES, Concurrent
Programming, Parallel programming. {\bf D.3.4}:
Software, PROGRAMMING LANGUAGES, Processors, Compilers.
{\bf D.3.4}: Software, PROGRAMMING LANGUAGES,
Processors, Optimization. {\bf C.1.2}: Computer Systems
Organization, PROCESSOR ARCHITECTURES, Multiple Data
Stream Architectures (Multiprocessors),
Single-instruction-stream, multiple-data-stream
processors (SIMD).",
}
@Article{Netzer:1992:WRC,
author = "Robert H. B. Netzer and Barton P. Miller",
title = "What are race conditions?: {Some} issues and
formalizations",
journal = j-LOPLAS,
volume = "1",
number = "1",
pages = "74--88",
month = mar,
year = "1992",
CODEN = "ALPSE8",
DOI = "https://doi.org/10.1145/130616.130623",
ISSN = "1057-4514 (print), 1557-7384 (electronic)",
ISSN-L = "1057-4514",
bibdate = "Thu May 30 15:54:54 MDT 1996",
bibsource = "http://www.acm.org/pubs/toc/;
http://www.math.utah.edu/pub/tex/bib/loplas.bib",
URL = "http://www.acm.org/pubs/toc/Abstracts/1057-4514/130623.html",
abstract = "In shared-memory parallel programs that use explicit
synchronization, {\em race conditions} result when
accesses to shared memory are not properly
synchronized. Race conditions are often considered to
be manifestations of bugs, since their presence can
cause the program to behave unexpectedly.
Unfortunately, there has been little agreement in the
literature as to precisely what constitutes a race
condition. Two different notions have been implicitly
considered: one pertaining to programs intended to be
deterministic (which we call {\em general races}) and
the other to nondeterministic programs containing
critical sections (which we call {\em data races}).
However, the differences between general races and data
races have not yet been recognized. This paper examines
these differences by characterizing races using a
formal model and exploring their properties. We show
that two variations of each type of race exist: {\em
feasible} general races and data races capture the
intuitive notions desired for debugging and {\em
apparent} races capture less accurate notions
implicitly assumed by most dynamic race detection
methods. We also show that locating feasible races is
an NP-hard problem, implying that only the apparent
races, which are approximations to feasible races, can
be detected in practice. The complexity of dynamically
locating apparent races depends on the type of
synchronization used by the program. Apparent races can
be exhaustively located efficiently only for weak types
of synchronization that are incapable of implementing
mutual exclusion. This result has important
implications since we argue that debugging general
races requires exhaustive race detection and is
inherently harder than debugging data races (which
requires only partial race detection). Programs
containing data races can therefore be efficiently
debugged by locating certain easily identifiable races.
In contrast, programs containing general races require
more complex debugging techniques.",
acknowledgement = ack-nhfb,
fjournal = "ACM Letters on Programming Languages and Systems
(LOPLAS)",
keywords = "design, languages",
subject = "{\bf D.2.5}: Software, SOFTWARE ENGINEERING, Testing
and Debugging, Debugging aids. {\bf D.3.3}: Software,
PROGRAMMING LANGUAGES, Language Constructs and
Features, Concurrent programming structures. {\bf
D.4.1}: Software, OPERATING SYSTEMS, Process
Management, Concurrency. {\bf D.4.1}: Software,
OPERATING SYSTEMS, Process Management, Mutual
exclusion. {\bf D.4.1}: Software, OPERATING SYSTEMS,
Process Management, Synchronization. {\bf D.3.1}:
Software, PROGRAMMING LANGUAGES, Formal Definitions and
Theory, Syntax. {\bf D.3.4}: Software, PROGRAMMING
LANGUAGES, Processors, Parsing.",
}
@Article{Singh:1992:RGT,
author = "Ambuj K. Singh",
title = "On reasoning with the global time assumption",
journal = j-LOPLAS,
volume = "1",
number = "1",
pages = "89--103",
month = mar,
year = "1992",
CODEN = "ALPSE8",
DOI = "https://doi.org/10.1145/130616.130624",
ISSN = "1057-4514 (print), 1557-7384 (electronic)",
ISSN-L = "1057-4514",
bibdate = "Thu May 30 15:54:54 MDT 1996",
bibsource = "http://www.acm.org/pubs/toc/;
http://www.math.utah.edu/pub/tex/bib/loplas.bib",
URL = "http://www.acm.org/pubs/toc/Abstracts/1057-4514/130624.html",
abstract = "Concurrency in distributed systems is usually modeled
by a nondeterministic interleaving of atomic events.
The consequences of this interleaving (or global time)
assumption on the specifications and proofs of
distributed programs are examined in this paper. A
construction for atomic registers is presented; this
construction has the surprising property that it is
correct with respect to a specification based on
partial orders but is incorrect with respect to a
naively derived specification based on global time.",
acknowledgement = ack-nhfb,
fjournal = "ACM Letters on Programming Languages and Systems
(LOPLAS)",
keywords = "theory, verification",
subject = "{\bf D.2.1}: Software, SOFTWARE ENGINEERING,
Requirements/Specifications. {\bf D.1.3}: Software,
PROGRAMMING TECHNIQUES, Concurrent Programming. {\bf
F.3.1}: Theory of Computation, LOGICS AND MEANINGS OF
PROGRAMS, Specifying and Verifying and Reasoning about
Programs. {\bf D.4.2}: Software, OPERATING SYSTEMS,
Storage Management.",
}
@Article{Asuru:1992:OAS,
author = "Jonathan M. Asuru",
title = "Optimization of array subscript range checks",
journal = j-LOPLAS,
volume = "1",
number = "2",
pages = "109--118",
month = jun,
year = "1992",
CODEN = "ALPSE8",
DOI = "https://doi.org/10.1145/151333.151392",
ISSN = "1057-4514 (print), 1557-7384 (electronic)",
ISSN-L = "1057-4514",
bibdate = "Thu May 30 15:54:54 MDT 1996",
bibsource = "http://www.acm.org/pubs/toc/;
http://www.math.utah.edu/pub/tex/bib/loplas.bib",
URL = "http://www.acm.org/pubs/toc/Abstracts/1057-4514/151392.html",
abstract = "Compile-time elimination of subscript range checks is
performed by some optimizing compilers to reduce the
overhead associated with manipulating array data
structures. Elimination and propagation, the two
methods of subscript range check optimization, are less
effective for eliminating global redundancies
especially in while-loop structures with nonconstant
loop guards. This paper describes a subscript range
check optimization procedure that can eliminate more
range checks than current methods. Two transformations
called inner-loop guard elimination and conservative
expression substitution are introduced to enhance
propagation of range checks in nested while-loops and
to define a partial order on related range checks.
Global elimination is improved by considering range
checks performed before control reaches a statement and
after control leaves a statement. A unique feature of
this method is the simplification of the available
range-check analysis system for global elimination.",
acknowledgement = ack-nhfb,
fjournal = "ACM Letters on Programming Languages and Systems
(LOPLAS)",
keywords = "performance",
subject = "{\bf D.3.4}: Software, PROGRAMMING LANGUAGES,
Processors, Compilers. {\bf D.3.4}: Software,
PROGRAMMING LANGUAGES, Processors, Optimization. {\bf
D.3.3}: Software, PROGRAMMING LANGUAGES, Language
Constructs and Features, Data types and structures.",
}
@Article{Dietrich:1992:SPA,
author = "Suzanne W. Dietrich",
title = "Shortest path by approximation in logic programs",
journal = j-LOPLAS,
volume = "1",
number = "2",
pages = "119--137",
month = jun,
year = "1992",
CODEN = "ALPSE8",
DOI = "https://doi.org/10.1145/151333.151377",
ISSN = "1057-4514 (print), 1557-7384 (electronic)",
ISSN-L = "1057-4514",
bibdate = "Thu May 30 15:54:54 MDT 1996",
bibsource = "http://www.acm.org/pubs/toc/;
http://www.math.utah.edu/pub/tex/bib/loplas.bib",
URL = "http://www.acm.org/pubs/toc/Abstracts/1057-4514/151377.html",
abstract = "An approximation paradigm is proposed for logic
programming as a simple modification to a complete
evaluation strategy. The motivational example
illustrates how a straightforward transformation of a
declarative specification of the distance between two
vertices in a directed graph leads to sophisticated
algorithms for computing shortest paths. The goal of
the work presented in this paper is {\em not} to
provide a more efficient computation of shortest paths
but to investigate how the intermediate tables, known
as extension tables, generated by the complete
evaluation strategy might be used in approximation
algorithms. We present the $ET_\mbox{distance}$
algorithm; to put it in perspective, its execution is compared to
those of Dijkstra's single-source and Floyd's all-pairs
shortest path algorithms.",
acknowledgement = ack-nhfb,
fjournal = "ACM Letters on Programming Languages and Systems
(LOPLAS)",
keywords = "algorithms, performance, theory",
subject = "{\bf F.3.1}: Theory of Computation, LOGICS AND
MEANINGS OF PROGRAMS, Specifying and Verifying and
Reasoning about Programs, Logics of programs. {\bf
F.4.1}: Theory of Computation, MATHEMATICAL LOGIC AND
FORMAL LANGUAGES, Mathematical Logic, Logic
programming. {\bf G.3}: Mathematics of Computing,
PROBABILITY AND STATISTICS, Probabilistic algorithms
(including Monte Carlo). {\bf F.2.2}: Theory of
Computation, ANALYSIS OF ALGORITHMS AND PROBLEM
COMPLEXITY, Nonnumerical Algorithms and Problems,
Computations on discrete structures. {\bf G.2.2}:
Mathematics of Computing, DISCRETE MATHEMATICS, Graph
Theory, Path and circuit problems.",
}
@Article{Goldberg:1992:DFD,
author = "David Goldberg",
title = "The design of floating-point data types",
journal = j-LOPLAS,
volume = "1",
number = "2",
pages = "138--151",
month = jun,
year = "1992",
CODEN = "ALPSE8",
DOI = "https://doi.org/10.1145/151333.151373",
ISSN = "1057-4514 (print), 1557-7384 (electronic)",
ISSN-L = "1057-4514",
bibdate = "Thu May 30 15:54:54 MDT 1996",
bibsource = "http://www.acm.org/pubs/toc/;
http://www.math.utah.edu/pub/tex/bib/loplas.bib",
URL = "http://www.acm.org/pubs/toc/Abstracts/1057-4514/151373.html",
abstract = "The issues involved in designing the floating-point
part of a programming language are discussed. Looking
at the language specifications for most existing
languages might suggest that this design involves only
trivial issues, such as whether to have one or two
types of REALs or how to name the functions that
convert from INTEGER to REAL. It is shown that there
are more significant semantic issues involved. After
discussing the trade-offs for the major design
decisions, they are illustrated by presenting the
design of the floating-point part of the Modula-3
language.",
acknowledgement = ack-nhfb,
fjournal = "ACM Letters on Programming Languages and Systems
(LOPLAS)",
keywords = "design, languages",
subject = "{\bf D.3.3}: Software, PROGRAMMING LANGUAGES, Language
Constructs and Features, Data types and structures.
{\bf G.1.0}: Mathematics of Computing, NUMERICAL
ANALYSIS, General, Computer arithmetic. {\bf G.1.0}:
Mathematics of Computing, NUMERICAL ANALYSIS, General,
Error analysis. {\bf F.3.2}: Theory of Computation,
LOGICS AND MEANINGS OF PROGRAMS, Semantics of
Programming Languages. {\bf F.3.1}: Theory of
Computation, LOGICS AND MEANINGS OF PROGRAMS,
Specifying and Verifying and Reasoning about Programs,
Specification techniques.",
}
@Article{McConnell:1992:USS,
author = "Carl McConnell and Ralph E. Johnson",
title = "Using static single assignment form in a code
optimizer",
journal = j-LOPLAS,
volume = "1",
number = "2",
pages = "152--160",
month = jun,
year = "1992",
CODEN = "ALPSE8",
DOI = "https://doi.org/10.1145/151333.151368",
ISSN = "1057-4514 (print), 1557-7384 (electronic)",
ISSN-L = "1057-4514",
bibdate = "Thu May 30 15:54:54 MDT 1996",
bibsource = "http://www.acm.org/pubs/toc/;
http://www.math.utah.edu/pub/tex/bib/loplas.bib",
URL = "http://www.acm.org/pubs/toc/Abstracts/1057-4514/151368.html",
abstract = "Static single assignment form represents data
dependences elegantly and provides a basis for powerful
optimizations. Table-driven techniques for peephole
optimization and code generation are straightforward
and effective. It is natural to want to use both
together in a code optimizer. However, doing so reveals
that static single assignment form does not remove all
antidependences, and that it conflicts with
table-driven code generation for 2-address machines.
This paper describes these problems and how to solve
them.",
acknowledgement = ack-nhfb,
fjournal = "ACM Letters on Programming Languages and Systems
(LOPLAS)",
keywords = "algorithms, design",
subject = "{\bf D.3.4}: Software, PROGRAMMING LANGUAGES,
Processors, Optimization. {\bf D.3.4}: Software,
PROGRAMMING LANGUAGES, Processors, Compilers. {\bf
D.3.4}: Software, PROGRAMMING LANGUAGES, Processors,
Code generation.",
}
@Article{Tarditi:1992:NAR,
author = "David Tarditi and Peter Lee and Anurag Acharya",
title = "No assembly required: compiling standard {ML} to {C}",
journal = j-LOPLAS,
volume = "1",
number = "2",
pages = "161--177",
month = jun,
year = "1992",
CODEN = "ALPSE8",
DOI = "https://doi.org/10.1145/151333.151343",
ISSN = "1057-4514 (print), 1557-7384 (electronic)",
ISSN-L = "1057-4514",
bibdate = "Thu May 30 15:54:54 MDT 1996",
bibsource = "http://www.acm.org/pubs/toc/;
http://www.math.utah.edu/pub/tex/bib/loplas.bib",
URL = "http://www.acm.org/pubs/toc/Abstracts/1057-4514/151343.html",
abstract = "C has been used as a portable target language for
implementing languages like Standard ML and Scheme.
Previous efforts at compiling these languages to C have
produced efficient code, but have compromised on
portability and proper tail recursion. We show how to
compile Standard ML to C without making such
compromises. The compilation technique is based on
converting Standard ML to a continuation-passing style
[lambda]-calculus intermediate language and then
compiling this language to C. The code generated by
this compiler achieves an execution speed that is about
a factor of two slower than that generated by a native
code compiler. The compiler generates highly portable
code, yet still supports advanced features like garbage
collection and first-class continuations. We analyze
the performance and determine the aspects of the
compilation method that lead to the observed slowdown.
We also suggest changes to C compilers that would
better support such compilation methods.",
acknowledgement = ack-nhfb,
fjournal = "ACM Letters on Programming Languages and Systems
(LOPLAS)",
keywords = "design, languages, performance, theory",
subject = "{\bf D.3.4}: Software, PROGRAMMING LANGUAGES,
Processors, Compilers. {\bf D.3.2}: Software,
PROGRAMMING LANGUAGES, Language Classifications,
Standard ML. {\bf D.3.2}: Software, PROGRAMMING
LANGUAGES, Language Classifications, C. {\bf F.4.1}:
Theory of Computation, MATHEMATICAL LOGIC AND FORMAL
LANGUAGES, Mathematical Logic, Lambda calculus and
related systems. {\bf D.3.4}: Software, PROGRAMMING
LANGUAGES, Processors, Code generation.",
}
@Article{Weiss:1992:TCC,
author = "Michael Weiss",
title = "The transitive closure of control dependence: the
iterated join",
journal = j-LOPLAS,
volume = "1",
number = "2",
pages = "178--190",
month = jun,
year = "1992",
CODEN = "ALPSE8",
DOI = "https://doi.org/10.1145/151333.151337",
ISSN = "1057-4514 (print), 1557-7384 (electronic)",
ISSN-L = "1057-4514",
bibdate = "Thu May 30 15:54:54 MDT 1996",
bibsource = "http://www.acm.org/pubs/toc/;
http://www.math.utah.edu/pub/tex/bib/loplas.bib",
URL = "http://www.acm.org/pubs/toc/Abstracts/1057-4514/151337.html",
abstract = "We characterize the transitive closure of the control
dependence relation and give an application to the
theory of control flow guards. We relate our result to
characterizations by Beck et al., by Sarkar, and by
Cytron et al., and strengthen a result of the latter
concerning dominance frontiers and join sets.",
acknowledgement = ack-nhfb,
fjournal = "ACM Letters on Programming Languages and Systems
(LOPLAS)",
keywords = "languages, theory",
subject = "{\bf D.3.3}: Software, PROGRAMMING LANGUAGES, Language
Constructs and Features, Control structures. {\bf
D.3.3}: Software, PROGRAMMING LANGUAGES, Language
Constructs and Features, Data types and structures.
{\bf D.3.4}: Software, PROGRAMMING LANGUAGES,
Processors, Compilers. {\bf E.1}: Data, DATA
STRUCTURES, Graphs.",
}
@Article{Danvy:1992:CAS,
author = "Olivier Danvy and John Hatcliff",
title = "{CPS}-transformation after strictness analysis",
journal = j-LOPLAS,
volume = "1",
number = "3",
pages = "195--212",
month = sep,
year = "1992",
CODEN = "ALPSE8",
DOI = "https://doi.org/10.1145/151640.151641",
ISSN = "1057-4514 (print), 1557-7384 (electronic)",
ISSN-L = "1057-4514",
bibdate = "Thu May 30 15:54:54 MDT 1996",
bibsource = "http://www.acm.org/pubs/toc/;
http://www.math.utah.edu/pub/tex/bib/loplas.bib",
URL = "http://www.acm.org/pubs/toc/Abstracts/1057-4514/151641.html",
abstract = "Strictness analysis is a common component of compilers
for call-by-name functional languages; the
continuation-passing-style (CPS-) transformation is a
common component of compilers for call-by-value
functional languages. To bridge these two
implementation techniques, the authors present a hybrid
CPS-transformation E/sub s/ for a language with
annotations resulting from strictness analysis. They
also address and solve the problem of recursive
binding. Finally, they express E/sub s/ in Nielson and
Nielson's two-level lambda-calculus, enabling a simple
and efficient implementation in a functional
programming language.",
acknowledgement = ack-nhfb,
fjournal = "ACM Letters on Programming Languages and Systems
(LOPLAS)",
keywords = "design, languages, theory",
subject = "{\bf F.3.3}: Theory of Computation, LOGICS AND
MEANINGS OF PROGRAMS, Studies of Program Constructs,
Functional constructs. {\bf D.3.4}: Software,
PROGRAMMING LANGUAGES, Processors, Compilers. {\bf
D.3.4}: Software, PROGRAMMING LANGUAGES, Processors,
Optimization. {\bf F.4.1}: Theory of Computation,
MATHEMATICAL LOGIC AND FORMAL LANGUAGES, Mathematical
Logic, Lambda calculus and related systems. {\bf
I.2.2}: Computing Methodologies, ARTIFICIAL
INTELLIGENCE, Automatic Programming, Program
transformation. {\bf D.3.1}: Software, PROGRAMMING
LANGUAGES, Formal Definitions and Theory, Syntax. {\bf
D.3.2}: Software, PROGRAMMING LANGUAGES, Language
Classifications, Applicative languages. {\bf D.3.3}:
Software, PROGRAMMING LANGUAGES, Language Constructs
and Features, Control structures.",
}
@Article{Fraser:1992:ESE,
author = "Christopher W. Fraser and David R. Hanson and Todd A.
Proebsting",
title = "Engineering a simple, efficient code-generator
generator",
journal = j-LOPLAS,
volume = "1",
number = "3",
pages = "213--226",
month = sep,
year = "1992",
CODEN = "ALPSE8",
DOI = "https://doi.org/10.1145/151640.151642",
ISSN = "1057-4514 (print), 1557-7384 (electronic)",
ISSN-L = "1057-4514",
bibdate = "Fri Feb 17 18:41:11 2006",
bibsource = "http://www.acm.org/pubs/toc/;
http://www.math.utah.edu/pub/tex/bib/loplas.bib",
URL = "http://storage.webhop.net/documents/iburg.pdf;
http://www.acm.org/pubs/toc/Abstracts/1057-4514/151642.html;
http://www.cs.princeton.edu/software/iburg/",
abstract = "Many code-generator generators use tree pattern
matching and dynamic programming. This paper describes
a simple program that generates matchers that are fast,
compact, and easy to understand. It is simpler than
common alternatives: 200-700 lines of Icon or 950 lines
of C versus 3000 lines of C for Twig and 5000 for burg.
Its matchers run up to 25 times faster than Twig's.
They are necessarily slower than burg's BURS (bottom-up
rewrite system) matchers, but they are more flexible
and still practical.",
acknowledgement = ack-nhfb,
fjournal = "ACM Letters on Programming Languages and Systems
(LOPLAS)",
keywords = "languages",
subject = "{\bf D.3.4}: Software, PROGRAMMING LANGUAGES,
Processors, Translator writing systems and compiler
generators. {\bf D.3.4}: Software, PROGRAMMING
LANGUAGES, Processors, Code generation. {\bf D.3.4}:
Software, PROGRAMMING LANGUAGES, Processors,
Compilers.",
}
@Article{Hall:1992:ECG,
  author =       {Mary W. Hall and Ken Kennedy},
  title =        {Efficient call graph analysis},
  journal =      j-LOPLAS,
  volume =       {1},
  number =       {3},
  pages =        {227--242},
  month =        sep,
  year =         {1992},
  CODEN =        {ALPSE8},
  DOI =          {https://doi.org/10.1145/151640.151643},
  ISSN =         {1057-4514 (print), 1557-7384 (electronic)},
  ISSN-L =       {1057-4514},
  bibdate =      {Thu May 30 15:54:54 MDT 1996},
  bibsource =    {http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/loplas.bib},
  URL =          {http://www.acm.org/pubs/toc/Abstracts/1057-4514/151643.html},
  abstract =     {We present an efficient algorithm for computing the
                 procedure call graph, the program representation
                 underlying most interprocedural optimization
                 techniques. The algorithm computes the possible
                 bindings of procedure variables in languages where such
                 variables only receive their values through parameter
                 passing, such as Fortran. We extend the algorithm to
                 accommodate a limited form of assignments to procedure
                 variables. The resulting algorithm can also be used in
                 analysis of functional programs that have been
                 converted to Continuation-Passing-Style.\par
                 We discuss the algorithm in relationship to other call
                 graph analysis approaches. Many less efficient
                 techniques produce essentially the same call graph. A
                 few algorithms are more precise, but they may be
                 prohibitively expensive depending on language
                 features.},
  acknowledgement = ack-nhfb,
  fjournal =     {ACM Letters on Programming Languages and Systems
                 (LOPLAS)},
  subject =      {{\bf D.3.3}: Software, PROGRAMMING LANGUAGES, Language
                 Constructs and Features, Procedures, functions, and
                 subroutines. {\bf D.3.4}: Software, PROGRAMMING
                 LANGUAGES, Processors, Compilers. {\bf D.3.4}:
                 Software, PROGRAMMING LANGUAGES, Processors,
                 Optimization.},
}
@Article{Hummel:1992:ADP,
  author =       {Joseph Hummel and Laurie J. Hendren and Alexandru
                 Nicolau},
  title =        {Abstract description of pointer data structures: an
                 approach for improving the analysis and optimization of
                 imperative programs},
  journal =      j-LOPLAS,
  volume =       {1},
  number =       {3},
  pages =        {243--260},
  month =        sep,
  year =         {1992},
  CODEN =        {ALPSE8},
  DOI =          {https://doi.org/10.1145/151640.151644},
  ISSN =         {1057-4514 (print), 1557-7384 (electronic)},
  ISSN-L =       {1057-4514},
  bibdate =      {Thu May 30 15:54:54 MDT 1996},
  bibsource =    {http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/loplas.bib},
  URL =          {http://www.acm.org/pubs/toc/Abstracts/1057-4514/151644.html},
  abstract =     {Even though impressive progress has been made in the
                 area of optimizing and parallelizing array-based
                 programs, the application of similar techniques to
                 programs using pointer data structures has remained
                 difficult. Unlike arrays which have a small number of
                 well-defined properties, pointers can be used to
                 implement a wide variety of structures which exhibit a
                 much larger set of properties. The diversity of these
                 structures implies that programs with pointer data
                 structures cannot be effectively analyzed by
                 traditional optimizing and parallelizing
                 compilers.\par
                 In this paper we present a new approach that leads to
                 the improved analysis and transformation of programs
                 with recursively defined pointer data structures. Our
                 approach is based on a mechanism for the Abstract
                 Description of Data Structures (ADDS). ADDS is a simple
                 extension to existing imperative languages that allows
                 the programmer to explicitly describe the important
                 properties of a large class of data structures. These
                 abstract descriptions may be used by the compiler to
                 achieve more accurate program analysis in the presence
                 of pointers, which in turn enables and improves the
                 application of numerous optimizing and parallelizing
                 transformations. We present ADDS by describing various
                 data structures; we discuss how such descriptions can
                 be used to improve analysis and debugging; and we
                 supply three important transformations enabled by
                 ADDS.},
  acknowledgement = ack-nhfb,
  fjournal =     {ACM Letters on Programming Languages and Systems
                 (LOPLAS)},
  keywords =     {algorithms, languages},
  subject =      {{\bf D.3.4}: Software, PROGRAMMING LANGUAGES,
                 Processors, Optimization. {\bf D.3.4}: Software,
                 PROGRAMMING LANGUAGES, Processors, Compilers. {\bf
                 D.3.3}: Software, PROGRAMMING LANGUAGES, Language
                 Constructs and Features, Data types and structures.
                 {\bf E.2}: Data, DATA STORAGE REPRESENTATIONS, Linked
                 representations. {\bf E.1}: Data, DATA STRUCTURES.},
}
@Article{Pugh:1992:DDD,
  author =       "William Pugh",
  title =        "Definitions of dependence distance",
  journal =      j-LOPLAS,
  volume =       "1",
  number =       "3",
  pages =        "261--265",
  month =        sep,
  year =         "1992",
  CODEN =        "ALPSE8",
  DOI =          "https://doi.org/10.1145/151640.151645",
  ISSN =         "1057-4514 (print), 1557-7384 (electronic)",
  ISSN-L =       "1057-4514",
  bibdate =      "Thu May 30 15:54:54 MDT 1996",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/loplas.bib",
  URL =          "http://www.acm.org/pubs/toc/Abstracts/1057-4514/151645.html",
  abstract =     "Data dependence distance is widely used to
                 characterize data dependences in advanced optimizing
                 compilers. The standard definition of dependence
                 distance assumes that loops are normalized (have
                 constant lower bounds and a step of 1); there is not a
                 commonly accepted definition for unnormalized loops. We
                 have identified several potential definitions, all of
                 which give the same answer for normalized loops. There
                 are a number of subtleties involved in choosing between
                 these definitions, and no one definition is suitable
                 for all applications.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Letters on Programming Languages and Systems
                 (LOPLAS)",
  keywords =     "languages, performance, theory",
  subject =      "{\bf D.3.4}: Software, PROGRAMMING LANGUAGES,
                 Processors, Optimization. {\bf D.3.4}: Software,
                 PROGRAMMING LANGUAGES, Processors, Compilers.",
}
@Article{Ramkumar:1992:DLC,
  author =       {Balkrishna Ramkumar},
  title =        {Distributed last call optimization for portable
                 parallel logic programming},
  journal =      j-LOPLAS,
  volume =       {1},
  number =       {3},
  pages =        {266--283},
  month =        sep,
  year =         {1992},
  CODEN =        {ALPSE8},
  DOI =          {https://doi.org/10.1145/151640.151345},
  ISSN =         {1057-4514 (print), 1557-7384 (electronic)},
  ISSN-L =       {1057-4514},
  bibdate =      {Thu May 30 15:54:54 MDT 1996},
  bibsource =    {http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/loplas.bib},
  URL =          {http://www.acm.org/pubs/toc/Abstracts/1057-4514/151345.html},
  abstract =     {A difficult but challenging problem is the efficient
                 exploitation of AND and OR parallelism in logic
                 programs without making any assumptions about the
                 underlying target machine(s). In earlier papers, we
                 described the design of a binding environment for AND
                 and OR parallel execution of logic programs on shared
                 and nonshared memory machines and the performance of a
                 compiler (called ROLOG) using this binding environment
                 on a range of MIMD parallel machines.\par
                 In this paper, we present an important optimization for
                 {\em portable} parallel logic programming, namely {\em
                 distributed last-call optimization}, an analog of the
                 tail recursion optimization for sequential Prolog. This
                 scheme has been implemented in the ROLOG compiler,
                 which ports {\em unchanged} on several shared memory
                 and nonshared memory machines. We describe the effect
                 of this optimization on several OR, AND/OR and AND
                 parallel benchmark programs.},
  acknowledgement = ack-nhfb,
  fjournal =     {ACM Letters on Programming Languages and Systems
                 (LOPLAS)},
  keywords =     {algorithms, languages, performance},
  subject =      {{\bf D.1.3}: Software, PROGRAMMING TECHNIQUES,
                 Concurrent Programming, Parallel programming. {\bf
                 D.3.4}: Software, PROGRAMMING LANGUAGES, Processors,
                 Optimization. {\bf D.3.4}: Software, PROGRAMMING
                 LANGUAGES, Processors, Compilers. {\bf D.1.6}:
                 Software, PROGRAMMING TECHNIQUES, Logic Programming.
                 {\bf D.2.7}: Software, SOFTWARE ENGINEERING,
                 Distribution and Maintenance, Portability.},
}
@Article{Walker:1992:MIV,
  author =       {Kenneth Walker and Ralph E. Griswold},
  title =        {The maintenance of intermediate values in
                 goal-directed evaluation},
  journal =      j-LOPLAS,
  volume =       {1},
  number =       {3},
  pages =        {284--298},
  month =        sep,
  year =         {1992},
  CODEN =        {ALPSE8},
  DOI =          {https://doi.org/10.1145/151640.151341},
  ISSN =         {1057-4514 (print), 1557-7384 (electronic)},
  ISSN-L =       {1057-4514},
  bibdate =      {Thu May 30 15:54:54 MDT 1996},
  bibsource =    {http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/loplas.bib},
  URL =          {http://www.acm.org/pubs/toc/Abstracts/1057-4514/151341.html},
  abstract =     {In programming languages that support goal-directed
                 evaluation to make use of alternative results, an
                 expression can produce a value, suspend, and later be
                 resumed to produce another value. This causes control
                 backtracking to earlier points in a computation and
                 complicates the maintenance of intermediate values.
                 This paper presents a space-efficient algorithm
                 computing the lifetimes of intermediate values that is
                 used by an optimizing compiler for the Icon programming
                 language. The algorithm is applicable to other
                 programming languages that employ goal-directed
                 evaluation.},
  acknowledgement = ack-nhfb,
  fjournal =     {ACM Letters on Programming Languages and Systems
                 (LOPLAS)},
  keywords =     {algorithms, design, performance},
  subject =      {{\bf D.3.1}: Software, PROGRAMMING LANGUAGES, Formal
                 Definitions and Theory. {\bf D.3.2}: Software,
                 PROGRAMMING LANGUAGES, Language Classifications. {\bf
                 D.3.3}: Software, PROGRAMMING LANGUAGES, Language
                 Constructs and Features. {\bf D.3.4}: Software,
                 PROGRAMMING LANGUAGES, Processors, Code generation.
                 {\bf D.3.4}: Software, PROGRAMMING LANGUAGES,
                 Processors, Compilers. {\bf D.3.4}: Software,
                 PROGRAMMING LANGUAGES, Processors, Optimization.},
}
@Article{Fritzson:1992:GAD,
  author =       "Peter Fritzson and Nahid Shahmehri and Mariam Kamkar
                 and Tibor Gyimothy",
  title =        "Generalized algorithmic debugging and testing",
  journal =      j-LOPLAS,
  volume =       "1",
  number =       "4",
  pages =        "303--322",
  month =        dec,
  year =         "1992",
  CODEN =        "ALPSE8",
  DOI =          "https://doi.org/10.1145/161494.161498",
  ISSN =         "1057-4514 (print), 1557-7384 (electronic)",
  ISSN-L =       "1057-4514",
  bibdate =      "Thu May 30 15:54:54 MDT 1996",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/loplas.bib",
  URL =          "http://www.acm.org/pubs/toc/Abstracts/1057-4514/161498.html",
  abstract =     "This paper presents a method for semi-automatic bug
                 localization, generalized algorithmic debugging, which
                 has been integrated with the category partition method
                 for functional testing. In this way the efficiency of
                 the algorithmic debugging method for bug localization
                 can be improved by using test specifications and test
                 results. The long-range goal of this work is a
                 semi-automatic debugging and testing system which can
                 be used during large-scale program development of
                 nontrivial programs.\par
                 The method is generally applicable to procedural
                 languages and is not dependent on any ad hoc
                 assumptions regarding the subject program. The original
                 form of algorithmic debugging, introduced by Shapiro,
                 was however limited to small Prolog programs without
                 side-effects, but has later been generalized to
                 concurrent logic programming languages. Another
                 drawback of the original method is the large number of
                 interactions with the user during bug
                 localization.\par
                 To our knowledge, this is the first method which uses
                 category partition testing to improve the bug
                 localization properties of algorithmic debugging. The
                 method can avoid irrelevant questions to the programmer
                 by categorizing input parameters and then match these
                 against test cases in the test database. Additionally,
                 we use program slicing, a data flow analysis technique,
                 to dynamically compute which parts of the program are
                 relevant for the search, thus further improving bug
                 localization.\par
                 We believe that this is the first generalization of
                 algorithmic debugging for programs with side-effects
                 written in imperative languages such as Pascal. These
                 improvements together make it more feasible to debug
                 larger programs. However, additional improvements are
                 needed to make it handle pointer-related side-effects
                 and concurrent Pascal programs.\par
                 A prototype generalized algorithmic debugger for a
                 Pascal subset without pointer side-effects and a test
                 case generator for application programs in Pascal, C,
                 dBase, and LOTUS have been implemented.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Letters on Programming Languages and Systems
                 (LOPLAS)",
  keywords =     "algorithms, experimentation, performance",
  subject =      "{\bf D.2.5}: Software, SOFTWARE ENGINEERING, Testing
                 and Debugging, Debugging aids. {\bf D.2.6}: Software,
                 SOFTWARE ENGINEERING, Programming Environments.",
}
@Article{Landi:1992:USA,
  author =       {William Landi},
  title =        {Undecidability of static analysis},
  journal =      j-LOPLAS,
  volume =       {1},
  number =       {4},
  pages =        {323--337},
  month =        dec,
  year =         {1992},
  CODEN =        {ALPSE8},
  DOI =          {https://doi.org/10.1145/161494.161501},
  ISSN =         {1057-4514 (print), 1557-7384 (electronic)},
  ISSN-L =       {1057-4514},
  bibdate =      {Thu May 30 15:54:54 MDT 1996},
  bibsource =    {http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/loplas.bib},
  URL =          {http://www.acm.org/pubs/toc/Abstracts/1057-4514/161501.html},
  abstract =     {Static analysis of programs is indispensable to any
                 software tool, environment, or system that requires
                 compile-time information about the semantics of
                 programs. With the emergence of languages like C and
                 LISP, static analysis of programs with dynamic storage
                 and recursive data structures has become a field of
                 active research. Such analysis is difficult, and the
                 static-analysis community has recognized the need for
                 simplifying assumptions and approximate solutions.
                 However, even under the common simplifying assumptions,
                 such analyses are harder than previously recognized.
                 Two fundamental static-analysis problems are {\em may
                 alias} and {\em must alias}. The former is not
                 recursive (is undecidable), and the latter is not
                 recursively enumerable (is uncomputable), even when all
                 paths are executable in the program being analyzed for
                 languages with if statements, loops, dynamic storage,
                 and recursive data structures.},
  acknowledgement = ack-nhfb,
  fjournal =     {ACM Letters on Programming Languages and Systems
                 (LOPLAS)},
  keywords =     {languages, theory},
  subject =      {{\bf D.3.4}: Software, PROGRAMMING LANGUAGES,
                 Processors. {\bf F.1.1}: Theory of Computation,
                 COMPUTATION BY ABSTRACT DEVICES, Models of Computation,
                 Bounded-action devices. {\bf F.4.1}: Theory of
                 Computation, MATHEMATICAL LOGIC AND FORMAL LANGUAGES,
                 Mathematical Logic, Computability theory.},
}
@Article{Nilsen:1992:COS,
  author =       "Kelvin D. Nilsen and William J. Schmidt",
  title =        "Cost-effective object space management for
                 hardware-assisted real-time garbage collection",
  journal =      j-LOPLAS,
  volume =       "1",
  number =       "4",
  pages =        "338--354",
  month =        dec,
  year =         "1992",
  CODEN =        "ALPSE8",
  DOI =          "https://doi.org/10.1145/161494.161508",
  ISSN =         "1057-4514 (print), 1557-7384 (electronic)",
  ISSN-L =       "1057-4514",
  bibdate =      "Thu May 30 15:54:54 MDT 1996",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/loplas.bib",
  URL =          "http://www.acm.org/pubs/toc/Abstracts/1057-4514/161508.html",
  abstract =     "Modern object-oriented languages and programming
                 paradigms require finer-grain division of memory than
                 is provided by traditional paging and segmentation
                 systems. This paper describes the design of an OSM
                 (Object Space Manager) that allows partitioning of real
                 memory on object, rather than page, boundaries. The
                 time required by the OSM to create an object, or to
                 find the beginning of an object given a pointer to any
                 location within it, is approximately one memory cycle.
                 Object sizes are limited only by the availability of
                 address bits. In typical configurations of
                 object-oriented memory modules, one OSM chip is
                 required for every 16 RAM chips. The OSM serves a
                 central role in the implementation of a
                 hardware-assisted garbage collection system in which
                 the worst-case stop-and-wait garbage collection delay
                 ranges between 10 and 500 $\mu$sec, depending on the
                 system configuration.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Letters on Programming Languages and Systems
                 (LOPLAS)",
  keywords =     "management, performance",
  subject =      "{\bf B.7.1}: Hardware, INTEGRATED CIRCUITS, Types and
                 Design Styles. {\bf C.1.3}: Computer Systems
                 Organization, PROCESSOR ARCHITECTURES, Other
                 Architecture Styles. {\bf D.3.3}: Software, PROGRAMMING
                 LANGUAGES, Language Constructs and Features. {\bf
                 D.3.4}: Software, PROGRAMMING LANGUAGES, Processors.
                 {\bf D.4.7}: Software, OPERATING SYSTEMS, Organization
                 and Design.",
}
@Article{Srivastava:1992:UPO,
  author =       "Amitabh Srivastava",
  title =        "Unreachable procedures in object-oriented
                 programming",
  journal =      j-LOPLAS,
  volume =       "1",
  number =       "4",
  pages =        "355--364",
  month =        dec,
  year =         "1992",
  CODEN =        "ALPSE8",
  DOI =          "https://doi.org/10.1145/161494.161517",
  ISSN =         "1057-4514 (print), 1557-7384 (electronic)",
  ISSN-L =       "1057-4514",
  bibdate =      "Thu May 30 15:54:54 MDT 1996",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/loplas.bib",
  URL =          "http://www.acm.org/pubs/toc/Abstracts/1057-4514/161517.html",
  abstract =     "Unreachable procedures are procedures that can never
                 be invoked. Their existence may adversely affect the
                 performance of a program. Unfortunately, their
                 detection requires the entire program to be present.
                 Using a link-time code modification system, we analyze
                 large, linked, program modules of C++, C, and Fortran.
                 We find that C++ programs using object-oriented
                 programming style contain a large fraction of
                 unreachable procedure code. In contrast, C and Fortran
                 programs have a low and essentially constant fraction
                 of unreachable code. In this article, we present our
                 analysis of C++, C, and Fortran programs, and we
                 discuss how object-oriented programming style generates
                 unreachable procedures.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Letters on Programming Languages and Systems
                 (LOPLAS)",
  keywords =     "languages",
  subject =      "{\bf D.3.3}: Software, PROGRAMMING LANGUAGES, Language
                 Constructs and Features, Abstract data types. {\bf
                 D.3.4}: Software, PROGRAMMING LANGUAGES, Processors,
                 Code generation. {\bf D.3.4}: Software, PROGRAMMING
                 LANGUAGES, Processors, Compilers. {\bf D.3.4}:
                 Software, PROGRAMMING LANGUAGES, Processors,
                 Optimization.",
}
@Article{Ball:1993:WRC,
  author =       "Thomas Ball",
  title =        "What's in a region?: or computing control dependence
                 regions in near-linear time for reducible control
                 flow",
  journal =      j-LOPLAS,
  volume =       "2",
  number =       "4",
  pages =        "1--16",
  month =        mar,
  year =         "1993",
  CODEN =        "ALPSE8",
  DOI =          "https://doi.org/10.1145/176454.176456",
  ISSN =         "1057-4514 (print), 1557-7384 (electronic)",
  ISSN-L =       "1057-4514",
  bibdate =      "Thu May 30 15:54:54 MDT 1996",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/loplas.bib",
  URL =          "http://www.acm.org/pubs/toc/Abstracts/1057-4514/176456.html",
  abstract =     "Regions of control dependence identify the
                 instructions in a program that execute under the same
                 control conditions. They have a variety of applications
                 in parallelizing and optimizing compilers. Two vertices
                 in a control-flow graph (which may represent
                 instructions or basic blocks in a program) are in the
                 same region if they have the same set of control
                 dependence predecessors. The common algorithm for
                 computing regions examines each control dependence at
                 least once. As there may be O(V x E) control
                 dependences in the worst case, where V and E are the
                 number of vertices and edges in the control-flow graph,
                 this algorithm has a worst-case running time of O(V x
                 E). We present algorithms for finding regions in
                 reducible control-flow graphs in near-linear time,
                 without using control dependence. These algorithms are
                 based on alternative definitions of regions, which are
                 easier to reason with than the definitions based on
                 control dependence.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Letters on Programming Languages and Systems
                 (LOPLAS)",
  keywords =     "algorithms, languages, theory",
  subject =      "{\bf D.3.4}: Software, PROGRAMMING LANGUAGES,
                 Processors, Compilers. {\bf D.2.2}: Software, SOFTWARE
                 ENGINEERING, Tools and Techniques. {\bf D.3.4}:
                 Software, PROGRAMMING LANGUAGES, Processors,
                 Optimization.",
}
@Article{Beaven:1993:ETE,
  author =       {Mike Beaven and Ryan Stansifer},
  title =        {Explaining type errors in polymorphic languages},
  journal =      j-LOPLAS,
  volume =       {2},
  number =       {4},
  pages =        {17--30},
  month =        mar,
  year =         {1993},
  CODEN =        {ALPSE8},
  DOI =          {https://doi.org/10.1145/176454.176460},
  ISSN =         {1057-4514 (print), 1557-7384 (electronic)},
  ISSN-L =       {1057-4514},
  bibdate =      {Thu May 30 15:54:54 MDT 1996},
  bibsource =    {http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/loplas.bib},
  URL =          {http://www.acm.org/pubs/toc/Abstracts/1057-4514/176460.html},
  abstract =     {Strongly-typed languages present programmers with
                 compile-time feedback about the type correctness of
                 programs. Errors during polymorphic type checking take
                 the form of a unification failure for two types.
                 Finding the source of the type error in the code is
                 often difficult because the error may occur far from
                 the spot where the inconsistency is detected. As
                 functional languages use more and more complex type
                 systems, the difficulty of interpreting and locating
                 these errors will increase. To locate the source of
                 type errors the programmer must unravel the long chain
                 of deductions and type instantiations made during type
                 reconstruction. This paper describes an approach that
                 maintains the deductive steps of type inference and the
                 reasons for type instantiations. The approach could be
                 used in an interactive system to guide the programmer
                 to the source of a type error or to explain why the
                 compiler assigned a particular type to an expression.},
  acknowledgement = ack-nhfb,
  fjournal =     {ACM Letters on Programming Languages and Systems
                 (LOPLAS)},
  keywords =     {languages},
  subject =      {{\bf F.3.3}: Theory of Computation, LOGICS AND
                 MEANINGS OF PROGRAMS, Studies of Program Constructs,
                 Type structure. {\bf D.2.6}: Software, SOFTWARE
                 ENGINEERING, Programming Environments. {\bf D.3.3}:
                 Software, PROGRAMMING LANGUAGES, Language Constructs
                 and Features, Data types and structures.},
}
@Article{Binkley:1993:PEI,
  author =       {David Binkley},
  title =        {Precise executable interprocedural slices},
  journal =      j-LOPLAS,
  volume =       {2},
  number =       {4},
  pages =        {31--45},
  month =        mar,
  year =         {1993},
  CODEN =        {ALPSE8},
  DOI =          {https://doi.org/10.1145/176454.176473},
  ISSN =         {1057-4514 (print), 1557-7384 (electronic)},
  ISSN-L =       {1057-4514},
  bibdate =      {Thu May 30 15:54:54 MDT 1996},
  bibsource =    {http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/loplas.bib},
  URL =          {http://www.acm.org/pubs/toc/Abstracts/1057-4514/176473.html},
  abstract =     {The notion of a {\em program slice}, originally
                 introduced by Mark Weiser, is useful in program
                 debugging, automatic parallelization, program
                 integration, and software maintenance. A slice of a
                 program is taken with respect to a program point {\em
                 p} and a variable {\em x}; the slice consists of all
                 statements of the program that might affect the value
                 of {\em x} at point {\em p}. An interprocedural slice
                 is a slice of an entire program, where the slice
                 crosses the boundaries of procedure calls.\par
                 Weiser's original interprocedural-slicing algorithm
                 produces imprecise slices that are executable programs.
                 A recent algorithm developed by Horwitz, Reps, and
                 Binkley produces more precise (smaller) slices by more
                 accurately identifying those statements that might
                 affect the values of {\em x} at point {\em p}. These
                 slices, however, are not executable. An extension to
                 their algorithm that produces more precise executable
                 interprocedural slices is described together with a
                 proof of correctness for the new algorithm.},
  acknowledgement = ack-nhfb,
  fjournal =     {ACM Letters on Programming Languages and Systems
                 (LOPLAS)},
  keywords =     {algorithms, design},
  subject =      {{\bf D.3.3}: Software, PROGRAMMING LANGUAGES, Language
                 Constructs and Features, Procedures, functions, and
                 subroutines. {\bf D.3.3}: Software, PROGRAMMING
                 LANGUAGES, Language Constructs and Features, Control
                 structures. {\bf D.3.4}: Software, PROGRAMMING
                 LANGUAGES, Processors, Compilers. {\bf D.3.4}:
                 Software, PROGRAMMING LANGUAGES, Processors,
                 Optimization.},
}
@Article{Boehm:1993:IML,
  author =       {Hans-J. Boehm and Alan J. Demers and Chris Uhler},
  title =        {Implementing multiple locks using {Lamport}'s mutual
                 exclusion algorithm},
  journal =      j-LOPLAS,
  volume =       {2},
  number =       {4},
  pages =        {46--58},
  month =        mar,
  year =         {1993},
  CODEN =        {ALPSE8},
  DOI =          {https://doi.org/10.1145/176454.176479},
  ISSN =         {1057-4514 (print), 1557-7384 (electronic)},
  ISSN-L =       {1057-4514},
  bibdate =      {Thu May 30 15:54:54 MDT 1996},
  bibsource =    {http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/loplas.bib},
  URL =          {http://www.acm.org/pubs/toc/Abstracts/1057-4514/176479.html},
  abstract =     {We describe an approach for implementing higher-level
                 mutual-exclusion constructs using Lamport's algorithm.
                 A straightforward implementation of, for example,
                 monitor locks based on Lamport's algorithm requires
                 O({\em n}) space per monitor lock if there are {\em n}
                 processes that may share the lock. It has occasionally
                 been claimed that this makes the algorithm undesirable
                 in practice. The insight presented here is that we only
                 need a (small) constant amount of space per lock, plus
                 O({\em n}) space shared by all locks in the system. For
                 some common applications, this adds no memory
                 references to the implementation of the algorithm.
                 Fully general spin-lock acquisition and release can
                 usually be implemented with the addition of one memory
                 reference to Lamport's algorithm. On particularly
                 unfavorable hardware, three additional memory
                 references may be required in the contention-free
                 case.},
  acknowledgement = ack-nhfb,
  fjournal =     {ACM Letters on Programming Languages and Systems
                 (LOPLAS)},
  keywords =     {algorithms, verification},
  subject =      {{\bf D.4.1}: Software, OPERATING SYSTEMS, Process
                 Management, Mutual exclusion. {\bf D.3.3}: Software,
                 PROGRAMMING LANGUAGES, Language Constructs and
                 Features, Concurrent programming structures.},
}
@Article{Briggs:1993:ERS,
  author =       {Preston Briggs and Linda Torczon},
  title =        {An efficient representation for sparse sets},
  journal =      j-LOPLAS,
  volume =       {2},
  number =       {4},
  pages =        {59--69},
  month =        mar,
  year =         {1993},
  CODEN =        {ALPSE8},
  DOI =          {https://doi.org/10.1145/176454.176484},
  ISSN =         {1057-4514 (print), 1557-7384 (electronic)},
  ISSN-L =       {1057-4514},
  bibdate =      {Thu May 30 15:54:54 MDT 1996},
  bibsource =    {http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/hash.bib;
                 http://www.math.utah.edu/pub/tex/bib/loplas.bib},
  URL =          {http://www.acm.org/pubs/toc/Abstracts/1057-4514/176484.html},
  abstract =     {Sets are a fundamental abstraction widely used in
                 programming. Many representations are possible, each
                 offering different advantages. We describe a
                 representation that supports constant-time
                 implementations of {\em clear-set}, {\em add-member},
                 and {\em delete-member}. Additionally, it supports an
                 efficient {\em forall} iterator, allowing enumeration
                 of all the members of a set in time proportional to the
                 cardinality of the set.\par
                 We present detailed comparisons of the costs of
                 operations on our representation and on a bit vector
                 representation. Additionally, we give experimental
                 results showing the effectiveness of our representation
                 in a practical application: construction of an
                 interference graph for use during graph-coloring
                 register allocation.\par
                 While this representation was developed to solve a
                 specific problem arising in register allocation, we
                 have found it useful throughout our work, especially
                 when implementing efficient analysis techniques for
                 large programs. However, the new representation is not
                 a panacea. The operations required for a particular set
                 should be carefully considered before this
                 representation, or any other representation, is
                 chosen.},
  acknowledgement = ack-nhfb,
  fjournal =     {ACM Letters on Programming Languages and Systems
                 (LOPLAS)},
  keywords =     {algorithms; experimentation; hashing; measurement},
  subject =      {{\bf E.2}: Data, DATA STORAGE REPRESENTATIONS. {\bf
                 E.1}: Data, DATA STRUCTURES.},
}
@Article{Bumbulis:1993:RMV,
  author =       {Peter Bumbulis and Donald D. Cowan},
  title =        {{RE2C}: a more versatile scanner generator},
  journal =      j-LOPLAS,
  volume =       {2},
  number =       {4},
  pages =        {70--84},
  month =        mar,
  year =         {1993},
  CODEN =        {ALPSE8},
  DOI =          {https://doi.org/10.1145/176454.176487},
  ISSN =         {1057-4514 (print), 1557-7384 (electronic)},
  ISSN-L =       {1057-4514},
  bibdate =      {Thu May 30 15:54:54 MDT 1996},
  bibsource =    {http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/loplas.bib},
  URL =          {http://www.acm.org/pubs/toc/Abstracts/1057-4514/176487.html},
  abstract =     {It is usually claimed that lexical analysis routines
                 are still coded by hand, despite the widespread
                 availability of scanner generators, for efficiency
                 reasons. While efficiency is a consideration, there
                 exist freely available scanner generators such as GLA
                 [Gray 1988] that can generate scanners that are faster
                 than most hand-coded ones. However, most generated
                 scanners are tailored for a particular environment, and
                 retargeting these scanners to other environments, if
                 possible, is usually complex enough to make a
                 hand-coded scanner more appealing. In this paper we
                 describe RE2C, a scanner generator that not only
                 generates scanners that are faster (and usually
                 smaller) than those produced by any other scanner
                 generator known to the authors, including GLA, but that
                 also adapt easily to any environment.},
  acknowledgement = ack-nhfb,
  fjournal =     {ACM Letters on Programming Languages and Systems
                 (LOPLAS)},
  keywords =     {algorithms, languages, performance},
  subject =      {{\bf D.3.4}: Software, PROGRAMMING LANGUAGES,
                 Processors, Parsing. {\bf D.3.2}: Software, PROGRAMMING
                 LANGUAGES, Language Classifications, Specialized
                 application languages.},
}
@Article{Cameron:1993:ECG,
  author =       "Robert D. Cameron",
  title =        "Extending context-free grammars with permutation
                 phrases",
  journal =      j-LOPLAS,
  volume =       "2",
  number =       "4",
  pages =        "85--94",
  month =        mar,
  year =         "1993",
  CODEN =        "ALPSE8",
  DOI =          "https://doi.org/10.1145/176454.176490",
  ISSN =         "1057-4514 (print), 1557-7384 (electronic)",
  ISSN-L =       "1057-4514",
  bibdate =      "Thu May 30 15:54:54 MDT 1996",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/loplas.bib",
  URL =          "http://www.acm.org/pubs/toc/Abstracts/1057-4514/176490.html",
  abstract =     "A {\em permutation phrase} is a grammatical phrase
                 that specifies a syntactic construct as any permutation
                 of a set of constituent elements. Permutation phrases
                 allow for the concise and natural expression of
                 free-order constructs as found in programming languages
                 and notations such as C, Cobol, Bib{\TeX}, and Unix
                 command lines.\par
                 The conciseness and clarity of expression that
                 permutation phrase grammars offer over context-free
                 grammars are illustrated through a case study of the
                 declarations in C. The parsing problem for permutation
                 phrase grammars is considered, and it is shown how
                 efficient linear-time parsing can be achieved for
                 permutation phrase grammars satisfying an extended
                 notion of the LL(1) property.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Letters on Programming Languages and Systems
                 (LOPLAS)",
  keywords =     "documentation, languages, theory",
  subject =      "{\bf D.3.1}: Software, PROGRAMMING LANGUAGES, Formal
                 Definitions and Theory, Syntax. {\bf D.3.4}: Software,
                 PROGRAMMING LANGUAGES, Processors, Parsing. {\bf
                 F.4.2}: Theory of Computation, MATHEMATICAL LOGIC AND
                 FORMAL LANGUAGES, Grammars and Other Rewriting Systems,
                 Grammar types. {\bf D.3.3}: Software, PROGRAMMING
                 LANGUAGES, Language Constructs and Features.",
}
@Article{Choudhary:1993:UCF,
author = "Alok Choudhary and Geoffrey Fox and Seema Hiranandani
and Ken Kennedy and Charles Koelbel and Sanjay Ranka
and Chau-Wen Tseng",
title = "Unified compilation of {Fortran 77D} and {90D}",
journal = j-LOPLAS,
volume = "2",
number = "4",
pages = "95--114",
month = mar,
year = "1993",
CODEN = "ALPSE8",
DOI = "https://doi.org/10.1145/176454.176495",
ISSN = "1057-4514 (print), 1557-7384 (electronic)",
ISSN-L = "1057-4514",
bibdate = "Thu May 30 15:54:54 MDT 1996",
bibsource = "http://www.acm.org/pubs/toc/;
http://www.math.utah.edu/pub/tex/bib/loplas.bib",
URL = "http://www.acm.org/pubs/toc/Abstracts/1057-4514/176495.html",
abstract = "We present a unified approach to compiling Fortran 77D
and Fortran 90D programs for efficient execution on
MIMD distributed-memory machines. The integrated
Fortran D compiler relies on two key
observations. First, array constructs may be {\em
scalarized} into FORALL loops without loss of
information. Second, {\em loop fusion}, {\em
partitioning}, and {\em sectioning} optimizations are
essential for both Fortran D dialects.",
acknowledgement = ack-nhfb,
fjournal = "ACM Letters on Programming Languages and Systems
(LOPLAS)",
keywords = "languages, performance",
subject = "{\bf D.3.4}: Software, PROGRAMMING LANGUAGES,
Processors, Optimization. {\bf C.1.2}: Computer Systems
Organization, PROCESSOR ARCHITECTURES, Multiple Data
Stream Architectures (Multiprocessors),
Multiple-instruction-stream, multiple-data-stream
processors (MIMD). {\bf C.1.2}: Computer Systems
Organization, PROCESSOR ARCHITECTURES, Multiple Data
Stream Architectures (Multiprocessors), Parallel
processors. {\bf D.3.3}: Software, PROGRAMMING
LANGUAGES, Language Constructs and Features, Concurrent
programming structures. {\bf D.3.4}: Software,
PROGRAMMING LANGUAGES, Processors, Code generation.
{\bf D.3.4}: Software, PROGRAMMING LANGUAGES,
Processors, Compilers. {\bf D.3.4}: Software,
PROGRAMMING LANGUAGES, Processors, Preprocessors. {\bf
D.3.2}: Software, PROGRAMMING LANGUAGES, Language
Classifications, FORTRAN.",
}
%%% Day:1993:RRM: references to migrating objects in the Thor
%%% distributed object-oriented database; LOPLAS 2(4), pp. 115--126.
@Article{Day:1993:RRM,
author = "Mark Day and Barbara Liskov and Umesh Maheshwari and
Andrew C. Myers",
title = "References to remote mobile objects in {Thor}",
journal = j-LOPLAS,
volume = "2",
number = "4",
pages = "115--126",
month = mar,
year = "1993",
CODEN = "ALPSE8",
DOI = "https://doi.org/10.1145/176454.176500",
ISSN = "1057-4514 (print), 1557-7384 (electronic)",
ISSN-L = "1057-4514",
bibdate = "Thu May 30 15:54:54 MDT 1996",
bibsource = "http://www.acm.org/pubs/toc/;
http://www.math.utah.edu/pub/tex/bib/loplas.bib",
URL = "http://www.acm.org/pubs/toc/Abstracts/1057-4514/176500.html",
abstract = "Thor is a distributed object-oriented database where
objects are stored persistently at highly available
servers called object repositories, or ORs. In a large
Thor system, performance tuning and system
reconfiguration dictate that objects must be able to
migrate among ORs. The paper describes two schemes for
object references that support object migration, one
using location-independent names and the other,
location-dependent names. The paper analyzes the
performance of the two schemes and concludes that
location-dependent names are the right choice for
systems like Thor, where we want fast access to objects
that have migrated.",
acknowledgement = ack-nhfb,
fjournal = "ACM Letters on Programming Languages and Systems
(LOPLAS)",
keywords = "design",
subject = "{\bf H.2.4}: Information Systems, DATABASE MANAGEMENT,
Systems, Distributed systems. {\bf C.2.4}: Computer
Systems Organization, COMPUTER-COMMUNICATION NETWORKS,
Distributed Systems, Distributed databases. {\bf
D.4.7}: Software, OPERATING SYSTEMS, Organization and
Design, Distributed systems. {\bf E.2}: Data, DATA
STORAGE REPRESENTATIONS, Linked representations. {\bf
H.2.2}: Information Systems, DATABASE MANAGEMENT,
Physical Design.",
}
%%% Eyre-Todd:1993:DDR: smart-pointer detection of dangling references
%%% in C++ programs; LOPLAS 2(4), pp. 127--134.
@Article{Eyre-Todd:1993:DDR,
author = "Richard A. Eyre-Todd",
title = "The detection of dangling references in {C++}
programs",
journal = j-LOPLAS,
volume = "2",
number = "4",
pages = "127--134",
month = mar,
year = "1993",
CODEN = "ALPSE8",
DOI = "https://doi.org/10.1145/176454.176504",
ISSN = "1057-4514 (print), 1557-7384 (electronic)",
ISSN-L = "1057-4514",
bibdate = "Thu May 30 15:54:54 MDT 1996",
bibsource = "http://www.acm.org/pubs/toc/;
http://www.math.utah.edu/pub/tex/bib/loplas.bib",
URL = "http://www.acm.org/pubs/toc/Abstracts/1057-4514/176504.html",
abstract = "The {\em smart pointer} is a programming technique for
the C++ language that extends the functionality of the
simple pointer. Smart pointers have previously been
used to support persistence, distributed objects,
reference counting, and garbage collection. This
article will show how smart pointers can provide an
inexpensive method for detecting dangling pointers to
dynamic objects that can be added to any standard C++
implementation.",
acknowledgement = ack-nhfb,
fjournal = "ACM Letters on Programming Languages and Systems
(LOPLAS)",
keywords = "languages, reliability",
subject = "{\bf D.3.3}: Software, PROGRAMMING LANGUAGES, Language
Constructs and Features, Dynamic storage management.
{\bf D.1.5}: Software, PROGRAMMING TECHNIQUES,
Object-oriented Programming. {\bf D.2.5}: Software,
SOFTWARE ENGINEERING, Testing and Debugging, Debugging
aids. {\bf D.3.3}: Software, PROGRAMMING LANGUAGES,
Language Constructs and Features, Data types and
structures.",
}
%%% Gupta:1993:OAB: compile-time flow analysis to eliminate and hoist
%%% run-time array bound checks; LOPLAS 2(4), pp. 135--150.
@Article{Gupta:1993:OAB,
author = "Rajiv Gupta",
title = "Optimizing array bound checks using flow analysis",
journal = j-LOPLAS,
volume = "2",
number = "4",
pages = "135--150",
month = mar,
year = "1993",
CODEN = "ALPSE8",
DOI = "https://doi.org/10.1145/176454.176507",
ISSN = "1057-4514 (print), 1557-7384 (electronic)",
ISSN-L = "1057-4514",
bibdate = "Thu May 30 15:54:54 MDT 1996",
bibsource = "http://www.acm.org/pubs/toc/;
http://www.math.utah.edu/pub/tex/bib/loplas.bib",
URL = "http://www.acm.org/pubs/toc/Abstracts/1057-4514/176507.html",
abstract = "Bound checks are introduced in programs for the
run-time detection of array bound violations.
Compile-time optimizations are employed to reduce the
execution-time overhead due to bound checks. The
optimizations reduce the program execution time through
{\em elimination} of checks and {\em propagation} of
checks out of loops. An execution of the optimized
program terminates with an array bound violation if and
only if the same outcome would have resulted during the
execution of the program containing all array bound
checks. However, the point at which the array bound
violation occurs may not be the same. Experimental
results indicate that the number of bound checks
performed during the execution of a program is greatly
reduced using these techniques.",
acknowledgement = ack-nhfb,
fjournal = "ACM Letters on Programming Languages and Systems
(LOPLAS)",
keywords = "languages, reliability",
subject = "{\bf D.3.4}: Software, PROGRAMMING LANGUAGES,
Processors, Optimization. {\bf D.2.5}: Software,
SOFTWARE ENGINEERING, Testing and Debugging, Error
handling and recovery. {\bf D.3.4}: Software,
PROGRAMMING LANGUAGES, Processors, Compilers.",
}
%%% Kaser:1993:CID: conditions under which inlining converts mutual
%%% (indirect) recursion to direct recursion; LOPLAS 2(4), pp. 151--164.
@Article{Kaser:1993:CID,
author = "Owen Kaser and C. R. Ramakrishnan and Shaunak Pawagi",
title = "On the conversion of indirect to direct recursion",
journal = j-LOPLAS,
volume = "2",
number = "4",
pages = "151--164",
month = mar,
year = "1993",
CODEN = "ALPSE8",
DOI = "https://doi.org/10.1145/176454.176510",
ISSN = "1057-4514 (print), 1557-7384 (electronic)",
ISSN-L = "1057-4514",
bibdate = "Thu May 30 15:54:54 MDT 1996",
bibsource = "http://www.acm.org/pubs/toc/;
http://www.math.utah.edu/pub/tex/bib/loplas.bib",
URL = "http://www.acm.org/pubs/toc/Abstracts/1057-4514/176510.html",
abstract = "Procedure inlining can be used to convert mutual
recursion to direct recursion. This allows use of
optimization techniques that are most easily applied to
directly recursive procedures, in addition to the
well-known benefits of inlining. We present tight
(necessary {\em and} sufficient) conditions under which
inlining can transform all mutual recursion to direct
recursion, and those under which heuristics to
eliminate mutual recursion always terminate. We also
present a technique to eliminate mutually recursive
circuits that consist of only tail calls. From this, we
conclude that tail recursion elimination should be
interleaved with inlining.",
acknowledgement = ack-nhfb,
fjournal = "ACM Letters on Programming Languages and Systems
(LOPLAS)",
keywords = "algorithms, languages, performance",
subject = "{\bf D.3.3}: Software, PROGRAMMING LANGUAGES, Language
Constructs and Features, Procedures, functions, and
subroutines. {\bf D.3.4}: Software, PROGRAMMING
LANGUAGES, Processors, Compilers. {\bf D.3.4}:
Software, PROGRAMMING LANGUAGES, Processors,
Optimization.",
}
%%% Larus:1993:CSM: compiler-implemented vs. hardware-implemented shared
%%% memory for parallel languages; LOPLAS 2(4), pp. 165--180.
%%% NOTE(review): abstract spells ``short-comings'' with a hyphen;
%%% presumably faithful to the published text -- verify before changing.
@Article{Larus:1993:CSM,
author = "James R. Larus",
title = "Compiling for shared-memory and message-passing
computers",
journal = j-LOPLAS,
volume = "2",
number = "4",
pages = "165--180",
month = mar,
year = "1993",
CODEN = "ALPSE8",
DOI = "https://doi.org/10.1145/176454.176514",
ISSN = "1057-4514 (print), 1557-7384 (electronic)",
ISSN-L = "1057-4514",
bibdate = "Thu May 30 15:54:54 MDT 1996",
bibsource = "http://www.acm.org/pubs/toc/;
http://www.math.utah.edu/pub/tex/bib/loplas.bib",
URL = "http://www.acm.org/pubs/toc/Abstracts/1057-4514/176514.html",
abstract = "Many parallel languages presume a shared address space
in which any portion of a computation can access any
datum. Some parallel computers directly support this
abstraction with hardware shared memory. Other
computers provide distinct (per-processor) address
spaces and communication mechanisms on which software
can construct a shared address space. Since programmers
have difficulty explicitly managing address spaces,
there is considerable interest in compiler support for
shared address spaces on the widely available
message-passing computers.\par
At first glance, it might appear that
hardware-implemented shared memory is unquestionably a
better base on which to implement a language. This
paper argues, however, that compiler-implemented shared
memory, despite its short-comings, has the potential to
exploit more effectively the resources in a parallel
computer. Hardware designers need to find mechanisms to
combine the advantages of both approaches in a single
system.",
acknowledgement = ack-nhfb,
fjournal = "ACM Letters on Programming Languages and Systems
(LOPLAS)",
keywords = "languages, performance",
subject = "{\bf D.3.4}: Software, PROGRAMMING LANGUAGES,
Processors, Compilers. {\bf B.3.2}: Hardware, MEMORY
STRUCTURES, Design Styles, Shared memory. {\bf C.1.2}:
Computer Systems Organization, PROCESSOR ARCHITECTURES,
Multiple Data Stream Architectures (Multiprocessors),
Multiple-instruction-stream, multiple-data-stream
processors (MIMD). {\bf C.1.2}: Computer Systems
Organization, PROCESSOR ARCHITECTURES, Multiple Data
Stream Architectures (Multiprocessors), Parallel
processors. {\bf D.1.3}: Software, PROGRAMMING
TECHNIQUES, Concurrent Programming, Parallel
programming. {\bf D.3.4}: Software, PROGRAMMING
LANGUAGES, Processors, Optimization. {\bf D.3.4}:
Software, PROGRAMMING LANGUAGES, Processors, Run-time
environments.",
}
%%% Marriott:1993:PEG: groundness analysis of logic programs by abstract
%%% interpretation with positive Boolean functions; LOPLAS 2(4),
%%% pp. 181--196.
@Article{Marriott:1993:PEG,
author = "Kim Marriott and Harald S{\o}ndergaard",
title = "Precise and efficient groundness analysis for logic
programs",
journal = j-LOPLAS,
volume = "2",
number = "4",
pages = "181--196",
month = mar,
year = "1993",
CODEN = "ALPSE8",
DOI = "https://doi.org/10.1145/176454.176519",
ISSN = "1057-4514 (print), 1557-7384 (electronic)",
ISSN-L = "1057-4514",
bibdate = "Thu May 30 15:54:54 MDT 1996",
bibsource = "http://www.acm.org/pubs/toc/;
http://www.math.utah.edu/pub/tex/bib/loplas.bib",
URL = "http://www.acm.org/pubs/toc/Abstracts/1057-4514/176519.html",
abstract = "We show how precise groundness information can be
extracted from logic programs. The idea is to use
abstract interpretation with Boolean functions as
``approximations'' to groundness dependencies between
variables. This idea is not new, and different classes
of Boolean functions have been used. We argue, however,
that one class, the {\em positive} functions, is more
suitable than others. Positive Boolean functions have a
certain property which we (inspired by A. Langen) call
``condensation.'' This property allows for rapid
computation of groundness information.",
acknowledgement = ack-nhfb,
fjournal = "ACM Letters on Programming Languages and Systems
(LOPLAS)",
keywords = "languages, theory",
subject = "{\bf F.3.1}: Theory of Computation, LOGICS AND
MEANINGS OF PROGRAMS, Specifying and Verifying and
Reasoning about Programs, Assertions. {\bf D.1.6}:
Software, PROGRAMMING TECHNIQUES, Logic Programming.
{\bf D.3.4}: Software, PROGRAMMING LANGUAGES,
Processors, Compilers. {\bf D.3.4}: Software,
PROGRAMMING LANGUAGES, Processors, Optimization. {\bf
F.3.1}: Theory of Computation, LOGICS AND MEANINGS OF
PROGRAMS, Specifying and Verifying and Reasoning about
Programs, Invariants. {\bf F.3.1}: Theory of
Computation, LOGICS AND MEANINGS OF PROGRAMS,
Specifying and Verifying and Reasoning about Programs,
Logics of programs.",
}
%%% Marriott:1993:SCL: model-theoretic and operational semantics for
%%% constraint logic programs with optimization; LOPLAS 2(4),
%%% pp. 197--212.
@Article{Marriott:1993:SCL,
author = "Kim Marriott and Peter J. Stuckey",
title = "Semantics of constraint logic programs with
optimization",
journal = j-LOPLAS,
volume = "2",
number = "4",
pages = "197--212",
month = mar,
year = "1993",
CODEN = "ALPSE8",
DOI = "https://doi.org/10.1145/176454.176522",
ISSN = "1057-4514 (print), 1557-7384 (electronic)",
ISSN-L = "1057-4514",
bibdate = "Thu May 30 15:54:54 MDT 1996",
bibsource = "http://www.acm.org/pubs/toc/;
http://www.math.utah.edu/pub/tex/bib/loplas.bib",
URL = "http://www.acm.org/pubs/toc/Abstracts/1057-4514/176522.html",
abstract = "Many applications of constraint logic programming
(CLP) languages require not only testing if a set of
constraints is satisfiable, but also finding the
optimal solution which satisfies them. Unfortunately,
the standard declarative semantics for CLP languages
does not consider optimization but only constraint
satisfaction. Here we give a model theoretic semantics
for optimization, which is a simple extension of the
standard semantics, and a corresponding operational
semantics, which may be efficiently implemented.",
acknowledgement = ack-nhfb,
fjournal = "ACM Letters on Programming Languages and Systems
(LOPLAS)",
keywords = "languages, theory",
subject = "{\bf F.4.1}: Theory of Computation, MATHEMATICAL LOGIC
AND FORMAL LANGUAGES, Mathematical Logic, Logic
programming. {\bf D.1.6}: Software, PROGRAMMING
TECHNIQUES, Logic Programming. {\bf F.3.2}: Theory of
Computation, LOGICS AND MEANINGS OF PROGRAMS, Semantics
of Programming Languages, Operational semantics.",
}
@Article{Metzger:1993:ICP,
author = "Robert Metzger and Sean Stroud",
title = "Interprocedural constant propagation: an empirical
study",
journal = j-LOPLAS,
volume = "2",
number = "4",
pages = "213--232",
month = mar,
year = "1993",
CODEN = "ALPSE8",
DOI = "https://doi.org/10.1145/176454.176526",
ISSN = "1057-4514 (print), 1557-7384 (electronic)",
ISSN-L = "1057-4514",
bibdate = "Thu May 30 15:54:54 MDT 1996",
bibsource = "http://www.acm.org/pubs/toc/;
http://www.math.utah.edu/pub/tex/bib/loplas.bib",
URL = "http://www.acm.org/pubs/toc/Abstracts/1057-4514/176526.html",
abstract = "Constant propagation is an optimization that
substitutes values for names. Interprocedural constant
propagation performs this substitution throughout an
entire program, propagating constant values across
procedure boundaries. CONVEX Computer Corporation has
developed an interprocedural optimizer for imperative
languages, which is available to users of its C-series
supercomputers. An aggressive interprocedural constant
propagation algorithm, such as the one implemented in
this optimizer, can find many constants to propagate
into procedures in scientific FORTRAN applications.
Detailed statistics derived from compiling a large set
of real scientific applications characterize both the
opportunities for interprocedural constant propagation
in these codes and the effectiveness of the algorithm
described. These statistics include the frequency of
interprocedural constant propagation, the different
types of interprocedural constant propagation, which
constants are most frequently propagated, and the
relationship between interprocedural constant
propagation and other interprocedural optimizations.",
acknowledgement = ack-nhfb,
fjournal = "ACM Letters on Programming Languages and Systems
(LOPLAS)",
keywords = "algorithms, languages, performance",
subject = "{\bf D.3.4}: Software, PROGRAMMING LANGUAGES,
Processors, Optimization. {\bf D.3.3}: Software,
PROGRAMMING LANGUAGES, Language Constructs and
Features, Data types and structures. {\bf D.3.3}:
Software, PROGRAMMING LANGUAGES, Language Constructs
and Features, Procedures, functions, and subroutines.
{\bf D.3.4}: Software, PROGRAMMING LANGUAGES,
Processors, Code generation. {\bf D.3.4}: Software,
PROGRAMMING LANGUAGES, Processors, Compilers. {\bf
I.2.2}: Computing Methodologies, ARTIFICIAL
INTELLIGENCE, Automatic Programming, Program
transformation.",
}
@Article{Qian:1993:RON,
author = "Xiaolei Qian and Allen Goldberg",
title = "Referential opacity in nondeterministic data
refinement",
journal = j-LOPLAS,
volume = "2",
number = "4",
pages = "233--241",
month = mar,
year = "1993",
CODEN = "ALPSE8",
DOI = "https://doi.org/10.1145/176454.176578",
ISSN = "1057-4514 (print), 1557-7384 (electronic)",
ISSN-L = "1057-4514",
bibdate = "Thu May 30 15:54:54 MDT 1996",
bibsource = "http://www.acm.org/pubs/toc/;
http://www.math.utah.edu/pub/tex/bib/loplas.bib",
URL = "http://www.acm.org/pubs/toc/Abstracts/1057-4514/176578.html",
abstract = "Data refinement is the transformation in a program of
one data type to another. With the obvious
formalization of nondeterministic data types in
equational logic however, many desirable
nondeterministic data refinements are impossible to
prove correct. Furthermore, it is difficult to have a
monotonic notion of refinement. We propose an
alternative formalization of nondeterministic data
types, in which the requirement of referential
transparency applies only to deterministic operators.
We show how the above-mentioned problems can be solved
with our approach.",
acknowledgement = ack-nhfb,
fjournal = "ACM Letters on Programming Languages and Systems
(LOPLAS)",
keywords = "languages, theory, verification",
subject = "{\bf D.3.3}: Software, PROGRAMMING LANGUAGES, Language
Constructs and Features, Abstract data types. {\bf
D.2.4}: Software, SOFTWARE ENGINEERING, Program
Verification, Correctness proofs. {\bf F.3.2}: Theory
of Computation, LOGICS AND MEANINGS OF PROGRAMS,
Semantics of Programming Languages, Algebraic
approaches to semantics.",
}