[1]

Amir M. Ben-Amram and Samir Genaim.
On Multiphase-Linear Ranking Functions.
In Viktor Kuncak and Rupak Majumdar, editors, Proceedings of the
29th International Conference on Computer Aided Verification (CAV'17),
Lecture Notes in Computer Science. Springer, July 2017.
[ bibtex 
abstract ]
Multiphase ranking functions (MΦRF) were
proposed as a means to prove the termination of a
loop in which the computation progresses through a
number of "phases", and the progress of each phase
is described by a different linear ranking function.
Our work provides new insights regarding such
functions for loops described by a conjunction of
linear constraints (single-path loops). We provide
a complete polynomial-time solution to the problem
of existence and of synthesis of MΦRF
of bounded depth (number of phases), when variables
range over rational or real numbers; a complete
solution for the (harder) case that variables are
integer, with a matching lower-bound proof, showing
that the problem is coNP-complete; and a new theorem
which bounds the number of iterations for loops with
MΦRFs. Surprisingly, the bound is
linear, even when the variables involved change in
nonlinear way. We also consider a type of
lexicographic ranking functions more
expressive than types of lexicographic functions for
which complete solutions have been given so far. We
prove that for the above type of loops,
lexicographic functions can be reduced to
MΦRFs, and thus the questions of
complexity of detection, synthesis, and iteration
bounds are also answered for this class.
@inproceedings{BenAmramG17,
author = {Amir M. Ben-Amram and Samir Genaim},
title = {{O}n {M}ultiphase-{L}inear {R}anking {F}unctions},
editor = {Viktor Kuncak and Rupak Majumdar},
booktitle = {Proceedings of the 29th International Conference on Computer Aided Verification (CAV'17)},
year = {2017},
month = jul,
series = {Lecture Notes in Computer Science},
publisher = {Springer}
}

[2]

Amir M. Ben-Amram and Samir Genaim.
Complexity of Bradley-Manna-Sipma Lexicographic Ranking
Functions.
In Daniel Kroening and Corina Pasareanu, editors, Proceedings of
the 27th International Conference on Computer Aided Verification (CAV'15),
volume 9207 of Lecture Notes in Computer Science, pages 304-321.
Springer, July 2015.
[ bibtex 
abstract 
DOI ]
In this paper we turn the spotlight on a class of
lexicographic ranking functions introduced by
Bradley, Manna and Sipma in a seminal CAV 2005
paper, and establish for the first time the
complexity of some problems involving the inference
of such functions for linearconstraint loops
(without precondition). We show that finding such a
function, if one exists, can be done in polynomial
time in a way which is sound and complete when the
variables range over the rationals (or reals). We
show that when variables range over the integers,
the problem is harder—deciding the existence of a
ranking function is coNP-complete. Next, we study
the problem of minimizing the number of components
in the ranking function (a.k.a. the dimension). This
number is interesting in contexts like computing
iteration bounds and loop
parallelization. Surprisingly, and unlike the
situation for some other classes of lexicographic
ranking functions, we find that even deciding
whether a two-component ranking function exists is
harder than the unrestricted problem: NP-complete
over the rationals and Σ^{P}_{2}-complete over
the integers.
@inproceedings{BenAmramG15,
author = {Amir M. Ben-Amram and Samir Genaim},
title = {{C}omplexity of {B}radley-{M}anna-{S}ipma {L}exicographic {R}anking {F}unctions},
editor = {Daniel Kroening and Corina Pasareanu},
booktitle = {Proceedings of the 27th International Conference on Computer Aided Verification (CAV'15)},
year = {2015},
month = jul,
pages = {304--321},
volume = {9207},
series = {Lecture Notes in Computer Science},
doi = {10.1007/978-3-319-21668-3_18},
isbn = {978-3-319-21667-6},
issn = {0302-9743},
publisher = {Springer}
}

[3]

Pierre Ganty, Samir Genaim, Ratan Lal, and Pavithra Prabhakar.
From Non-Zenoness Verification to Termination.
In 13th ACM/IEEE International Conference on Formal Methods and
Models for Codesign (MEMOCODE 2015), pages 228-237. IEEE, 2015.
[ bibtex 
abstract 
DOI ]
We investigate the problem of verifying the absence
of zeno executions in a hybrid system. A zeno
execution is one in which there are infinitely many
discrete transitions in a finite time interval. The
presence of zeno executions poses challenges towards
implementation and analysis of hybrid control
systems. We present a simple transformation of the
hybrid system which reduces the nonzenoness
verification problem to the termination verification
problem, that is, the original system has no zeno
executions if and only if the transformed system has
no nonterminating executions. This provides both
theoretical insights and practical techniques for
nonzenoness verification. Further, it also
provides techniques for isolating parts of the
hybrid system and its initial states which do not
exhibit zeno executions. We illustrate the
feasibility of our approach by applying it on hybrid
system examples.
@inproceedings{GantyGLP15,
author = {Pierre Ganty and Samir Genaim and Ratan Lal and Pavithra Prabhakar},
title = {{F}rom {N}on-{Z}enoness {V}erification to {T}ermination},
booktitle = {13th {ACM/IEEE} International Conference on Formal Methods and Models for Codesign ({MEMOCODE} 2015)},
pages = {228--237},
year = {2015},
doi = {10.1109/MEMCOD.2015.7340490},
publisher = {{IEEE}},
isbn = {978-1-5090-0237-5}
}

[4]

Elvira Albert, Samir Genaim, and Pablo Gordillo.
May-Happen-in-Parallel Analysis for Asynchronous Programs
with Inter-Procedural Synchronization.
In Sandrine Blazy and Thomas Jensen, editors, Proceedings of the
22nd International Symposium on Static Analysis (SAS'15), volume 9291 of
Lecture Notes in Computer Science, pages 72-89. Springer, 2015.
[ bibtex 
abstract 
DOI ]
A may-happen-in-parallel (MHP) analysis
computes pairs of program points that may execute in
parallel across different distributed
components. This information has been proven to be
essential to infer both safety properties (e.g.,
deadlock freedom) and liveness properties
(termination and resource boundedness) of
asynchronous programs. Existing MHP analyses take
advantage of the synchronization points to learn
that one task has finished and thus will not happen
in parallel with other tasks that are still
active. Our starting point is an existing MHP
analysis developed for intra-procedural
synchronization, i.e., it only allows
synchronizing with tasks that have been spawned
inside the current task. This paper leverages such
MHP analysis to handle interprocedural
synchronization, i.e., a task spawned by one task
can be awaited within a different task. This is
challenging because task synchronization goes beyond
the boundaries of methods, and thus the inference of
MHP relations requires novel extensions to capture
interprocedural dependencies. The analysis has
been implemented and it can be tried online.
@inproceedings{AlbertGG15,
author = {Elvira Albert and Samir Genaim and Pablo Gordillo},
title = {{M}ay-{H}appen-in-{P}arallel {A}nalysis for {A}synchronous {P}rograms with {I}nter-{P}rocedural {S}ynchronization},
editor = {Sandrine Blazy and Thomas Jensen},
booktitle = {Proceedings of the 22nd International Symposium on Static Analysis (SAS'15)},
series = {Lecture Notes in Computer Science},
volume = {9291},
publisher = {Springer},
pages = {72--89},
doi = {10.1007/978-3-662-48288-9_5},
isbn = {978-3-662-48287-2},
issn = {0302-9743},
year = {2015}
}

[5]

Elvira Albert, Puri Arenas, Jesús Correas, Samir Genaim, Miguel Gómez-Zamalloa, Enrique Martin-Martin, Germán Puebla, and
Guillermo Román-Díez.
Resource Analysis: From Sequential to Concurrent and
Distributed Programs.
In Nikolaj Bjørner and Frank D. de Boer, editors, 20th
International Symposium on Formal Methods (FM'15), volume 9109, pages 3-17.
Springer, 2015.
[ bibtex 
abstract 
DOI ]
Resource analysis aims at automatically inferring
upper/lower bounds on the worst/bestcase cost of
executing programs. Ideally, a resource analyzer
should be parametric on the cost model, i.e., the
type of cost that the user wants infer (e.g., number
of steps, amount of memory allocated, amount of data
transmitted, etc.). The inferred upper bounds have
important applications in the fields of program
optimization, verification and certification. In
this talk, we will review the basic techniques used
in resource analysis of sequential programs and the
new extensions needed to handle concurrent and
distributed systems.
@inproceedings{AlbertACGGMPR15,
author = {Elvira Albert and
Puri Arenas and
Jes{\'{u}}s Correas and
Samir Genaim and
Miguel G{\'{o}}mez{-}Zamalloa and
Enrique Martin{-}Martin and
Germ{\'{a}}n Puebla and
Guillermo Rom{\'{a}}n{-}D{\'{\i}}ez},
title = {{R}esource {A}nalysis: {F}rom {S}equential to {C}oncurrent and {D}istributed {P}rograms},
editor = {Nikolaj Bj{\o}rner and Frank D. de Boer},
booktitle = {20th International Symposium on Formal Methods (FM'15)},
series = {Lecture Notes in Computer Science},
pages = {3--17},
year = {2015},
volume = {9109},
publisher = {Springer},
isbn = {978-3-319-19248-2},
issn = {0302-9743},
doi = {10.1007/978-3-319-19249-9_1}
}

[6]

Elvira Albert, Samir Genaim, and Raúl Gutiérrez.
A Transformational Approach to Resource Analysis with
Typed-Norms.
In Gopal Gupta and Ricardo Peña, editors, Proceedings of the
23rd International Symposium on Logic-based Program Synthesis and
Transformation (LOPSTR'13), volume 8901 of Lecture Notes in Computer
Science, pages 38-53. Springer, 2014.
[ bibtex 
abstract 
DOI ]
In order to automatically infer the resource
consumption of programs, analyzers track how
data sizes change along program's execution.
Typically, analyzers measure the sizes of data by
applying norms which are mappings from data
to natural numbers that represent the sizes of the
corresponding data. When norms are defined by taking
type information into account, they are named
typednorms. The main contribution of this
extended abstract is a transformational approach to
resource analysis with typednorms. The analysis is
based on a transformation of the program into an
intermediate abstract program in which each
variable is abstracted with respect to all
considered norms which are valid for its type. We
also sketch a simple analysis that can be used to
automatically infer the required, useful,
typednorms from programs.
@inproceedings{AlbertGG14,
author = {Elvira Albert and Samir Genaim and Ra{\'u}l Guti{\'e}rrez},
title = {{A} {T}ransformational {A}pproach to {R}esource {A}nalysis with {T}yped-{N}orms},
booktitle = {Proceedings of the 23rd International Symposium on Logic-based Program Synthesis and Transformation (LOPSTR'13)},
editor = {Gupta, Gopal and Pe{\~n}a, Ricardo},
year = {2014},
pages = {38--53},
volume = {8901},
series = {Lecture Notes in Computer Science},
doi = {10.1007/978-3-319-14125-1_3},
publisher = {Springer},
isbn = {978-3-319-14124-4},
issn = {0302-9743}
}

[7]

Elvira Albert, Samir Genaim, and Enrique Martin-Martin.
May-Happen-in-Parallel Analysis for Priority-based
Scheduling.
In Ken McMillan, Aart Middeldorp, and Andrei Voronkov, editors,
Proceedings of the 19th International Conference on Logic for Programming
Artificial Intelligence and Reasoning (LPAR-19), volume 8312 of Lecture
Notes in Computer Science, pages 18-34. Springer, December 2013.
[ bibtex 
abstract 
DOI ]
A mayhappeninparallel (MHP) analysis infers the
sets of pairs of program points that may execute in
parallel along a program's execution. This is an
essential piece of information to detect data races,
and also to infer more complex properties of
concurrent programs, e.g., deadlock freeness,
termination and resource consumption analyses can
greatly benefit from the MHP relations to increase
their accuracy. Previous MHP analyses have assumed a
worst case scenario by adopting a simplistic
(nondeterministic) task scheduler which can select
any available task. While the results of the
analysis for a nondeterministic scheduler are
obviously sound, they can lead to an overly
pessimistic result. We present an MHP analysis for
an asynchronous language with prioritized tasks
buffers. Prioritybased scheduling is arguably the
most common scheduling strategy adopted in the
implementation of concurrent languages. The
challenge is to be able to take task priorities into
account at static analysis time in order to filter
out unfeasible MHP pairs.
@inproceedings{AlbertGM13b,
author = {Elvira Albert and Samir Genaim and Enrique Martin-Martin},
title = {{M}ay-{H}appen-in-{P}arallel {A}nalysis for {P}riority-based {S}cheduling},
editor = {McMillan, Ken and Middeldorp, Aart and Voronkov, Andrei},
booktitle = {Proceedings of the 19th International Conference on Logic for Programming Artificial Intelligence and Reasoning (LPAR-19)},
volume = {8312},
series = {Lecture Notes in Computer Science},
publisher = {Springer},
isbn = {978-3-642-45220-8},
issn = {0302-9743},
month = dec,
year = {2013},
pages = {18--34},
doi = {10.1007/978-3-642-45221-5_2}
}

[8]

Elvira Albert, Antonio Flores-Montoya, Samir Genaim, and Enrique Martin-Martin.
Termination and Cost Analysis of Loops with Concurrent Interleavings.
In Dang Van Hung and Mizuhito Ogawa, editors, Proceedings of the
11th International Symposium on Automated Technology for Verification and
Analysis (ATVA'13), volume 8172 of Lecture Notes in Computer Science,
pages 349-364. Springer, 2013.
[ bibtex 
abstract 
DOI ]
By following a relyguarantee style of reasoning, we present
a novel termination analysis for concurrent programs that, in order to
prove termination of a considered loop, makes the assumption that the
"shared data that is involved in the termination proof of the loop is
modified a finite number of times". In a subsequent step, it proves that
this assumption holds in all code whose execution might interleave with such
loop. At the core of the analysis, we use a mayhappeninparallel analysis
to restrict the set of program points whose execution can interleave with
the considered loop. Interestingly, the same kind of reasoning can
be applied to infer upper bounds on the number of iterations of loops
with concurrent interleavings. To the best of our knowledge, this is the
first method to automatically bound the cost of such kind of loops.
@inproceedings{AlbertFGM13,
author = {Elvira Albert and Antonio Flores-Montoya and Samir Genaim and Enrique Martin-Martin},
title = {{T}ermination and {C}ost {A}nalysis of {L}oops with {C}oncurrent {I}nterleavings},
booktitle = {Proceedings of the 11th International Symposium on Automated Technology for Verification and Analysis (ATVA'13)},
editor = {Dang Van Hung and Mizuhito Ogawa},
pages = {349--364},
volume = {8172},
series = {Lecture Notes in Computer Science},
publisher = {Springer},
year = {2013},
doi = {10.1007/978-3-319-02444-8_25},
isbn = {978-3-319-02443-1},
issn = {0302-9743}
}

[9]

Diego Esteban Alonso Blas, Puri Arenas, and Samir Genaim.
Precise Cost Analysis via Local Reasoning.
In Dang Van Hung and Mizuhito Ogawa, editors, Proceedings of the
11th International Symposium on Automated Technology for Verification and
Analysis (ATVA'13), volume 8172 of Lecture Notes in Computer Science,
pages 319333. Springer, 2013.
[ bibtex 
abstract 
DOI ]
The classical approach to static cost analysis is based on first
transforming a given program into a set of cost relations, and then solving
them into closedform upperbounds.
The quality of the upperbounds and the scalability of such cost
analysis highly depend on the precision and efficiency of the
solving phase.
Several techniques for solving cost relations exist, some are
efficient but not precise enough, and some are very precise but do
not scale to large cost relations.
In this paper we explore the gap between these techniques, seeking
for ones that are both precise and efficient.
In particular, we propose a novel technique that first splits the
cost relation into several atomic ones, and then uses precise
local reasoning for some and less precise but efficient reasoning for
others.
For the precise local reasoning, we propose several methods that
define the cost as a solution of a universally quantified formula.
Preliminary experiments demonstrate the effectiveness of our
approach.
@inproceedings{AlonsoAG13,
author = {Diego Esteban Alonso Blas and Puri Arenas and Samir Genaim},
title = {{P}recise {C}ost {A}nalysis via {L}ocal {R}easoning},
booktitle = {Proceedings of the 11th International Symposium on Automated Technology for Verification and Analysis (ATVA'13)},
editor = {Dang Van Hung and Mizuhito Ogawa},
pages = {319--333},
volume = {8172},
series = {Lecture Notes in Computer Science},
publisher = {Springer},
year = {2013},
isbn = {978-3-319-02443-1},
issn = {0302-9743},
doi = {10.1007/978-3-319-02444-8_23}
}

[10]

Antonio Flores-Montoya, Elvira Albert, and Samir Genaim.
May-Happen-in-Parallel based Deadlock Analysis for
Concurrent Objects.
In Dirk Beyer and Michele Boreale, editors, Proceedings of the
International Conference on Formal Techniques for Distributed Systems
(FMOODS/FORTE'13), volume 7892 of Lecture Notes in Computer Science,
pages 273-288. Springer, 2013.
[ bibtex 
abstract 
DOI ]
We present a novel deadlock analysis for concurrent objects based on
the results inferred by a pointsto analysis and a
mayhappeninparallel (MHP) analysis. Similarly to other analysis,
we build a dependency graph such that the absence of cycles
in the graph ensures deadlock freeness. An MHP
analysis provides an overapproximation of the pairs of program
points that may be running in parallel. The crux of the method is
that the analysis integrates the MHP information within the dependency
graph in order to discard unfeasible cycles that otherwise would
lead to false positives. We argue that our analysis is more precise
and/or efficient than previous proposals for deadlock analysis of
concurrent objects. As regards accuracy, we are able to handle cases
that other analyses have pointed out as challenges. As regards
efficiency, the complexity of our deadlock analysis is polynomial.
@inproceedings{FloresAG13,
author = {Antonio Flores-Montoya and Elvira Albert and Samir Genaim},
title = {{M}ay-{H}appen-in-{P}arallel based {D}eadlock {A}nalysis for {C}oncurrent {O}bjects},
booktitle = {Proceedings of the International Conference on Formal Techniques for Distributed Systems (FMOODS/FORTE'13)},
editor = {Dirk Beyer and Michele Boreale},
series = {Lecture Notes in Computer Science},
year = {2013},
pages = {273--288},
volume = {7892},
isbn = {978-3-642-38591-9},
issn = {0302-9743},
doi = {10.1007/978-3-642-38592-6_19},
publisher = {Springer}
}

[11]

Pierre Ganty and Samir Genaim.
Proving Termination Starting from the End.
In Natasha Sharygina and Helmut Veith, editors, Proceedings of
the 25th International Conference on Computer Aided Verification (CAV'13),
volume 8044 of Lecture Notes in Computer Science, pages 397412.
Springer, 2013.
[ bibtex 
abstract 
DOI 
arXiv ]
We present a novel technique for proving program
termination which introduces a new dimension of
modularity. Existing techniques use the program to
incrementally construct a termination proof. While
the proof keeps changing, the program remains the
same. Our technique goes a step further. We show
how to use the current partial proof to partition
the transition relation into those behaviors known
to be terminating from the current proof, and those
whose status (terminating or not) is not known
yet. This partition enables a new and unexplored
dimension of incremental reasoning on the program
side. In addition, we show that our approach
naturally applies to conditional termination which
searches for a precondition ensuring termination.
We further report on a prototype implementation that
advances the stateoftheart on the grounds of
termination and conditional termination.
@inproceedings{GantyG13,
author = {Pierre Ganty and Samir Genaim},
title = {{P}roving {T}ermination {S}tarting from the {E}nd},
editor = {Natasha Sharygina and Helmut Veith},
booktitle = {Proceedings of the 25th International Conference on Computer Aided Verification (CAV'13)},
year = {2013},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
pages = {397--412},
volume = {8044},
arxiv = {http://arxiv.org/abs/1302.4539},
doi = {10.1007/978-3-642-39799-8_27},
isbn = {978-3-642-39798-1},
issn = {0302-9743}
}

[12]

Amir M. Ben-Amram and Samir Genaim.
On the Linear Ranking Problem for Integer
Linear-Constraint Loops.
In Roberto Giacobazzi and Radhia Cousot, editors, Proceedings of
the 40th Annual ACM SIGPLAN-SIGACT Symposium on Principles of Programming
Languages (POPL'13), pages 51-62, 2013.
Nominated for CACM Research Highlights
http://www.sigplan.org/Newsletters/CACM/Papers.
[ bibtex 
abstract 
DOI ]
In this paper we study the complexity of the Linear
Ranking problem: given a loop, described by linear
constraints over a finite set of integer variables,
is there a linear ranking function for this loop?
While existence of such a function implies
termination, this problem is not equivalent to
termination. When the variables range over the
rationals or reals, the Linear Ranking problem is
known to be PTIME decidable. However, when they
range over the integers, whether for singlepath or
multipath loops, the decidability of the Linear
Ranking problem has not yet been determined. We show
that it is decidable, but harder than the former
case: it is coNP-complete. However, we point out
some special cases of importance of PTIME
complexity. We also present complete algorithms for
synthesizing linear ranking functions, both for the
general case and the special PTIME cases.
@inproceedings{BenAmramG13,
author = {Amir M. Ben-Amram and Samir Genaim},
title = {{O}n the {L}inear {R}anking {P}roblem for {I}nteger {L}inear-{C}onstraint {L}oops},
editor = {Roberto Giacobazzi and Radhia Cousot},
booktitle = {Proceedings of the 40th Annual {ACM} {SIGPLAN-SIGACT} Symposium on Principles
of Programming Languages (POPL'13)},
year = {2013},
pages = {51--62},
doi = {10.1145/2429069.2429078},
note = {Nominated for CACM Research Highlights \url{http://www.sigplan.org/Newsletters/CACM/Papers}}
}

[13]

Diego Esteban Alonso Blas and Samir Genaim.
On the Limits of the Classical Approach to Cost Analysis.
In Antoine Miné and David Schmidt, editors, Proceedings of
the 19th International Symposium of Static Analysis (SAS'12), volume 7460 of
Lecture Notes in Computer Science, pages 405-421. Springer, 2012.
[ bibtex 
abstract 
DOI ]
The classical approach to static cost analysis is
based on transforming a given program into cost
relations and solving them into closedform
upperbounds. It is known that for some programs,
this approach infers upperbounds that are
asymptotically less precise than the actual cost.
As yet, it was assumed that this imprecision is due
to the way cost relations are solved into
upperbounds. In this paper: (1) we show that this
assumption is partially true, and identify the
reason due to which cost relations cannot precisely
model the cost of such programs; and (2) to overcome
this imprecision, we develop a new approach to cost
analysis, based on SMT and quantifier elimination.
Interestingly, we find a strong relation between our
approach and amortised cost analysis.
@inproceedings{AlonsoG12,
author = {Diego Esteban Alonso Blas and Samir Genaim},
title = {{O}n the {L}imits of the {C}lassical {A}pproach to {C}ost {A}nalysis},
booktitle = {Proceedings of the 19th International Symposium on Static Analysis (SAS'12)},
editor = {Antoine Min{\'e} and David Schmidt},
year = {2012},
pages = {405--421},
doi = {10.1007/978-3-642-33125-1_27},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = {7460},
isbn = {978-3-642-33124-4},
issn = {0302-9743}
}

[14]

Elvira Albert, Antonio Flores-Montoya, and Samir Genaim.
Analysis of May-Happen-in-Parallel in Concurrent Objects.
In Holger Giese and Grigore Rosu, editors, Proceedings of the
International Conference on Formal Techniques for Distributed Systems
(FMOODS/FORTE'12), volume 7273 of Lecture Notes in Computer Science,
pages 35-51. Springer, 2012.
[ bibtex 
abstract 
DOI ]
This paper presents a mayhappeninparallel
(MHP) analysis for OO languages based on
concurrent objects. In this concurrency
model, objects are the concurrency units such
that, when a method is invoked on an object o_{2}
from a task executing on object o_{1}, statements of
the current task in o_{1} may run in parallel with
those of the (asynchronous) call on o_{2}, and with
those of transitively invoked methods. The goal of
the MHP analysis is to identify pairs of statements
in the program that may run in parallel in any
execution. Our MHP analysis is formalized as a
methodlevel (local) analysis whose
information can be modularly composed to obtain
applicationlevel (global) information.
@inproceedings{AlbertFG12a,
author = {Elvira Albert and Antonio Flores-Montoya and Samir Genaim},
title = {{A}nalysis of {M}ay-{H}appen-in-{P}arallel in {C}oncurrent {O}bjects},
editor = {Holger Giese and Grigore Rosu},
booktitle = {Proceedings of the International Conference on Formal Techniques for Distributed Systems (FMOODS/FORTE'12)},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = {7273},
year = {2012},
isbn = {978-3-642-30792-8},
issn = {0302-9743},
pages = {35--51},
doi = {10.1007/978-3-642-30793-5_3}
}

[15]

Elvira Albert, Richard Bubel, Samir Genaim, Reiner Hähnle, and Guillermo Román-Díez.
Verified Resource Guarantees for Heap Manipulating
Programs.
In Juan de Lara and Andrea Zisman, editors, Proceedings of the
15th International Conference on Fundamental Approaches to Software
Engineering (FASE'12), volume 7212 of Lecture Notes in Computer
Science, pages 130-145. Springer, 2012.
[ bibtex 
abstract 
DOI ]
Program properties that are automatically inferred by
static analysis tools are generally not considered
to be completely trustworthy, unless the tool
implementation or the results are formally
verified. Here we focus on the formal verification
of resource guarantees inferred by automatic cost
analysis. Resource guarantees ensure that programs
run within the indicated amount of resources which
may refer to memory consumption, to number of
instructions executed, etc. In previous work we
studied formal verification of inferred resource
guarantees that depend only on integer data. In
realistic programs, however, resource consumption is
often bounded by the size of heapallocated data
structures. Bounding their size requires to perform
a number of structural heap analyses. The
contributions of this paper are (i) to identify what
exactly needs to be verified to guarantee sound
analysis of heap manipulating programs, (ii) to
provide a suitable extension of the program logic
used for verification to handle structural heap
properties in the context of resource guarantees,
and (iii) to improve the underlying theorem prover
so that proof obligations can be automatically
discharged.
@inproceedings{AlbertBGHR12,
author = {Elvira Albert and
Richard Bubel and
Samir Genaim and
Reiner H{\"a}hnle and
Guillermo Rom{\'a}n-D{\'\i}ez},
title = {{V}erified {R}esource {G}uarantees for {H}eap
{M}anipulating {P}rograms},
editor = {Juan de Lara and Andrea Zisman},
booktitle = {Proceedings of the 15th International Conference on Fundamental Approaches to Software Engineering (FASE'12)},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = {7212},
year = {2012},
isbn = {978-3-642-28871-5},
issn = {0302-9743},
pages = {130--145},
doi = {10.1007/978-3-642-28872-2_10}
}

[16]

Elvira Albert, Puri Arenas, Samir Genaim, Miguel Gómez-Zamalloa, and
Germán Puebla.
Automatic Inference of Resource Consumption Bounds.
In Nikolaj Bjørner and Andrei Voronkov, editors, Proceedings
of the 18th International Conference on Logic for Programming, Artificial
Intelligence, and Reasoning (LPAR18), volume 7180 of Lecture Notes in
Computer Science, pages 1-11. Springer, 2012.
[ bibtex 
abstract 
DOI ]
One of the main features of programs is the amount of
resources which are needed in order to run
them. Different resources can be taken into
consideration, such as the number of execution
steps, amount of memory allocated, number of calls
to certain methods, etc. Unfortunately, manually
determining the resource consumption of programs is
difficult and errorprone. We provide an overview of
a state of the art framework for automatically
obtaining both upper and lower bounds on the
resource consumption of programs. The bounds
obtained are functions on the size of the input
arguments to the program and are obtained
statically, i.e., without running the program. Due
to the approximations introduced, the framework can
fail to obtain (nontrivial) bounds even if they
exist. On the other hand, modulo implementation
bugs, the bounds thus obtained are valid for any
execution of the program. The framework has been
implemented in the COSTA system and can provide
useful bounds for realistic objectoriented and
actorbased concurrent programs.
@inproceedings{AlbertAGGP12b,
author = {Elvira Albert and
Puri Arenas and
Samir Genaim and
Miguel G{\'o}mez-Zamalloa and
Germ{\'a}n Puebla},
title = {{A}utomatic {I}nference of {R}esource {C}onsumption {B}ounds},
editor = {Nikolaj Bj{\o}rner and Andrei Voronkov},
booktitle = {Proceedings of the 18th International Conference on Logic for Programming, Artificial Intelligence, and Reasoning (LPAR-18)},
pages = {1--11},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = {7180},
year = {2012},
isbn = {978-3-642-28716-9},
issn = {0302-9743},
doi = {10.1007/978-3-642-28717-6_1}
}

[17]

Amir M. Ben-Amram, Samir Genaim, and Abu Naser Masud.
On the Termination of Integer Loops.
In Viktor Kuncak and Andrey Rybalchenko, editors, Proceedings of
the 13th International Conference on Verification, Model Checking, and
Abstract Interpretation (VMCAI'12), volume 7148 of Lecture Notes in
Computer Science, pages 72-87. Springer, 2012.
[ bibtex 
abstract 
DOI ]
In this paper we study the decidability of termination
of several variants of simple integer loops, without
branching in the loop body and with affine
constraints as the loop guard (and possibly a
precondition). We show that termination of such
loops is undecidable in some cases, in particular,
when the body of the loop is expressed by a set of
linear inequalities where the coefficients are from
Z ∪ {r} with r an arbitrary irrational; or
when the loop is a sequence of instructions, that
compute either linear expressions or the step
function. The undecidability result is proven by a
reduction from counter programs, whose termination
is known to be undecidable. For the common case of
integer constraints loops with rational coefficients
only we have not succeeded in proving decidability
nor undecidability of termination, however, this
attempt led to the result that a Petri net can be
simulated with such a loop, which implies some
interesting lower bounds. For example, termination
for a given input is at least EXPSPACEhard.
@inproceedings{BenAmramGM12,
author = {Amir M. Ben-Amram and Samir Genaim and Abu Naser Masud},
title = {{O}n the {T}ermination of {I}nteger {L}oops},
editor = {Viktor Kuncak and Andrey Rybalchenko},
booktitle = {Proceedings of the 13th International Conference on Verification, Model Checking, and Abstract Interpretation (VMCAI'12)},
pages = {72--87},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = {7148},
year = {2012},
isbn = {978-3-642-27939-3},
issn = {0302-9743},
doi = {10.1007/978-3-642-27940-9_6}
}

[18]

Elvira Albert, Puri Arenas, Samir Genaim, Miguel Gómez-Zamalloa, and Germán
Puebla.
Cost Analysis of Concurrent OO Programs.
In Hongseok Yang, editor, Proceedings of the 9th Asian Symposium
on Programming Languages and Systems (APLAS'11), volume 7078 of Lecture
Notes in Computer Science, pages 238-254. Springer, 2011.
[ bibtex 
abstract 
DOI ]
Cost analysis aims at automatically approximating the
resource consumption (e.g., memory) of
executing a program in terms of its input
parameters. While cost analysis for sequential
programming languages has received considerable
attention, concurrency and distribution have been
notably less studied. The main challenges (and our
contributions) of cost analysis in a concurrent
setting are: (1) Inferring precise size
relations for data in the program in the presence
of shared memory. This information is essential for
bounding the number of iterations of loops. (2)
Distribution suggests that analysis must keep the
cost of the diverse distributed components
separate. We handle this by means of a novel form of
recurrence equations which are parametric on
the notion of cost center, which represents a
corresponding component. To the best of our
knowledge, our work is the first one to present a
general cost analysis framework and an
implementation for concurrent OO programs.
@inproceedings{AlbertAGGP11,
author = {Elvira Albert and
Puri Arenas and
Samir Genaim and
Miguel G{\'o}mez-Zamalloa and
German Puebla},
title = {{C}ost {A}nalysis of {C}oncurrent {OO} {P}rograms},
editor = {Hongseok Yang},
booktitle = {Proceedings of the 9th Asian Symposium on Programming Languages and Systems (APLAS'11)},
series = {Lecture Notes in Computer Science},
publisher = {Springer},
volume = 7078,
year = 2011,
pages = {238--254},
doi = {10.1007/978-3-642-25318-8_19},
isbn = {978-3-642-25317-1},
issn = {0302-9743}
}

[19]

Elvira Albert, Samir Genaim, Miguel Gómez-Zamalloa, Einar Broch Johnsen,
Rudolf Schlatte, and Silvia Lizeth Tapia Tarifa.
Simulating Concurrent Behaviors with Worst-Case Cost
Bounds.
In Michael Butler and Wolfram Schulte, editors, Proceedings of
the 17th International Symposium on Formal Methods (FM'11), volume 6664 of
Lecture Notes in Computer Science, pages 353–368. Springer, 2011.
[ bibtex 
abstract 
DOI ]
Modern software systems are increasingly being
developed for deployment on a range of
architectures. For this purpose, it is interesting
to capture aspects of lowlevel deployment concerns
in highlevel modeling languages. In this paper, an
executable objectoriented modeling language is
extended with resourcerestricted deployment
components. To analyze model behavior a formal
methodology is proposed to assess resource
consumption, which balances the scalability of the
method and the reliability of the obtained
results. The approach applies to a general notion of
resource, including traditional cost measures (e.g.,
time, memory) as well as concurrencyrelated
measures (e.g., requests to a server, spawned
tasks). The main idea of our approach is to combine
reliable (but expensive) worstcase cost analysis of
statically predictable parts of the model with fast
(but inherently incomplete) simulations of the
concurrent aspects in order to avoid the statespace
explosion. The approach is illustrated by the
analysis of memory consumption.
@inproceedings{AlbertGGJST11,
author = {Elvira Albert and
Samir Genaim and
Miguel G{\'o}mez-Zamalloa and
Einar Broch Johnsen and
Rudolf Schlatte and
Silvia Lizeth Tapia Tarifa},
title = {{S}imulating {C}oncurrent {B}ehaviors with
{W}orst-{C}ase {C}ost {B}ounds},
editor = {Michael Butler and Wolfram Schulte},
booktitle = {Proceedings of the 17th International Symposium on Formal Methods (FM'11)},
year = 2011,
pages = {353--368},
series = {Lecture Notes in Computer Science},
volume = {6664},
publisher = {Springer},
isbn = {978-3-642-21436-3},
issn = {0302-9743},
doi = {10.1007/978-3-642-21437-0_27}
}

[20]

Elvira Albert, Puri Arenas, Samir Genaim, and Damiano Zanardini.
Task-Level Analysis for a Language with Async-Finish
Parallelism.
In Jan Vitek and Bjorn De Sutter, editors, Proceedings of the
ACM SIGPLAN/SIGBED 2011 conference on Languages, compilers, and tools for
embedded systems (LCTES'11), pages 21–30. ACM, 2011.
[ bibtex 
abstract 
DOI ]
The tasklevel of a program is the maximum number of
tasks that can be available (i.e., not finished nor
suspended) simultaneously during its execution for
any input data. Static knowledge of the tasklevel
is of utmost importance for understanding and
debugging parallel programs as well as for guiding
task schedulers. We present, to the best of our
knowledge, the first static analysis which infers
safe and precise approximations on the tasklevel
for a language with asyncfinish parallelism. In
parallel languages, async and finish are the basic
constructs, respectively, for spawning tasks and for
waiting until they terminate. They are the core of
modern, parallel, distributed languages like X10.
Given a (parallel) program, our analysis returns a
tasklevel upper bound, i.e., a function on the
program's input arguments that guarantees that the
tasklevel of the program will never exceed its
value along any execution. Our analysis provides a
series of useful (over)approximations, going from
the total number of tasks spawned in the execution
up to an accurate estimation of the tasklevel.
@inproceedings{AlbertAGZ11,
author = {Elvira Albert and
Puri Arenas and
Samir Genaim and
Damiano Zanardini},
title = {{T}ask-{L}evel {A}nalysis for a {L}anguage with
{A}sync-{F}inish {P}arallelism},
booktitle = {Proceedings of the ACM SIGPLAN/SIGBED 2011 conference on Languages, compilers, and tools for embedded systems (LCTES'11)},
editor = {Jan Vitek and Bjorn De Sutter},
publisher = {ACM},
year = 2011,
pages = {21--30},
isbn = {978-1-4503-0555-6},
doi = {10.1145/1967677.1967681}
}

[21]

Elvira Albert, Samir Genaim, and Abu Naser Masud.
More Precise yet Widely Applicable Cost Analysis.
In Ranjit Jhala and David A. Schmidt, editors, Proceedings of
the 12th International Conference on Verification, Model Checking, and
Abstract Interpretation (VMCAI'11), volume 6538 of Lecture Notes in
Computer Science, pages 38–53. Springer, 2011.
[ bibtex 
abstract 
DOI ]
Cost analysis aims at determining the amount of
resources required to run a program in terms of its
input data sizes. Automatically inferring precise
bounds, while at the same time being able to handle
a wide class of programs, is a main challenge in
cost analysis. (1) Existing methods which rely on
computer algebra systems (CAS) to solve the
obtained cost recurrence equations (CR) are very
precise when applicable, but handle a very
restricted class of CR. (2) Specific solvers
developed for CR tend to sacrifice accuracy for
wider applicability. In this paper, we present a
novel approach to inferring precise upper and lower
bounds on CR which, when compared to (1), is
strictly more widely applicable while precision is
kept and, when compared to (2), is in practice more
precise and still keeps wide applicability. The main
novelty is that we are able to accurately bound the
worstcase/bestcase cost of each iteration of the
program loops and, then, by summing the resulting
sequences, we obtain precise upper/lower
bounds. Preliminary experimental results on Java
(bytecode) programs confirm the good balance between
the accuracy of our analysis and its
applicability.
@inproceedings{AlbertGM11,
author = {Elvira Albert and Samir Genaim and Abu Naser Masud},
title = {{M}ore {P}recise yet {W}idely {A}pplicable {C}ost
{A}nalysis},
booktitle = {Proceedings of the 12th International Conference on Verification, Model Checking, and Abstract Interpretation (VMCAI'11)},
editor = {Ranjit Jhala and David A. Schmidt},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = 6538,
pages = {38--53},
year = 2011,
isbn = {978-3-642-18274-7},
issn = {0302-9743},
doi = {10.1007/978-3-642-18275-4_5}
}

[22]

Elvira Albert, Puri Arenas, Samir Genaim, Germán Puebla, and Diana
Ramírez.
From Object Fields to Local Variables: A Practical
Approach to Field-Sensitive Analysis.
In Radhia Cousot and Matthieu Martel, editors, Proceedings of
the 17th International Symposium on Static Analysis (SAS'10), volume 6337 of
Lecture Notes in Computer Science, pages 100–116. Springer, 2010.
[ bibtex 
abstract 
DOI ]
Static analysis which takes into account the value of
data stored in the heap is typically considered
complex and computationally intractable in
practice. Thus, most static analyzers do not keep
track of object fields (or fields for short), i.e.,
they are fieldinsensitive. In this paper, we
propose locality conditions for soundly converting
fields into local variables. This way,
fieldinsensitive analysis over the transformed
program can infer information on the original
fields. Our notion of locality is contextsensitive
and can be applied both to numeric and reference
fields. We propose then a polyvariant
transformation which actually converts object fields
meeting the locality condition into variables and
which is able to generate multiple versions of code
when this leads to increasing the amount of fields
which satisfy the locality conditions. We have
implemented our analysis within a termination
analyzer for Java bytecode. Interestingly, we can
now prove termination of programs which use
iterators without the need of a sophisticated heap
analysis.
@inproceedings{AlbertAGPR10,
author = {Elvira Albert and
Puri Arenas and
Samir Genaim and
Germ{\'a}n Puebla and
Diana Ram{\'\i}rez},
title = {{F}rom {O}bject {F}ields to {L}ocal {V}ariables: {A}
{P}ractical {A}pproach to {F}ield-{S}ensitive
{A}nalysis},
booktitle = {Proceedings of the 17th International Symposium on Static Analysis (SAS'10)},
editor = {Radhia Cousot and Matthieu Martel},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = 6337,
isbn = {978-3-642-15768-4},
issn = {0302-9743},
year = 2010,
pages = {100--116},
doi = {10.1007/978-3-642-15769-1_7}
}

[23]

Elvira Albert, Samir Genaim, and Miguel Gómez-Zamalloa.
Parametric Inference of Memory Requirements for Garbage
Collected Languages.
In Jan Vitek and Doug Lea, editors, Proceedings of the 2010
International Symposium on Memory Management (ISMM'10), pages 121–130. ACM,
2010.
[ bibtex 
abstract 
DOI ]
The accurate prediction of program's memory
requirements is a critical component in software
development. Existing heap space analyses either do
not take deallocation into account or adopt specific
models of garbage collectors which do not
necessarily correspond to the actual memory usage.
We present a novel approach to inferring upper
bounds on memory requirements of Javalike programs
which is parametric on the notion of
object lifetime, i.e., on when objects become
collectable. If objects lifetimes are inferred by a
reachability analysis, then our analysis
infers accurate upper bounds on the memory
consumption for a reachabilitybased garbage
collector. Interestingly, if objects lifetimes are
inferred by a heap liveness analysis, then we
approximate the program minimal memory requirement,
i.e., the peak memory usage when using an optimal
garbage collector which frees objects as soon as
they become dead. The key idea is to integrate
information on objects lifetimes into the process of
generating the recurrence equations which
capture the memory usage at the different program
states. If the heap size limit is set to the memory
requirement inferred by our analysis, it is ensured
that execution will not exceed the memory limit with
the only assumption that garbage collection works
when the limit is to be reached. Experiments on Java
bytecode programs provide evidence of the
feasibility and accuracy of our analysis.
@inproceedings{AlbertGG10,
author = {Elvira Albert and
Samir Genaim and
Miguel G{\'o}mez-Zamalloa},
title = {{P}arametric {I}nference of {M}emory {R}equirements for
{G}arbage {C}ollected {L}anguages},
editor = {Jan Vitek and Doug Lea},
booktitle = {Proceedings of the 2010 International Symposium on Memory Management (ISMM'10)},
year = 2010,
isbn = {978-1-4503-0054-4},
pages = {121--130},
doi = {10.1145/1806651.1806671},
publisher = {ACM}
}

[24]

Michael Codish, Samir Genaim, and Peter J. Stuckey.
A Declarative Encoding of Telecommunications Feature
Subscription in SAT.
In António Porto and Francisco Javier López-Fraguas, editors,
Proceedings of the 11th International ACM SIGPLAN Conference on
Principles and Practice of Declarative Programming (PPDP'09), pages
255–266. ACM, 2009.
[ bibtex 
abstract 
DOI ]
This paper describes the encoding of a
telecommunications feature subscription
configuration problem to propositional logic and its
solution using a stateoftheart Boolean
satisfaction solver. The transformation of a
problem instance to a corresponding propositional
formula in conjunctive normal form is obtained in a
declarative style. An experimental evaluation
indicates that our encoding is considerably faster
than previous approaches based on the use of Boolean
satisfaction solvers. The key to obtaining such a
fast solver is the careful design of the Boolean
representation and of the basic operations in the
encoding. The choice of a declarative programming
style makes the use of complex circuit designs
relatively easy to incorporate into the encoder and
to fine tune the application.
@inproceedings{CodishGS09,
author = {Michael Codish and Samir Genaim and Peter J. Stuckey},
title = {A {D}eclarative {E}ncoding of {T}elecommunications {F}eature {S}ubscription in {SAT}},
booktitle = {Proceedings of the 11th International ACM SIGPLAN Conference on Principles and Practice of Declarative Programming (PPDP'09)},
editor = {Ant{\'o}nio Porto and Francisco Javier L{\'o}pez-Fraguas},
year = 2009,
pages = {255--266},
doi = {10.1145/1599410.1599442},
publisher = {ACM},
isbn = {978-1-60558-568-0}
}

[25]

Elvira Albert, Puri Arenas, Samir Genaim, Israel Herraiz, and Germán Puebla.
Comparing Cost Functions in Resource Analysis.
In Marko C. J. D. van Eekelen and Olha Shkaravska, editors,
Proceedings of the 1st International Workshop on Foundational and Practical
Aspects of Resource Analysis (FOPARA'09), volume 6324 of Lecture Notes
in Computer Science, pages 1–17. Springer, 2009.
[ bibtex 
abstract 
DOI ]
Cost functions provide information about the
amount of resources required to execute a program in
terms of the sizes of input arguments. They can
provide an upperbound, a lowerbound, or the
averagecase cost. Motivated by the existence of a
number of automatic cost analyzers which produce
cost functions, we propose an approach for
automatically proving that a cost function is
smaller than another one. In all applications of
resource analysis, such as resourceusage
verification, program synthesis and optimization,
etc., it is essential to compare cost
functions. This allows choosing an implementation
with smaller cost or guaranteeing that the given
resourceusage bounds are preserved. Unfortunately,
automatically generated cost functions for realistic
programs tend to be rather intricate, defined by
multiple cases, involving nonlinear subexpressions
(e.g., exponential, polynomial and logarithmic) and
they can contain multiple variables, possibly
related by means of constraints. Thus, comparing
cost functions is far from trivial. Our approach
first syntactically transforms functions into
simpler forms and then applies a number of
sufficient conditions which guarantee that a set of
expressions is smaller than another expression. Our
preliminary implementation in the COSTA system
indicates that the approach can be useful in
practice.
@inproceedings{AlbertAGHP09,
author = {Elvira Albert and
Puri Arenas and
Samir Genaim and
Israel Herraiz and
Germ{\'a}n Puebla},
title = {{C}omparing {C}ost {F}unctions in {R}esource {A}nalysis},
booktitle = {Proceedings of the 1st International Workshop on Foundational and Practical Aspects of Resource Analysis (FOPARA'09)},
editor = {Marko C. J. D. van Eekelen and Olha Shkaravska},
year = 2009,
volume = 6324,
pages = {1--17},
doi = {10.1007/978-3-642-15331-0_1},
series = {Lecture Notes in Computer Science},
publisher = {Springer},
issn = {0302-9743},
isbn = {978-3-642-15330-3}
}

[26]

Elvira Albert, Diego Alonso, Puri Arenas, Samir Genaim, and Germán Puebla.
Asymptotic Resource Usage Bounds.
In Zhenjiang Hu, editor, Proceedings of the 7th Asian Symposium
on Programming Languages and Systems (APLAS'09), volume 5904 of Lecture
Notes in Computer Science, pages 294–310. Springer, 2009.
[ bibtex 
abstract 
DOI ]
When describing the resource usage of a program, it is
usual to talk in asymptotic terms, such as
the wellknown “big O” notation, whereby we focus
on the behaviour of the program for large input data
and make a rough approximation by considering as
equivalent programs whose resource usage grows at
the same rate. Motivated by the existence of
nonasymptotic resource usage analyzers, in
this paper, we develop a novel transformation from a
nonasymptotic cost function (which can be produced
by multiple resource analyzers) into its asymptotic
form. Our transformation aims at producing tight
asymptotic forms which do not contain
redundant subexpressions (i.e., expressions
asymptotically subsumed by others). Interestingly,
we integrate our transformation at the heart of a
cost analyzer to generate asymptotic upper
bounds without having to first compute their
nonasymptotic counterparts. Our experimental
results show that, while nonasymptotic cost
functions become very complex, their asymptotic
forms are much more compact and manageable. This is
essential to improve scalability and to enable the
application of cost analysis in resourceaware
verification/certification.
@inproceedings{AlbertAAGP09,
author = {Elvira Albert and
Diego Alonso and
Puri Arenas and
Samir Genaim and
Germ{\'a}n Puebla},
title = {{A}symptotic {R}esource {U}sage {B}ounds},
booktitle = {Proceedings of the 7th Asian Symposium on Programming Languages and Systems (APLAS'09)},
year = 2009,
editor = {Zhenjiang Hu},
series = {Lecture Notes in Computer Science},
volume = 5904,
pages = {294--310},
isbn = {978-3-642-10671-2},
issn = {0302-9743},
publisher = {Springer},
doi = {10.1007/978-3-642-10672-9_21}
}

[27]

Elvira Albert, Puri Arenas, Samir Genaim, and Germán Puebla.
Field-Sensitive Value Analysis by Field-Insensitive
Analysis.
In Ana Cavalcanti and Dennis Dams, editors, Proceedings of the
16th International Symposium on Formal Methods (FM'09), volume 5850 of
Lecture Notes in Computer Science, pages 370–386. Springer, 2009.
[ bibtex 
abstract 
DOI ]
Shared mutable data structures pose major problems in
static analysis and most analyzers are unable to
keep track of the values of numeric variables
stored in the heap. In this paper, we first
identify sufficient conditions under which heap
allocated numeric variables in object oriented
programs (i.e., numeric fields) can be handled as
nonheap allocated variables. Then, we present a
static analysis to infer which numeric fields
satisfy these conditions at the level of
(sequential) bytecode. This allows
instrumenting the code with ghost variables
which make such numeric fields observable to any
fieldinsensitive value analysis. Our experimental
results in termination analysis show that we greatly
enlarge the class of analyzable programs with a
reasonable overhead.
@inproceedings{AlbertAGP09,
author = {Elvira Albert and Puri Arenas and Samir Genaim and
Germ{\'a}n Puebla},
title = {{F}ield-{S}ensitive {V}alue {A}nalysis by {F}ield-{I}nsensitive {A}nalysis},
booktitle = {Proceedings of the 16th International Symposium on Formal Methods (FM'09)},
year = 2009,
editor = {Ana Cavalcanti and Dennis Dams},
series = {Lecture Notes in Computer Science},
publisher = {Springer},
volume = {5850},
pages = {370--386},
isbn = {978-3-642-05089-3},
issn = {0302-9743},
doi = {10.1007/978-3-642-05089-3_24}
}

[28]

Elvira Albert, Samir Genaim, and Miguel Gómez-Zamalloa.
Live Heap Space Analysis for Languages with Garbage
Collection.
In Hillel Kolodner and Guy L. Steele Jr., editors, Proceedings
of the 8th International Symposium on Memory Management (ISMM'09), pages
129–138. ACM, 2009.
[ bibtex 
abstract 
DOI ]
The peak heap consumption of a program is the
maximum size of the live data on the heap
during the execution of the program, i.e., the
minimum amount of heap space needed to run the
program without exhausting the memory. It is
wellknown that garbage collection (GC) makes the
problem of predicting the memory required to run a
program difficult. This paper presents, the best of
our knowledge, the first live heap space
analysis for garbagecollected languages which
infers accurate upper bounds on the peak heap usage
of a program's execution that are not restricted to
any complexity class, i.e., we can infer
exponential, logarithmic, polynomial, etc.,
bounds. Our analysis is developed for an
(sequential) objectoriented bytecode language with
a scopedmemory manager that reclaims
unreachable memory when methods return. We also show
how our analysis can accommodate other GC schemes
which are closer to the ideal GC which
collects objects as soon as they become unreachable.
The practicality of our approach is experimentally
evaluated on a prototype implementation. We
demonstrate that it is fully automatic, reasonably
accurate and efficient by inferring live heap space
bounds for a standardized set of benchmarks, the
JOlden suite.
@inproceedings{AlbertGG09,
author = {Elvira Albert and
Samir Genaim and
Miguel G{\'o}mez-Zamalloa},
title = {{L}ive {H}eap {S}pace {A}nalysis for {L}anguages with
{G}arbage {C}ollection},
editor = {Hillel Kolodner and Guy L. Steele Jr.},
booktitle = {Proceedings of the 8th International Symposium on Memory Management (ISMM'09)},
year = 2009,
pages = {129--138},
isbn = {978-1-60558-347-1},
publisher = {ACM},
doi = {10.1145/1542431.1542450}
}

[29]

Elvira Albert, Puri Arenas, Samir Genaim, and Germán Puebla.
Automatic Inference of Upper Bounds for Recurrence
Relations in Cost Analysis.
In María Alpuente and Germán Vidal, editors,
Proceedings of the 15th International Symposium on Static Analysis (SAS'08),
volume 5079 of Lecture Notes in Computer Science, pages 221–237.
Springer, 2008.
[ bibtex 
abstract 
DOI ]
COSTA is a research prototype which performs automatic
program analysis and which is able to infer cost and
termination information about Java bytecode
programs. The system receives as input a bytecode
program and a cost model chosen from a selection of
resource descriptions, and tries to bound the
resource consumption of the program with respect to
the given cost model. COSTA provides several
nontrivial notions of resource, such as the amount
of memory allocated on the heap, the number of
bytecode instructions executed, the number of
billable events (such as sending a text message on a
mobile phone) executed by the program. When
performing cost analysis, COSTA produces a cost
equation system, which is an extended form of
recurrence relations. In order to obtain a closed
(i.e., nonrecursive) form for such recurrence
relations which represents an upper bound, COSTA
includes a dedicated solver. An interesting feature
of COSTA is that it uses pretty much the same
machinery for inferring upper bounds on cost as for
proving termination (which also implies the
boundedness of any resource consumption).
@inproceedings{AlbertAGP08,
author = {Elvira Albert and
Puri Arenas and
Samir Genaim and
Germ{\'a}n Puebla},
title = {{A}utomatic {I}nference of {U}pper {B}ounds for
{R}ecurrence {R}elations in {C}ost {A}nalysis},
booktitle = {Proceedings of the 15th International Symposium on Static Analysis (SAS'08)},
editor = {Mar\'{\i}a Alpuente and Germ{\'a}n Vidal},
year = 2008,
pages = {221--237},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = 5079,
isbn = {978-3-540-69163-1},
issn = {0302-9743},
doi = {10.1007/978-3-540-69166-2_15}
}

[30]

Elvira Albert, Puri Arenas, Michael Codish, Samir Genaim, Germán Puebla,
and Damiano Zanardini.
Termination Analysis of Java Bytecode.
In Gilles Barthe and Frank S. de Boer, editors, Proceedings of
the 10th IFIP WG 6.1 International Conference on Formal Methods for Open
Object-Based Distributed Systems (FMOODS'08), volume 5051 of Lecture
Notes in Computer Science, pages 2–18. Springer, 2008.
[ bibtex 
abstract 
DOI ]
Termination analysis has received considerable
attention, traditionally in the context of
declarative programming, and recently also for
imperative languages. In existing approaches,
termination is performed on source
programs. However, there are many situations,
including mobile code, where only the compiled code
is available. In this work we present an automatic
termination analysis for sequential Java Bytecode
programs. Such analysis presents all of the
challenges of analyzing a lowlevel language as well
as those introduced by objectoriented languages.
Interestingly, given a bytecode program, we produce
a constraint logic program, CLP, whose
termination entails termination of the bytecode
program. This allows applying the large body of work
in termination of CLP programs to termination of
Java bytecode. A prototype analyzer is described
and initial experimentation is reported.
@inproceedings{AlbertACGPZ08,
author = {Elvira Albert and
Puri Arenas and
Michael Codish and
Samir Genaim and
Germ{\'a}n Puebla and
Damiano Zanardini},
title = {{T}ermination {A}nalysis of {J}ava {B}ytecode},
booktitle = {Proceedings of the 10th IFIP WG 6.1 International Conference on Formal Methods for Open Object-Based Distributed Systems (FMOODS'08)},
editor = {Gilles Barthe and Frank S. de Boer},
year = 2008,
pages = {2--18},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = 5051,
isbn = {978-3-540-68862-4},
issn = {0302-9743},
doi = {10.1007/978-3-540-68863-1_2}
}

[31]

Elvira Albert, Puri Arenas, Samir Genaim, Germán Puebla, and Damiano Zanardini.
Removing Useless Variables in Cost Analysis of Java
Bytecode.
In Roger L. Wainwright and Hisham Haddad, editors, Proceedings
of the 2008 ACM Symposium on Applied Computing (SAC'08), pages 368–375.
ACM, 2008.
[ bibtex 
abstract 
DOI ]
Automatic cost analysis has interesting applications
in the context of verification and certification of
mobile code. For instance, the code receiver can
use cost information in order to decide whether to
reject mobile code which has too large cost
requirements in terms of computing resources (in
time and/or space) or billable events (SMSs sent,
bandwidth required). Existing cost analyses for a
variety of languages describe the resource
consumption of programs by means of Cost
Equation Systems (CESs), which are similar to, but
more general than recurrence equations. CESs express
the cost of a program in terms of the size of its
input data. In a further step, a closed form (i.e.,
nonrecursive) solution or upper bound can sometimes
be found by using existing Computer Algebra Systems
(CASs), such as Maple and Mathematica. In this
work, we focus on cost analysis of Java bytecode, a
language which is widely used in the context of
mobile code and we study the problem of identifying
variables which are useless in the sense that
they do not affect the execution cost and therefore
can be ignored by cost analysis. We identify two
classes of useless variables and propose automatic
analysis techniques to detect them. The first class
corresponds to stack variables that can be
replaced by program variables or constant values.
The second class corresponds to variables whose
value is costirrelevant, i.e., does not
affect the cost of the program. We propose an
algorithm, inspired in static slicing which
safely identifies costirrelevant variables. The
benefits of eliminating useless variables are
twofold: (1) cost analysis without useless
variables can be more efficient and (2) resulting
CESs are more likely to be solvable by existing
CASs.
@inproceedings{AlbertAGPZ08,
author = {Elvira Albert and
Puri Arenas and
Samir Genaim and
Germ{\'a}n Puebla and
Damiano Zanardini},
title = {{R}emoving {U}seless {V}ariables in {C}ost {A}nalysis of {J}ava {B}ytecode},
editor = {Roger L. Wainwright and Hisham Haddad},
booktitle = {Proceedings of the 2008 ACM Symposium on Applied Computing (SAC'08)},
year = 2008,
pages = {368--375},
isbn = {978-1-59593-753-7},
doi = {10.1145/1363686.1363779},
publisher = {ACM}
}

[32]

Elvira Albert, Samir Genaim, and Miguel Gómez-Zamalloa.
Heap Space Analysis for Java Bytecode.
In Greg Morrisett and Mooly Sagiv, editors, Proceedings of the
6th International Symposium on Memory management (ISMM'07), pages 105–116.
ACM, 2007.
[ bibtex 
abstract 
DOI ]
This article presents a heap space analysis for
(sequential) Java bytecode. The analysis generates
heap space cost relations which define at
compiletime the heap consumption of a program as a
function of its data size. These relations can be
used to obtain upper bounds on the heap space
allocated during the execution of the different
methods. In addition, we describe how to refine the
cost relations, by relying on escape
analysis, in order to take into account the heap
space that can be safely deallocated by the garbage
collector upon exit from a corresponding
method. These refined cost relations are then used
to infer upper bounds on the active heap
space upon methods return. Example applications
for the analysis consider inference of constant heap
usage and heap usage proportional to the data size
(including polynomial and exponential heap
consumption). Our prototype implementation is
reported and demonstrated by means of a series of
examples which illustrate how the analysis naturally
encompasses standard datastructures like lists,
trees and arrays with several dimensions written in
objectoriented programming style.
@inproceedings{AlbertGG07,
author = {Elvira Albert and
Samir Genaim and
Miguel G{\'o}mez-Zamalloa},
title = {{H}eap {S}pace {A}nalysis for {J}ava {B}ytecode},
booktitle = {Proceedings of the 6th International Symposium on Memory management (ISMM'07)},
editor = {Greg Morrisett and Mooly Sagiv},
year = 2007,
isbn = {978-1-59593-893-0},
pages = {105--116},
doi = {10.1145/1296907.1296922},
publisher = {ACM}
}

[33]

Elvira Albert, Puri Arenas, Samir Genaim, Germán Puebla, and Damiano Zanardini.
Cost Analysis of Java Bytecode.
In Rocco De Nicola, editor, Proceedings of the 16th European
Symposium on Programming (ESOP'07), volume 4421 of Lecture Notes in
Computer Science, pages 157–172. Springer-Verlag, 2007.
[ bibtex 
abstract 
DOI ]
Cost analysis of Java bytecode is complicated by its
unstructured control flow, the use of an operand
stack and its objectoriented programming features
(like dynamic dispatching). This paper addresses
these problems and develops a generic framework for
the automatic cost analysis of sequential Java
bytecode. Our method generates cost
relations which define at compiletime the cost of
programs as a function of their input data size. To
the best of our knowledge, this is the first
approach to the automatic cost analysis of Java
bytecode.
@inproceedings{AlbertAGPZ07,
author = {Elvira Albert and Puri Arenas and Samir Genaim and
Germ{\'a}n Puebla and Damiano Zanardini},
title = {{C}ost {A}nalysis of {J}ava {B}ytecode},
booktitle = {Proceedings of the 16th European Symposium on Programming (ESOP'07)},
year = 2007,
editor = {Rocco De Nicola},
series = {Lecture Notes in Computer Science},
publisher = {Springer-Verlag},
volume = {4421},
pages = {157--172},
issn = {0302-9743},
isbn = {978-3-540-71314-2},
doi = {10.1007/978-3-540-71316-6_12}
}

[34]

Andy King, Lunjin Lu, and Samir Genaim.
Detecting Determinacy in Prolog Programs.
In Sandro Etalle and Miroslaw Truszczynski, editors, Proceedings
of the 22nd International Conference on Logic Programming (ICLP'06), volume
4079 of Lecture Notes in Computer Science, pages 132–147.
Springer-Verlag, 2006.
[ bibtex 
abstract 
DOI ]
The aim of determinacy inference is to infer a class of
calls to a given logic program, for which execution
will generate at most one answer and generate the
answer only once. Two serious impediments to
accurate determinacy inference are: (1) the way
bindings imposed by a rightmost goal can make a
leftmost goal deterministic; (2) the way determinacy
is often enforced with cut. Rather surprisingly,
this paper shows how: problem (1) can be tackled by
recasting determinacy inference as a problem in
concurrency; problem (2) can be addressed within
this concurrency framework. Experimental evaluation
shows that the new analysis can infer richer classes
of deterministic calls for many programs.
@inproceedings{KingLG06,
author = {Andy King and Lunjin Lu and Samir Genaim},
title = {{D}etecting {D}eterminacy in {P}rolog {P}rograms},
booktitle = {Proceedings of the 22nd International Conference on Logic Programming (ICLP'06)},
editor = {Sandro Etalle and Miroslaw Truszczynski},
pages = {132--147},
doi = {10.1007/11799573_12},
publisher = {Springer-Verlag},
series = {Lecture Notes in Computer Science},
volume = {4079},
year = {2006},
isbn = {978-3-540-36635-5},
issn = {0302-9743}
}

[35]

Samir Genaim and Fausto Spoto.
Information Flow Analysis for Java Bytecode.
In Radhia Cousot, editor, Proceedings of the 6th International
Conference on Verification, Model Checking, and Abstract Interpretation
(VMCAI'05), volume 3385 of Lecture Notes in Computer Science, pages
346–362. Springer-Verlag, 2005.
[ bibtex 
abstract 
DOI ]
We present a flow and context sensitive compositional
information flow analysis for full (monothreaded)
Java bytecode. We base our analysis on the
transformation of the Java bytecode into a
controlflow graph of basic blocks of code
which makes explicit the complex features of the
Java bytecode. We represent information flows
through Boolean functions and hence implement an
accurate and efficient information flow analysis
through binary decision diagrams. To the best of
our knowledge, it is the first one for full Java
bytecode.
@inproceedings{GenaimS05,
author = {Samir Genaim and Fausto Spoto},
title = {{I}nformation {F}low {A}nalysis for {J}ava {B}ytecode},
booktitle = {Proceedings of the 6th International Conference on Verification, Model Checking, and Abstract Interpretation (VMCAI'05)},
pages = {346--362},
editor = {Radhia Cousot},
publisher = {Springer-Verlag},
series = {Lecture Notes in Computer Science},
volume = {3385},
year = {2005},
isbn = {978-3-540-24297-0},
issn = {0302-9743},
doi = {10.1007/978-3-540-30579-8_23}
}

[36]

Samir Genaim and Andy King.
Goal-Independent Suspension Analysis for Logic Programs
with Dynamic Scheduling.
In Pierpaolo Degano, editor, Proceedings of the 12th European
Symposium on Programming (ESOP'03), volume 2618 of Lecture Notes in
Computer Science, pages 84-98. Springer-Verlag, 2003.
[ bibtex 
abstract 
DOI ]
A goal-independent suspension analysis is presented
that infers a class of goals for which a logic
program with delay can be executed without
suspension. The crucial point is that the analysis
does not verify that an (abstract) goal does not
lead to non-suspension but infers (abstract) goals
that do not lead to non-suspension. The analysis
has applications in debugging, verification and
program optimisation.
@inproceedings{GenaimK03,
author = {Samir Genaim and Andy King},
title = {{G}oal-{I}ndependent {S}uspension {A}nalysis for
{L}ogic {P}rograms with {D}ynamic {S}cheduling},
editor = {Pierpaolo Degano},
booktitle = {Proceedings of the 12th European Symposium on Programming (ESOP'03)},
pages = {84--98},
publisher = {Springer-Verlag},
series = {Lecture Notes in Computer Science},
isbn = {978-3-540-00886-6},
issn = {0302-9743},
volume = {2618},
year = {2003},
doi = {10.1007/3-540-36575-3_7}
}

[37]

Maurice Bruynooghe, Michael Codish, Samir Genaim, and Wim Vanhoof.
Reuse of Results in Termination Analysis of Typed Logic
Programs.
In Manuel V. Hermenegildo and Germán Puebla, editors,
Proceedings of the 9th International Symposium on Static Analysis (SAS'02),
volume 2477 of Lecture Notes in Computer Science, pages 477-492.
Springer-Verlag, 2002.
[ bibtex 
abstract 
DOI ]
Recent works by the authors address the problem of
automating the selection of a candidate norm for the
purpose of termination analysis. These works
illustrate a powerful technique in which a
collection of simple type-based norms — one for
each data type in the program — are combined
together to provide the candidate norm. The current
paper extends these results by investigating type
polymorphism. We show that by considering
polymorphic types we reduce, without sacrificing
precision, the number of type-based norms which
should be combined to provide the candidate norm.
Moreover, we show that when a generic polymorphic
typed program component occurs in one or more
specific type contexts, we need not reanalyse
it. All of the information concerning its
termination and its effect on the termination of
other predicates in that context can be derived
directly from the context independent analysis of
that component based on norms derived from the
polymorphic types.
@inproceedings{BruynoogheCGV02,
author = {Maurice Bruynooghe and Michael Codish and Samir Genaim and Wim Vanhoof},
title = {{R}euse of {R}esults in {T}ermination {A}nalysis of {T}yped {L}ogic {P}rograms},
booktitle = {Proceedings of the 9th International Symposium on Static Analysis (SAS'02)},
editor = {Manuel V. Hermenegildo and Germ{\'a}n Puebla},
pages = {477--492},
publisher = {Springer-Verlag},
series = {Lecture Notes in Computer Science},
volume = {2477},
year = {2002},
isbn = {978-3-540-44235-6},
issn = {0302-9743},
doi = {10.1007/3-540-45789-5_33}
}

[38]

Samir Genaim, Michael Codish, John P. Gallagher, and Vitaly Lagoon.
Combining Norms to Prove Termination.
In Agostino Cortesi, editor, Proceedings of the 3rd
International Workshop on Verification, Model Checking, and Abstract
Interpretation (VMCAI'02), volume 2294 of Lecture Notes in Computer
Science, pages 126-138. Springer-Verlag, 2002.
[ bibtex 
abstract 
DOI ]
Automatic termination analysers typically measure the
size of terms applying norms which are mappings from
terms to the natural numbers. This paper illustrates
how to enable the use of size functions defined as
tuples of these simpler norm functions. This
approach enables us to simplify the problem of
deriving automatically a candidate norm with which
to prove termination. Instead of deriving a single,
complex norm function, it is sufficient to determine
a collection of simpler norms, some combination of
which leads to a proof of termination. We propose
that a collection of simple norms, one for each of
the recursive datatypes in the program, is often a
suitable choice. We first demonstrate the power of
combining norm functions and then the adequacy of
combining norms based on regular types.
@inproceedings{GenaimCGL02,
author = {Samir Genaim and Michael Codish and John P. Gallagher and Vitaly Lagoon},
title = {{C}ombining {N}orms to {P}rove {T}ermination},
booktitle = {Proceedings of the 3rd International Workshop on Verification, Model Checking, and Abstract Interpretation (VMCAI'02)},
editor = {Agostino Cortesi},
pages = {126--138},
publisher = {Springer-Verlag},
series = {Lecture Notes in Computer Science},
volume = {2294},
year = {2002},
isbn = {978-3-540-43631-7},
issn = {0302-9743},
doi = {10.1007/3-540-47813-2_9}
}

[39]

Samir Genaim and Michael Codish.
The Definite Approach to Dependency Analysis.
In David Sands, editor, Proceedings of the 10th European
Symposium on Programming (ESOP'01), volume 2028 of Lecture Notes in
Computer Science, pages 417-432. Springer-Verlag, 2001.
[ bibtex 
abstract 
DOI ]
We propose a new representation for the domain of
Definite Boolean functions. The key idea is to view
the set of models of a Boolean function as an
incidence relation between variables and
models. This enables two dual representations: the
usual one, in terms of models, specifying which
variables they contain; and the other in terms of
variables, specifying which models contain them. We
adopt the dual representation which provides a clean
theoretical basis for the definition of efficient
operations on Def in terms of classic ACI1
unification theory. Our approach illustrates in an
interesting way the relation of Def to the
well-known set-Sharing domain which can also be
represented in terms of sets of models and ACI1
unification. From the practical side, a prototype
implementation provides promising results which
indicate that this representation supports efficient
groundness analysis using Def formula. Moreover,
widening on this representation is easily defined.
@inproceedings{GenaimC01b,
author = {Samir Genaim and Michael Codish},
title = {{T}he {D}efinite {A}pproach to {D}ependency {A}nalysis},
booktitle = {Proceedings of the 10th European Symposium on Programming (ESOP'01)},
editor = {David Sands},
pages = {417--432},
publisher = {Springer-Verlag},
series = {Lecture Notes in Computer Science},
volume = {2028},
year = {2001},
isbn = {978-3-540-41862-7},
issn = {0302-9743},
doi = {10.1007/3-540-45309-1_28}
}

[40]

Michael Codish, Samir Genaim, Harald Søndergaard, and Peter J. Stuckey.
HigherPrecision Groundness Analysis.
In Philippe Codognet, editor, Proceedings of the 17th
International Conference on Logic Programming (ICLP'01), volume 2237 of
Lecture Notes in Computer Science, pages 135-149. Springer-Verlag, 2001.
[ bibtex 
abstract 
DOI ]
Groundness analysis of logic programs using Pos-based
abstract interpretation is one of the clear success
stories of the last decade in the area of logic
program analysis. In this work we identify two
problems with the Pos domain, the multiplicity and
sign problems, that arise independently in
groundness and uniqueness analysis. We describe how
these problems can be solved using an analysis based
on a domain Size for inferring term size relations.
However this solution has its own shortcomings
because it involves a widening operator which leads
to a loss of Pos information. Inspired by Pos, Size
and the LSign domain for abstract linear arithmetic
constraints we introduce a new domain LPos, and show
how it can be used for groundness and uniqueness
analysis. The idea is to use the sign information of
LSign to improve the widening of Size so that it
does not lose Pos information. We prove that the
resulting analyses using LPos are uniformly more
precise than those using Pos.
@inproceedings{CodishGSS01,
author = {Michael Codish and Samir Genaim and Harald
S{\o}ndergaard and Peter J. Stuckey},
title = {{H}igher-{P}recision {G}roundness {A}nalysis},
booktitle = {Proceedings of the 17th International Conference on Logic Programming (ICLP'01)},
editor = {Philippe Codognet},
pages = {135--149},
publisher = {Springer-Verlag},
series = {Lecture Notes in Computer Science},
volume = {2237},
year = {2001},
isbn = {978-3-540-42935-7},
issn = {0302-9743},
doi = {10.1007/3-540-45635-X_17}
}

[41]

Samir Genaim and Michael Codish.
Inferring Termination Conditions for Logic Programs Using
Backwards Analysis.
In Robert Nieuwenhuis and Andrei Voronkov, editors, Proceedings
of the 8th International Conference on Logic for Programming, Artificial
Intelligence, and Reasoning (LPAR'01), volume 2250 of Lecture Notes in
Computer Science, pages 685-694. Springer-Verlag, 2001.
[ bibtex 
abstract 
DOI ]
This paper focuses on the inference of modes for which
a logic program is guaranteed to terminate. This
generalizes traditional termination analysis where
an analyzer tries to verify termination for a
specified mode. The contribution is a methodology
which combines traditional termination analysis and
backwards analysis to obtain termination inference.
We demonstrate the application of this methodology
to enhance a termination analyzer to perform also
termination inference.
@inproceedings{GenaimC01a,
author = {Samir Genaim and Michael Codish},
title = {{I}nferring {T}ermination {C}onditions for {L}ogic
{P}rograms {U}sing {B}ackwards {A}nalysis},
booktitle = {Proceedings of the 8th International Conference on Logic for Programming, Artificial Intelligence, and Reasoning (LPAR'01)},
editor = {Robert Nieuwenhuis and Andrei Voronkov},
publisher = {Springer-Verlag},
series = {Lecture Notes in Computer Science},
volume = {2250},
isbn = {978-3-540-42957-9},
issn = {0302-9743},
year = {2001},
pages = {685--694},
doi = {10.1007/3-540-45653-8_47}
}

[42]

Samir Genaim and Michael Codish.
Incremental Refinement of Semantic Based Program Analysis
for Logic Programs.
In Jenny. Edwards, editor, Proceedings of the 22nd Australasian
Computer Science Conference (ACSC'99), volume 21 of Australian Computer
Science Communications, pages 348-359. Singapore ; New York : Springer,
1999.
[ bibtex 
abstract 
DOI 
.ps ]
This paper is concerned with the incremental
refinement of semantic based program analyses. We
address the following question: How can we use the
result of a less precise program analysis to
accelerate a more precise analysis of the same
program. We describe an approach based on abstract
program specialisation in which any approximation to
the result of a (more refined) program analysis is
used to specialise the evaluation process. When
analyses are based on abstract compilation this
boils down to adding constraints in the abstract
program which narrow its search space and accelerate
fix-point evaluation based on recomputation. We
illustrate several examples of incremental analysis
for logic programs.
@inproceedings{GenaimC99,
author = {Samir Genaim and Michael Codish},
title = {{I}ncremental {R}efinement of {S}emantic {B}ased
{P}rogram {A}nalysis for {L}ogic {P}rograms},
booktitle = {Proceedings of the 22nd Australasian Computer Science Conference (ACSC'99)},
editor = {Jenny Edwards},
series = {Australian Computer Science Communications},
volume = {21},
number = {1},
year = {1999},
pages = {348--359},
publisher = {Springer},
address = {Singapore},
ee = {http://www.worldcat.org/title/computer-science-99-proceedings-of-the-22nd-australasian-computer-science-conference-acsc-99-auckland-new-zealand-18-21-january-1999/oclc/41549430},
isbn = {978-981-4021-54-8}
}
