jpower.bib

@inproceedings{malloy-esem-2017,
  author = {Brian A. Malloy and James F. Power},
  title = {Quantifying the Transition from {Python} 2 to 3: An Empirical Study of {Python} Applications},
  booktitle = {ACM/IEEE International Symposium on Empirical Software Engineering and Measurement},
  year = 2017,
  month = "9--10~" # nov,
  address = {Toronto, Canada},
  abstract = {Python is one of the most popular modern programming languages.
In 2008 its authors introduced a new version of the language, Python 3.0,
that was not backward compatible with Python 2, initiating a transitional
phase for Python software developers. The study described in this paper
investigates the degree to which Python software developers are making the
transition from Python 2 to Python 3. We have developed a Python compliance
analyser, PyComply, and have assembled a large corpus of Python applications.
We use PyComply to measure and quantify the degree to which Python 3
features are being used, as well as the rate and context of their adoption.
In fact, Python software developers are not exploiting the new features and
advantages of Python 3, but rather are choosing to retain backward
compatibility with Python 2. Python developers are confining themselves to
a language subset, governed by the diminishing intersection of Python 2,
which is not under development, and Python 3, which is under development
with new features being introduced as the language continues to evolve.},
  pdf = {2017/malloy-esem-2017.pdf}
}
@inproceedings{farrell-sefm-2017,
  author = {Marie Farrell and Rosemary Monahan and James F. Power},
  title = {Specification Clones: An Empirical Study of the Structure of {Event-B} Specifications},
  booktitle = {Software Engineering and Formal Methods},
  year = 2017,
  volume = 10469,
  series = {Lecture Notes in Computer Science},
  pages = {152--167},
  month = "4--8~" # sep,
  doi = {10.1007/978-3-319-66197-1_10},
  pdf = {2017/farrell-sefm-2017.pdf},
  abstract = {In this paper we present an empirical study of formal
specifications written in the Event-B language. Our study is exploratory,
since it is the first study of its kind, and we formulate metrics for
Event-B specifications which quantify the diversity of such specifications
in practice. We pay particular attention to refinement as this is one of
the most notable features of Event-B. However, Event-B is less
well-equipped with other standardised modularisation constructs, and we
investigate the impact of this by detecting and analysing specification
clones at different levels. We describe our algorithm used to identify
clones at the machine, context and event level, and present results from
an analysis of a large corpus of Event-B specifications. Our study
contributes to furthering research into the area of metrics and
modularisation in Event-B.}
}
@inproceedings{farrell-icfem-2017,
  author = {Marie Farrell and Rosemary Monahan and James F. Power},
  title = {Combining {Event-B} and {CSP}: An Institution Theoretic Approach to Interoperability},
  booktitle = {International Conference on Formal Engineering Methods},
  year = 2017,
  month = "13--17~" # nov,
  address = {Xi'an, China},
  pdf = {2017/farrell-icfem-2017.pdf},
  abstract = {In this paper we present a formal framework designed to
facilitate interoperability between the Event-B specification language and
the process algebra CSP. Our previous work used the theory of institutions
to provide a mathematically sound framework for Event-B, and this enables
interoperability with CSP, which has already been incorporated into the
institutional framework. This paper outlines a comorphism relationship
between the institutions for Event-B and CSP, leveraging existing
tool-chains to facilitate verification. We compare our work to the combined
formalism Event-B|CSP and use a supporting example to illustrate the
benefits of our approach.}
}
@inproceedings{healy-fide16,
  author = {Andrew Healy and Rosemary Monahan and James F. Power},
  title = {Predicting {SMT} Solver Performance for Software Verification},
  booktitle = {3rd Workshop on Formal Integrated Development Environment},
  year = 2016,
  series = {Electronic Proceedings in Theoretical Computer Science},
  volume = {240},
  pages = {20--37},
  month = nov # "~8",
  address = {Limassol, Cyprus},
  pdf = {2016/healy-fide16.pdf},
  abstract = {The Why3 IDE and verification system facilitates the use of a
wide range of Satisfiability Modulo Theories (SMT) solvers through a
driver-based architecture. We present Where4: a portfolio-based approach to
discharge Why3 proof obligations. We use data analysis and machine learning
techniques on static metrics derived from program source code. Our approach
benefits software engineers by providing a single utility to delegate proof
obligations to the solvers most likely to return a useful result. It does
this in a time-efficient way using existing Why3 and solver installations -
without requiring low-level knowledge about SMT solver operation from the
user.},
  doi = {10.4204/EPTCS.240.2}
}
@inproceedings{farrell-wadt16,
  author = {Marie Farrell and Rosemary Monahan and James F. Power},
  title = {Providing a Semantics and Modularisation Constructs for {Event-B} using Institutions},
  booktitle = {23rd International Workshop on Algebraic Development Techniques},
  year = 2016,
  month = sep # "~21--24",
  address = {Gregynog, Wales},
  pdf = {farrell-wadt16.pdf},
  annote = {Abstract only},
  internal-note = {pdf path has no year directory prefix, unlike sibling entries (expected 2016/farrell-wadt16.pdf) -- verify against the actual file layout}
}
@article{cheng-sosym16,
  author = {Zheng Cheng and Rosemary Monahan and James F. Power},
  title = {Formalised {EMFTVM} bytecode language for sound verification of model transformations},
  journal = {Journal of Software and Systems Modeling},
  year = 2016,
  pdf = {2016/cheng-sosym16.pdf},
  doi = {10.1007/s10270-016-0553-x},
  pages = {1--29},
  abstract = {Model Driven Engineering is an effective approach for
addressing the full life cycle of software development. Model
transformation is widely acknowledged as one of its central ingredients.
With the increasing complexity of model transformations, it is urgent to
develop verification tools that prevent incorrect transformations from
generating faulty models. However, the development of sound verification
tools is a non-trivial task, due to unimplementable or erroneous execution
semantics encoded for the target model transformation language. In this
work, we develop a formalisation for the EMFTVM bytecode language by using
the Boogie intermediate verification language. It ensures the model
transformation language has an implementable execution semantics by
reliably prototyping the implementation of the model transformation
language. It also ensures the absence of erroneous execution semantics
encoded for the target model transformation language by using a
translation validation approach.}
}
@inproceedings{farrell-ifm16,
  author = {Marie Farrell and Rosemary Monahan and James F. Power},
  title = {Using the theory of institutions to integrate software models via refinement},
  booktitle = {PhD Symposium at iFM'16 on Formal Methods: Algorithms, Tools and Applications},
  year = 2016,
  month = jun # "~5",
  address = {Reykjavik, Iceland},
  annote = {Doctoral Symposium}
}
@inproceedings{healy-sac16,
  author = {Andrew Healy and Rosemary Monahan and James F. Power},
  title = {Evaluating the use of a general-purpose benchmark suite for domain-specific {SMT}-solving},
  booktitle = {31st ACM Symposium on Applied Computing},
  address = {Pisa, Italy},
  month = apr # "~4--8",
  pages = {1558--1561},
  year = 2016,
  doi = {10.1145/2851613.2851975},
  pdf = {2016/healy-sac16.pdf},
  abstract = {Benchmark suites are an important resource in validating
performance requirements for software. However, general-purpose suites may
be unsuitable for domain-specific purposes, and may provide an incorrect
indication of the software performance. This paper uses SMT-solvers
(Satisfiability Modulo Theories) as a case-study. Taking deductive software
verification as a specific application domain for SMT-solvers, we present
an approach to quantifying the difference between general-purpose and
domain-specific benchmark suites. We show that workload-based clustering of
benchmark programs increases the specificity of features tested by the
suite compared to the inherent hierarchy of a general-purpose suite.}
}
@inproceedings{healy-bctcs16,
  author = {Andrew Healy and Rosemary Monahan and James F. Power},
  title = {Evaluating {SMT} solvers for software verification},
  booktitle = {32nd British Colloquium of Theoretical Computer Science},
  year = 2016,
  month = mar # "~22--24",
  address = {Queens University, Belfast, Northern Ireland},
  annote = {Non peer-reviewed}
}
@inproceedings{farrell-bctcs16,
  author = {Marie Farrell and Rosemary Monahan and James F. Power},
  title = {A Logical Framework for Integrating Software Models via Refinement},
  booktitle = {32nd British Colloquium of Theoretical Computer Science},
  year = 2016,
  month = mar # "~22--24",
  address = {Queens University, Belfast, Northern Ireland},
  annote = {Non peer-reviewed}
}
@inproceedings{healy-arw15,
  author = {Andrew Healy and Rosemary Monahan and James F. Power},
  title = {Characterising the workload of {SMT} solvers for program verification},
  booktitle = {22nd Workshop on Automated Reasoning},
  year = 2015,
  pages = {17--18},
  month = apr # "~9--10",
  address = {Birmingham, UK},
  annote = {Abstract, non peer-reviewed}
}
@inproceedings{cheng-volt15,
  author = {Zheng Cheng and Rosemary Monahan and James F. Power},
  title = {Verifying {SimpleGT} Transformations Using an Intermediate Verification Language},
  booktitle = {4th International Workshop on the Verification Of Model Transformations},
  year = 2015,
  month = jul,
  pages = {12--19},
  url = {http://ceur-ws.org/Vol-1530/paper3.pdf},
  address = {L'Aquila, Italy}
}
@inproceedings{cheng-icmt15,
  author = {Zheng Cheng and Rosemary Monahan and James F. Power},
  title = {A Sound Execution Semantics for {ATL} via Translation Validation},
  booktitle = {8th International Conference on Model Transformation},
  year = 2015,
  month = jul # "~20--21",
  address = {L'Aquila, Italy},
  doi = {10.1007/978-3-319-21155-8_11},
  pages = {133--148},
  abstract = {In this work we present a translation validation approach to
encode a sound execution semantics for the ATL specification. Based on our
sound encoding, the goal is to reliably verify the ATL specification
against the specified OCL contracts. To demonstrate our approach, we have
developed the VeriATL verification system using the Boogie2 intermediate
verification language, which in turn provides access to the Z3 theorem
prover. Our system automatically encodes the execution semantics of each
ATL specification (as it appears in the ATL matched rules) into the
intermediate verification language. Then, to ensure the soundness of the
encoding, we verify that it faithfully represents the runtime behaviour of
its corresponding compiled implementation in terms of bytecode instructions
for the ATL virtual machine. The experiments demonstrate the feasibility of
our approach. They also illustrate how to automatically verify an ATL
specification against specified OCL contracts.}
}
@inproceedings{mooney-icep14,
  author = {Aidan Mooney and Joseph Duffin and Thomas Naughton and
                  Rosemary Monahan and James F. Power and Phil Maguire},
  title = {{PACT}: An initiative to introduce Computational Thinking in to second level education},
  booktitle = {International Conference on Engaging Pedagogy},
  year = 2014,
  address = {Athlone, Ireland},
  month = dec # "~5",
  abstract = {PACT is a partnership between researchers in the Department of
Computer Science at Maynooth University and teachers at selected
post-primary schools around Ireland. Starting in September 2013, seven
Irish secondary schools took part in a pilot study, delivering material
prepared by the PACT team to Transition Year students. Three areas of
Computer Science were identified as being key to delivering a successful
course in computational thinking, namely, programming, algorithms and
computability. An overview of the PACT module is provided, as well as
analysis of the feedback obtained from students and teachers involved in
delivering the initial pilot.},
  url = {http://eprints.nuim.ie/5880},
  pdf = {2014/mooney-icep14.pdf}
}
@inproceedings{power-cie14,
  author = {James F. Power},
  title = {An Early Completion Algorithm: {Thue}'s 1914 Paper on the Transformation of Symbol Sequences},
  booktitle = {10th Conference on Computability in Europe},
  pages = {343--346},
  month = jun # "~23--27",
  address = {Budapest, Hungary},
  year = 2014,
  doi = {10.1007/978-3-319-08019-2_35},
  pdf = {2014/power-cie14.pdf},
  abstract = {References to Thue's 1914 paper on string transformation
systems are based mainly on a small section of that work defining Thue
systems. A closer study of the remaining parts of that paper highlight a
number of important themes in the history of computing: the transition from
algebra to formal language theory, the analysis of the "computational
power" (in a pre-1936 sense) of rules, and the development of algorithms to
generate rule-sets.}
}
@inproceedings{dod-iccc14,
  author = {Diarmuid P. O'Donoghue and James F. Power and Sian O'Briain
                  and Feng Dong and Aidan Mooney and Donny Hurley and
                  Yalemisew Abgaz and Charles Markham},
  title = {Can a Computationally Creative System Create Itself?
                  {Creative} Artefacts and Creative Processes},
  booktitle = {International Conference on Computational Creativity},
  year = 2014,
  month = jun # "~10--13",
  pages = {146--155},
  isbn = {978-961-264-055-2},
  address = {Ljubljana, Slovenia},
  pdf = {2014/dod-iccc14.pdf},
  abstract = {This paper begins by briefly looking at two of the dominant
perspectives on computational creativity; focusing on the creative
artefacts and the creative processes respectively. We briefly describe two
projects; one focused on (artistic) creative artefacts the other on a
(scientific) creative process, to highlight some similarities and
differences in approach. We then look at a 2-dimensional model of Learning
Objectives that uses independent axes of knowledge and (cognitive)
processes. This educational framework is then used to cast artefact and
process perspectives into a common framework, opening up new possibilities
for discussing and comparing creativity between them. Finally, arising from
our model of creative processes, we propose a new and broad 4-level
hierarchy of computational creativity, which asserts that the highest level
of computational creativity involves processes whose creativity is
comparable to that of the originating process itself.}
}
@inproceedings{power-hapoc13,
  author = {James F. Power},
  title = {Exploring {Thue}'s 1914 paper on the transformation of strings according to given rules},
  booktitle = {International Conference on the History and Philosophy of Computing},
  year = 2013,
  address = {Paris, France},
  month = oct # "~28--31",
  pdf = {2013/power-hapoc13.pdf},
  abstract = {Rarely has any paper in the history of computing been given
such a prestigious introduction as that given to Axel Thue's paper by Emil
Post in 1947: "Alonzo Church suggested to the writer that a certain problem
of Thue might be proved unsolvable ...". However, only the first two pages
of Thue's paper are directly relevant to Post's proof, and, in this
abstract, I hope to shed some light on the remaining part, and to advocate
its relevance for the history of computing.}
}
@inproceedings{wu-tase13,
  author = {Hao Wu and Rosemary Monahan and James F. Power},
  title = {Exploiting attributed type graphs to generate metamodel instances using an {SMT} solver},
  booktitle = {7th International Symposium on Theoretical Aspects of Software Engineering},
  year = 2013,
  month = jul # "~1--3",
  pages = {175--182},
  doi = {10.1109/TASE.2013.31},
  pdf = {2013/wu-tase13.pdf},
  abstract = {In this paper we present an approach to generating instances
of metamodels using a Satisfiability Modulo Theories (SMT) solver as a
back-end engine. Our goal is to automatically translate a metamodel and its
invariants into SMT formulas which can be investigated for satisfiability
by an external SMT solver, with each satisfying assignment for SMT formulas
interpreted as an instance of the original metamodel. Our automated
translation works by interpreting a metamodel as a bounded Attributed Type
Graph with Inheritance (ATGI) and then deriving a finite universe of all
bounded attribute graphs typed over this bounded ATGI. The graph acts as an
intermediate representation which we then translate into SMT formulas. The
full translation process, from metamodels to SMT formulas, and then from
SMT instances back to metamodel instances, has been successfully automated
in our tool, with the results showing the feasibility of this approach.}
}
@inproceedings{cheng-comp12,
  author = {Zheng Cheng and Rosemary Monahan and James F. Power},
  title = {A Simple Complexity Measurement for Software Verification and Software Testing},
  year = 2012,
  pages = {28--31},
  url = {http://ceur-ws.org/Vol-873/papers/paper_8.pdf},
  booktitle = {International Workshop on Comparative Empirical Evaluation of Reasoning Systems},
  address = {Manchester, UK},
  month = jun # "~30",
  series = {CEUR Workshop Proceedings},
  volume = {873},
  pdf = {2012/cheng-comp12.pdf},
  abstract = {In this paper, we used a simple metric (i.e. Lines of Code) to
measure the complexity involved in software verification and software
testing. The goal is then, to argue for software verification over software
testing, and motivate a discussion of how to reduce the complexity involved
in software verification. We propose to reduce this complexity by
translating the software to a simple intermediate representation which can
be verified using an efficient verifier, such as Boogie.}
}
@inproceedings{odulaigh-weh12,
  author = {Keith {\'O} D{\'u}laigh and James F. Power and Peter J. Clarke},
  title = {Measurement of Exception-Handling Code: An Exploratory Study},
  booktitle = {5th International Workshop on Exception Handling},
  address = {Zurich, Switzerland},
  month = jun # "~9",
  year = 2012,
  doi = {10.1109/WEH.2012.6226602},
  pages = {55--61},
  abstract = {This paper presents some preliminary results from an empirical
study of 12 Java applications from the Qualitas corpus. We measure the
quantity and distribution of exception-handling constructs, and study their
change as the systems evolve through several versions.},
  pdf = {2012/odulaigh-weh12.pdf}
}
@article{clarke-stvr12,
  author = {Peter J. Clarke and James F. Power and Djuradj Babich and
                  Tariq M. King},
  title = {A Testing Strategy for Abstract Classes},
  journal = {Software Testing, Verification and Reliability},
  volume = {22},
  number = {3},
  year = 2012,
  pages = {147--169},
  abstract = {This paper presents a structured approach that supports the
testing of features in abstract classes, paying particular attention to
ensuring that the features tested are those defined in the abstract class.
Two empirical studies are performed on a suite of large Java programs and
the results presented. The first study analyzes the role of abstract
classes from a testing perspective. The second study investigates the
impact of the testing strategy on the programs in this suite to demonstrate
its feasibility and to comment on the pragmatics of its use.},
  pdf = {2012/clarke-stvr12-draft.pdf},
  doi = {10.1002/stvr.429}
}
@inproceedings{babich-sac11,
  author = {Djuradj Babich and Peter J. Clarke and James F. Power and
                  B. M. Golam Kibria},
  title = {Using a Class Abstraction Technique to Predict Faults in {OO}
                  Classes: A case study through six releases of the {Eclipse JDT}},
  booktitle = {ACM Symposium On Applied Computing},
  address = {TaiChung, Taiwan},
  month = mar # "~21--25",
  year = 2011,
  pages = {1419--1424},
  abstract = {In this paper, we propose an innovative suite of metrics based
on a class abstraction that uses a taxonomy for OO classes (CAT) to capture
aspects of software complexity through combinations of class
characteristics. We empirically validate their ability to predict fault
prone classes using fault data for six versions of the Java-based
open-source Eclipse Integrated Development Environment. We conclude that
this proposed CAT metric suite, even though it treats classes in groups
rather than individually, is as effective as the traditional Chidamber and
Kemerer metrics in identifying fault-prone classes.},
  doi = {10.1145/1982185.1982492},
  pdf = {2011/babich-sac11.pdf}
}
@inproceedings{bergin-sigcse11,
  author = {Thomas Whelan and Susan Bergin and James F. Power},
  title = {Teaching Discrete Structures: A systematic review of the literature},
  booktitle = {ACM Technical Symposium on Computer Science Education},
  address = {Dallas, Texas, USA},
  month = mar # "~9--12",
  year = 2011,
  pages = {275--280},
  abstract = {This survey paper reviews a large sample of publications on
the teaching of discrete structures and discrete mathematics in computer
science curricula. The approach is systematic, in that a structured search
of electronic resources has been conducted, and the results are presented
and quantitatively analysed. A number of broad themes in discrete
structures education are identified relating to course content, teaching
strategies and the means of evaluating the success of a course.},
  doi = {10.1145/1953163.1953247},
  pdf = {2011/whelan-sigcse11.pdf}
}
@inproceedings{wu-sleds10,
  author = {Hao Wu and Rosemary Monahan and James F. Power},
  title = {Test case generation for programming language metamodels},
  booktitle = {Doctoral Symposium of the 3rd International Conference on Software Language Engineering},
  address = {Eindhoven, Netherlands},
  month = oct # "~11",
  year = 2010,
  abstract = {One of the central themes in software language engineering is
the specification of programming languages, and domain-specific languages,
using a metamodel. One problem associated with the use of programming
language metamodels, and metamodels in general, is determining whether or
not they are correct. In this context, the question addressed by our
research is: given a programming language metamodel, how can we generate an
appropriate test suite to show that it is valid?},
  pdf = {2010/wu-sleds10.pdf},
  annote = {Doctoral Symposium}
}
@inproceedings{wu-mtatl10,
  author = {Hao Wu and Rosemary Monahan and James F. Power},
  title = {Using {ATL} in a tool-chain to calculate coverage data for {UML} class diagrams},
  booktitle = {2nd International Workshop on Model Transformation with {ATL}},
  address = {Malaga, Spain},
  pages = {60--64},
  month = jun # "~30",
  year = 2010,
  abstract = {In this paper we describe the use of ATL as part of a tool
chain that calculates coverage measures for UML class diagrams. The tool
chains uses the USE tool as a parser and validator for UML diagrams, and
represents the diagrams internally using the EMF framework.},
  pdf = {2010/wu-mtatl10.pdf}
}
@inproceedings{mcquillan:mtatl09,
  author = {Jacqueline A. McQuillan and James F. Power},
  title = {White-Box Coverage Criteria for Model Transformations},
  booktitle = {1st International Workshop on Model Transformation with {ATL}},
  address = {Nantes, France},
  month = jul # "~8--9",
  year = 2009,
  pages = {63--77},
  abstract = {Model transformations are core to MDE, and one of the key
aspects for all model transformations is that they are validated. In this
paper we develop an approach to testing model transformations based on
white-box coverage measures of the transformations. To demonstrate the use
of this approach we apply it to some examples from the ATL metamodel zoo.},
  pdf = {2009/mcquillan-mtatl09.pdf}
}
@article{aleksy:scp09,
  author = {Markus Aleksy and Vasco Amaral and Ralf Gitzel and James
                  F. Power and John Waldron},
  title = {Foreword to the special issue on principles and practices of programming in {Java}},
  journal = {Science of Computer Programming},
  volume = {74},
  number = {5-6},
  month = mar,
  year = 2009,
  pages = {259--260},
  doi = {10.1016/j.scico.2009.01.008}
}
@article{lambert-qapl08,
  author = {Jonathan Lambert and James F. Power},
  title = {Platform Independent Timing of {Java} Virtual Machine Bytecode Instructions},
  journal = {Electronic Notes in Theoretical Computer Science},
  volume = {220},
  number = {3},
  month = "12~" # dec,
  year = 2008,
  pages = {97--113},
  abstract = {The accurate measurement of the execution time of Java
bytecode is one factor that is important in order to estimate the total
execution time of a Java application running on a Java Virtual Machine. In
this paper we document the difficulties and solutions for the accurate
timing of Java bytecode. We also identify trends across the execution times
recorded for all imperative Java bytecodes. These trends would suggest that
knowing the execution times of a small subset of the Java bytecode
instructions would be sufficient to model the execution times of the
remainder. We first review a statistical approach for achieving high
precision timing results for Java bytecode using low precision timers and
then present a more suitable technique using homogeneous bytecode sequences
for recording such information. We finally compare instruction execution
times acquired using this platform independent technique against execution
times recorded using the read time stamp counter assembly instruction. In
particular our results show the existence of a strong linear correlation
between both techniques.},
  doi = {10.1016/j.entcs.2008.11.021},
  pdf = {2008/lambert-qapl08.pdf}
}
@article{hennessy-ese08,
  author = {Mark Hennessy and James F. Power},
  title = {Analysing the effectiveness of rule-coverage as a reduction criterion for test suites of grammar-based software},
  journal = {Empirical Software Engineering},
  volume = {13},
  number = {4},
  month = aug,
  year = 2008,
  pages = {343--368},
  abstract = {The term grammar-based software describes software whose input
can be specified by a context-free grammar. This grammar may occur
explicitly in the software, in the form of an input specification to a
parser generator, or implicitly, in the form of a hand-written parser.
Grammar-based software includes not only programming language compilers,
but also tools for program analysis, reverse engineering, software metrics
and documentation generation. Hence, ensuring their completeness and
correctness is a vital prerequisite for their use. In this paper we propose
a strategy for the construction of test suites for grammar based software,
and illustrate this strategy using the ISO C++ grammar. We use the concept
of grammar-rule coverage as a pivot for the reduction of an
implementation-based test suite, and demonstrate a significant decrease in
the size of this suite. The effectiveness of this reduced test suite is
compared to the original test suite with respect to code coverage and more
importantly, fault detection. This work greatly expands upon previous work
in this area and utilises large scale mutation testing to compare the
effectiveness of grammar-rule coverage to that of statement coverage as a
reduction criterion for test suites of grammar-based software. This work
finds that when grammar rule coverage is used as the sole criterion for
reducing test suites of grammar based software, the fault detection
capability of that reduced test suite is greatly diminished when compared
to other coverage criteria such as statement coverage.},
  doi = {10.1007/s10664-008-9067-7}
}
@inproceedings{mcquillan-stvv08,
  author = {Jacqueline A. McQuillan and James F. Power},
  title = {A Metamodel for the Measurement of Object-Oriented
                  Systems: An Analysis using {Alloy}},
  booktitle = {IEEE International Conference on Software Testing
                  Verification and Validation},
  address = {Lillehammer, Norway},
  month = {April 9-11},
  year = {2008},
  pages = {288--297},
  abstract = {This paper presents a MOF-compliant metamodel for
                  calculating software metrics and demonstrates how it
                  is used to generate a metrics tool that calculates
                  coupling and cohesion metrics. We also describe a
                  systematic approach to the analysis of MOF-compliant
                  metamodels and illustrate the approach using the
                  presented metamodel. In this approach, we express
                  the metamodel using UML and OCL and harness existing
                  automated tools in a framework that generates a Java
                  implementation and an Alloy specification of the
                  metamodel, and use this both to examine the
                  metamodel constraints, and to generate
                  instantiations of the metamodel. Moreover, we
                  describe how the approach can be used to generate
                  test data for any software based on a MOF-compliant
                  metamodel. We extend our framework to support this
                  approach and use it to generate a test suite for the
                  metrics calculation tool that is based on our
                  metamodel.},
  pdf = {2008/mcquillan-stvv08.pdf},
  doi = {10.1109/ICST.2008.58}
}
@article{aleksy-scp08,
  author = {Markus Aleksy and Ralf Gitzel and John Waldron and James
                  F. Power},
  title = {{PPPJ} 2006 special issue - foreword},
  journal = {Science of Computer Programming},
  volume = {70},
  number = {2-3},
  month = {February},
  year = {2008},
  pages = {87--88},
  abstract = {This issue of Science of Computer Programming is devoted
                  to selected papers from the International Conference
                  on Principles and Practices of Programming in Java
                  (PPPJ) 2006, held in Mannheim, Germany.},
  doi = {10.1016/j.scico.2007.07.002}
}
@article{kraft-scp07,
  author = {Nicholas A. Kraft and Brian A. Malloy and James F. Power},
  title = {A tool chain for reverse engineering {C++} applications},
  journal = {Science of Computer Programming},
  volume = {69},
  number = {1-3},
  month = {1 December},
  year = {2007},
  pages = {3--13},
  abstract = {We describe a tool chain that enables experimentation
                  and study of real C++ applications. Our tool chain
                  enables reverse engineering and program analysis by
                  exploiting gcc, and thus accepts any C++ application
                  that can be analyzed by the C++ parser and front end
                  of gcc. Our current test suite consists of large,
                  open-source applications with diverse problem
                  domains, including language processing and
                  gaming. Our tool chain is designed using a GXL-based
                  pipe-filter architecture; therefore, the individual
                  applications and libraries that constitute our tool
                  chain each provide a point of access. The preferred
                  point of access is the g4api Application Programming
                  Interface (API), which is located at the end of the
                  chain. g4api provides access to information about
                  the C++ program under study, including information
                  about declarations, such as classes (including
                  template instantiations); namespaces; functions; and
                  variables, statements, and some expressions. Access
                  to the information is via either a pointer to the
                  global namespace, or a list interface.},
  doi = {10.1016/j.scico.2007.01.012}
}
@inproceedings{clarke-issre07,
  author = {Peter J. Clarke and Djuradj Babich and Tariq M. King and
                  James F. Power},
  title = {Intra-Class Testing of Abstract Class Features},
  booktitle = {18th IEEE International Symposium on Software
                  Reliability Engineering},
  address = {Trollhattan, Sweden},
  month = {5-9 November},
  year = {2007},
  pages = {191--200},
  abstract = {This paper presents a structured approach that supports
                  the testing of features in abstract classes. Core to
                  the approach is a series of static analysis steps
                  that build a comprehensive view of the inter-class
                  dependencies in the system under test. We then
                  leveraged this information to define a test order
                  for the methods in an abstract class that minimizes
                  the number of stubs required during testing, and
                  clearly identifies the required functionality of
                  these stubs. Our approach is based on a
                  comprehensive taxonomy of object-oriented classes
                  that provides a framework for our analysis. First we
                  describe the algorithms to calculate the inter-class
                  dependencies and the test-order that minimizes stub
                  creation. Then we give an overview of our tool,
                  AbstractTestJ that implements our approach by
                  generating a test order for the methods in an
                  abstract Java class. Finally, we harness this tool
                  to provide an analysis of 12 substantial Java
                  applications that demonstrates both the feasibility
                  of our approach and the importance of this
                  technique.},
  pdf = {2007/clarke-issre07.pdf},
  doi = {10.1109/ISSRE.2007.4}
}
@inproceedings{kearney-seke07,
  author = {Steven Kearney and James F. Power},
  title = {{REM4j} - A framework for measuring the reverse engineering
                  capability of {UML} {CASE} tools},
  booktitle = {19th International Conference on Software Engineering
                  and Knowledge Engineering},
  address = {Boston, USA},
  month = {9-11 July},
  year = {2007},
  pages = {209--214},
  abstract = {Reverse Engineering is becoming increasingly important
                  in the software development world today as many
                  organizations are battling to understand and
                  maintain old legacy systems. Today's software
                  engineers have inherited these legacy systems which
                  they may know little about yet have to maintain,
                  extend and improve. Currently there is no framework
                  or strategy that an organisation can use to
                  determine which UML CASE tool to use. This paper
                  sets down such a framework, to allow organisations
                  to base their tool choice on this reliable
                  framework. We present the REM4j tool, an automated
                  tool, for benchmarking UML CASE tools, we then use
                  REM4j to carry out one such evaluation with eleven
                  UML CASE tools. This framework allows us to reach a
                  conclusion as to which is the most accurate and
                  reliable UML CASE tool.},
  pdf = {2007/kearney-seke07.pdf}
}
@inproceedings{mcquillan-models07,
  author = {Jacqueline A. McQuillan and James F. Power},
  title = {On the application of software metrics to {UML} models},
  booktitle = {Models in Software Engineering - Workshops and Symposia
                  at {MoDELS} 2006, Reports and Revised Selected
                  Papers},
  series = {Lecture Notes in Computer Science},
  volume = 4364,
  publisher = {Springer},
  year = {2007},
  pages = {217--226},
  abstract = {In this position paper we discuss a number of issues
                  relating to model metrics, with particular emphasis
                  on metrics for UML models. Our discussion is
                  presented as a series of nine observations where we
                  examine some of the existing work on applying
                  metrics to UML models, present some of our own work
                  in this area, and specify some topics for future
                  research that we regard as important. Furthermore,
                  we identify three categories of challenges for model
                  metrics and describe how our nine observations can
                  be partitioned into these categories.},
  pdf = {2007/mcquillan-models07.pdf},
  doi = {10.1007/978-3-540-69489-2_27}
}
@article{kraft-ist07,
  author = {Nicholas A. Kraft and Brian A. Malloy and James F. Power},
  title = {An Infrastructure to Support Interoperability in Reverse
                  Engineering},
  journal = {Information and Software Technology},
  volume = {49},
  number = {3},
  month = {March},
  year = {2007},
  pages = {292--307},
  abstract = {The reverse engineering community has recognized the
                  importance of interoperability, the cooperation of
                  two or more systems to enable the exchange and
                  utilization of data, and has noted that the current
                  lack of interoperability is a contributing factor to
                  the lack of adoption of available
                  infrastructures. To address the problems of
                  interoperability and reproducing previous results,
                  we present an infrastructure that supports
                  interoperability among reverse engineering tools and
                  applications. We present the design of our
                  infrastructure, including the hierarchy of schemas
                  that captures the interactions among graph
                  structures. We also develop and utilize our
                  implementation, which is designed using a GXL-based
                  pipe-filter architecture, to perform a case study
                  that demonstrates the feasibility of our
                  infrastructure.},
  pdf = {2007/kraft-ist07-draft.pdf},
  doi = {10.1016/j.infsof.2006.10.014}
}
@inproceedings{hennessy-cascon06,
  author = {Mark Hennessy and James F. Power},
  title = {Ensuring behavioural equivalence in test-driven porting},
  booktitle = {16th Annual International Conference on Computer
                  Science and Software Engineering: Dublin Symposium},
  address = {Dublin, Ireland},
  month = {October 17},
  year = {2006},
  abstract = {In this paper we present a test-driven approach to
                  porting code from one object-oriented language to
                  another. We derive an order for the porting of the
                  code, along with a testing strategy to verify the
                  behaviour of the ported system at intra and
                  inter-class level. We utilise the recently defined
                  methodology for porting C++ applications, eXtreme
                  porting, as a framework for porting. This defines a
                  systematic routine based upon porting and
                  unit-testing classes in turn. We augment this
                  approach by using Object Relation Diagrams to define
                  an order for porting that minimises class
                  stubbing. Since our strategy is class-oriented and
                  test-driven, we can ensure the structural
                  equivalence of the ported system, along with the
                  limited behavioural equivalence of each class. In
                  order to extend this to integration-level
                  equivalence, we exploit aspect-oriented programming
                  to generate UML sequence diagrams, and we present a
                  technique to compare such automatically-generated
                  diagrams for equivalence. We demonstrate and
                  evaluate our approach using a case study that
                  involves porting an application from C++ to Java.},
  url = {http://witanweb.ca/cascon2006/CFP_2006.jsp},
  pdf = {2006/hennessy-cascon06.pdf}
}
@inproceedings{mcquillan-msm06,
  author = {Jacqueline A. McQuillan and James F. Power},
  title = {Some observations on the application of software metrics
                  to {UML} models},
  booktitle = {MoDELS/UML Workshop on Model Size Metrics},
  address = {Genova, Italy},
  month = {October 3},
  year = {2006},
  abstract = {In this position paper we discuss some of the existing
                  work on applying metrics to UML models, present some
                  of our own work in this area, and specify some
                  topics for future research that we regard as
                  important.},
  pdf = {2006/mcquillan-msm06.pdf}
}
@inproceedings{mcquillan-pppj06,
  author = {Jacqueline A. McQuillan and James F. Power},
  title = {Experiences of using the {Dagstuhl Middle Metamodel} for
                  defining software metrics},
  booktitle = {Principles and Practice of Programming in Java},
  address = {Mannheim, Germany},
  month = {August 30 - September 1},
  year = {2006},
  pages = {194--198},
  abstract = {In this paper we report on our experiences of using the
                  Dagstuhl Middle Metamodel as a basis for defining a
                  set of software metrics. This approach involves
                  expressing the metrics as Object Constraint Language
                  queries over the metamodel. We provide details of a
                  system for specifying Java-based software metrics
                  through a tool that instantiates the metamodel from
                  Java class files and a tool that automatically
                  generates a program to calculate the expressed
                  metrics. We present details of an exploratory data
                  analysis of some cohesion metrics to illustrate the
                  use of our approach.},
  pdf = {2006/mcquillan-pppj06.pdf},
  doi = {10.1145/1168054.1168082}
}
@inproceedings{mcquillan-ecoop06,
  author = {Jacqueline A. McQuillan and James F. Power},
  title = {Towards re-usable metric definitions at the meta-level},
  booktitle = {PhD Workshop of the 20th European Conference on
                  Object-Oriented Programming},
  address = {Nantes, France},
  month = {July 3-7},
  year = {2006},
  abstract = {A large number of metrics for evaluating the quality of
                  software have been proposed in the
                  literature. However, there is no standard
                  terminology or formalism for defining metrics and
                  consequently many of the metrics proposed have some
                  ambiguity in their definitions. This hampers the
                  empirical validation of these metrics. To address
                  this problem, we generalise an existing approach to
                  defining metrics that is based on the Object
                  Constraint Language and the Unified Modelling Language
                  metamodel. We have developed a prototype tool called
                  DMML (Defining Metrics at the Meta Level) that
                  supports this approach and we present details of
                  this tool. To illustrate the approach, we present
                  formal definitions for the Chidamber and Kemerer
                  metrics suite.},
  pdf = {2006/mcquillan-ecoop06.pdf},
  annote = {PhD Workshop}
}
@article{malloy-stvr06,
  author = {Brian A. Malloy and James F. Power},
  title = {Exploiting design patterns to automate validation of class
                  invariants},
  journal = {Software Testing, Verification and Reliability},
  volume = {16},
  number = {2},
  month = {June},
  year = {2006},
  pages = {71--95},
  abstract = {In this paper, techniques are presented that exploit two
                  design patterns, the Visitor pattern and the
                  Decorator pattern, to automatically validate
                  invariants about the data attributes in a C++
                  class. To investigate the pragmatics involved in
                  using the two patterns, a study of an existing,
                  well-tested application, keystone, a parser and
                  front-end for the C++ language, is
                  presented. Results from the study indicate that
                  these two patterns provide flexibility in terms of
                  the frequency and level of granularity of validation
                  of the class invariants, which are expressed in the
                  Object Constraint Language, OCL. The quantitative
                  results measure the impact of these approaches and
                  the additional faults uncovered through validation
                  of the case study.},
  pdf = {2006/malloy-stvr06-draft.pdf},
  doi = {10.1002/stvr.327}
}
@article{mitchell-scp06,
  author = {\'Aine Mitchell and James F. Power},
  title = {A study of the influence of coverage on the relationship
                  between static and dynamic coupling metrics},
  journal = {Science of Computer Programming},
  volume = {59},
  number = {1-2},
  month = {January},
  year = {2006},
  pages = {4--25},
  abstract = {This paper examines the relationship between the static
                  coupling between objects (CBO) metric and some of
                  its dynamic counterparts. The dimensions of the
                  relationship for Java programs are investigated, and
                  the influence of instruction coverage on this
                  relationship is measured. An empirical evaluation of
                  14 Java programs taken from the SPEC JVM98 and the
                  JOlden benchmark suites is conducted using the
                  static CBO metric, six dynamic metrics and
                  instruction coverage data. The results presented
                  here confirm preliminary studies indicating the
                  independence of static and dynamic coupling metrics,
                  but point to a strong influence of coverage on the
                  relationship. Based on this, this paper suggests
                  that dynamic coupling metrics might be better
                  interpreted in the context of coverage measures,
                  rather than as stand-alone software metrics.},
  pdf = {2006/mitchell-scp06-draft.pdf},
  doi = {10.1016/j.scico.2005.07.002}
}
@inproceedings{kraft-wcre05,
  author = {Nicholas A. Kraft and Brian A. Malloy and James F. Power},
  title = {Toward an Infrastructure to Support Interoperability in
                  Reverse Engineering},
  booktitle = {12th Working Conference on Reverse Engineering},
  address = {Pittsburgh, Pennsylvania, USA},
  month = {November 8-11},
  year = {2005},
  pages = {196--205},
  abstract = {In this paper we present an infrastructure that supports
                  interoperability among various reverse engineering
                  tools and applications. We include an Application
                  Programmer's Interface that permits extraction of
                  information about declarations, including classes,
                  functions and variables, as well as information
                  about scopes, types and control statements in C++
                  applications. We also present a hierarchy of
                  canonical schemas that capture minimal functionality
                  for middle-level graph structures. This hierarchy
                  facilitates an unbiased comparison of results for
                  different tools that implement the same or a similar
                  schema. We have a repository, hosted by
                  SourceForge.net, where we have placed the artifacts
                  of our infrastructure.},
  doi = {10.1109/WCRE.2005.32},
  pdf = {2005/kraft-wcre05.pdf}
}
@inproceedings{hennessy-ase05,
  author = {Mark Hennessy and James F. Power},
  title = {An Analysis of Rule Coverage as a Criterion in Generating
                  Minimal Test Suites for Grammar-Based Software},
  booktitle = {20th IEEE/ACM International Conference on Automated
                  Software Engineering},
  address = {Long Beach, California, USA},
  month = {November 7-11},
  year = {2005},
  pages = {104--113},
  abstract = {The term grammar-based software describes software whose
                  input can be specified by a context-free
                  grammar. This grammar may occur explicitly in the
                  software, in the form of an input specification to a
                  parser generator, or implicitly, in the form of a
                  hand-written parser, or other input-verification
                  routines. Grammar-based software includes not only
                  programming language compilers, but also tools for
                  program analysis, reverse engineering, software
                  metrics and documentation generation. Such tools
                  often play a crucial role in automated software
                  development, and ensuring their completeness and
                  correctness is a vital prerequisite for their
                  use. In this paper we propose a strategy for the
                  construction of test suites for grammar based
                  software, and illustrate this strategy using the ISO
                  C++ grammar. We use the concept of rule coverage as
                  a pivot for the reduction of implementation-based
                  and specification-based test suites, and demonstrate
                  a significant decrease in the size of these
                  suites. To demonstrate the validity of the approach,
                  we use the reduced test suite to analyze three
                  grammar-based tools for C++. We compare the
                  effectiveness of the reduced test suite with the
                  original suite in terms of code coverage and fault
                  detection.},
  doi = {10.1145/1101908.1101926},
  pdf = {2005/hennessy-ase05.pdf}
}
@inproceedings{kraft-dagstuhl05,
  author = {Nicholas A. Kraft and Brian A. Malloy and James F. Power},
  title = {{g4re}: Harnessing {GCC} to Reverse Engineer {C++}
                  Applications},
  booktitle = {Transformation Techniques in Software Engineering:
                  Dagstuhl Seminar Proceedings No. 05161},
  address = {IBFI, Schloss Dagstuhl, Germany},
  month = {April 17-22},
  year = {2005},
  abstract = {In this paper, we describe g4re, our tool chain that
                  exploits GENERIC, an intermediate format
                  incorporated into the gcc C++ compiler, to
                  facilitate analysis of real C++ applications. The
                  gcc GENERIC representation is available through a
                  file generated for each translation unit (tu), and
                  g4re reads each tu file and constructs a
                  corresponding Abstract Semantic Graph (ASG). Since
                  tu files can be prohibitively large, ranging from 10
                  megabytes for a "hello world" program, to 18
                  gigabytes for a version of Mozilla Thunderbird, we
                  describe our approach for reducing the size of the
                  generated ASG.},
  url = {http://drops.dagstuhl.de/portals/05161/},
  pdf = {2005/kraft-dagstuhl05.pdf}
}
@inproceedings{malloy-vlhcc05,
  author = {Brian A. Malloy and James F. Power},
  title = {Using a Molecular Metaphor to Facilitate Comprehension of
                  {3D} Object Diagrams},
  booktitle = {IEEE Symposium on Visual Languages and Human-Centric
                  Computing},
  address = {Dallas, Texas, USA},
  month = {September 20-24},
  year = {2005},
  pages = {233--240},
  abstract = {This paper presents a strategy for the visualization of
                  dynamic object relationships in Java programs. The
                  metaphor of a chemical molecule is used to aid
                  comprehension, and to help in reducing the size of
                  the object graph. Our strategy has been implemented
                  by dynamically instrumenting Java bytecode to
                  collect trace data, which is then analyzed and
                  visualized in 3D using VRML. Quantitative and
                  graphical results are presented, based on an
                  analysis of programs in the SPEC JVM98 and JOlden
                  benchmark suites.},
  doi = {10.1109/VLHCC.2005.66},
  pdf = {2005/malloy-vlhcc05.pdf}
}
@article{sinclair-entcs05,
  author = {David Sinclair and James F. Power},
  title = {Specifying and Verifying Communications Protocols using
                  Mixed Intuitionistic Linear Logic},
  journal = {Electronic Notes in Theoretical Computer Science},
  volume = {133},
  month = {31 May},
  year = {2005},
  pages = {255--273},
  abstract = {This paper presents an outline specification of the IP
                  and TCP communication protocols in mixed
                  intuitionistic linear logic and describes how this
                  logic can be used to prove some properties of both
                  protocols. We have previously presented a
                  specification of IP in using commutative linear
                  logic; in this paper we extend this specification
                  considerably to include TCP, which, in turn,
                  necessitates the use of non-commutative operators.},
  doi = {10.1016/j.entcs.2004.08.068},
  pdf = {2004/sinclair-fmics04.pdf}
}
@inproceedings{malloy-softvis05,
  author = {Brian A. Malloy and James F. Power},
  title = {Exploiting {UML} dynamic object modeling for the
                  visualization of {C++} programs},
  booktitle = {ACM Symposium on Software Visualization},
  address = {St. Louis, Missouri, USA},
  month = {May 15-16},
  year = {2005},
  pages = {105--114},
  abstract = {In this paper we present an approach to modeling and
                  visualizing the dynamic interactions among objects
                  in a C++ application. We exploit UML diagrams to
                  expressively visualize both the static and dynamic
                  properties of the application. We make use of a
                  class diagram and call graph of the application to
                  select the parts of the application to be modeled,
                  thereby reducing the number of objects and methods
                  under consideration with a concomitant reduction in
                  the cognitive burden on the user of our system. We
                  use aspects to insert probes into the application to
                  enable profiling of the interactions of objects and
                  methods and we visualize these interactions by
                  providing sequence and communication diagrams for
                  the parts of the program under consideration. We
                  complement our static selectors with dynamic
                  selectors that enable the user to further filter
                  objects and methods from the sequence and
                  communication diagrams, further enhancing the
                  cognitive economy of our system. A key feature of
                  our approach is the provision for dynamic
                  interaction with both the profiler and the
                  application. Interaction with the profiler enables
                  filtering of methods and objects. Interaction with
                  the application enables the user to supply input to
                  the application to provide direction and enhance
                  comprehension or debugging.},
  doi = {10.1145/1056018.1056033},
  pdf = {2005/malloy-softvis05.pdf}
}
@article{power-scp05,
  author = {James F. Power and John T. Waldron},
  title = {Editorial: Special Issue on the Principles and Practice of
                  Programming in {Java}},
  journal = {Science of Computer Programming},
  volume = {54},
  number = {1},
  month = {January},
  year = {2005},
  pages = {1--2},
  abstract = {It gives us great pleasure to present this special
                  issue, containing papers from the conference on the
                  Principles and Practice of Programming in Java held
                  in Kilkenny City, Ireland, in June 2003. All authors
                  of full papers presented at PPPJ 2003 were invited
                  to submit revised and extended version of their
                  papers for this special issue. These papers were
                  rigorously reviewed, resulting in the six papers
                  presented here.},
  doi = {10.1016/j.scico.2004.05.002},
  pdf = {2005/power-scp05.pdf}
}
@inproceedings{brown-iasted05,
  author = {Stephen Brown and \'Aine Mitchell and James F. Power},
  title = {A Coverage Analysis of {Java} Benchmark Suites},
  booktitle = {The IASTED International Conference on Software
                  Engineering},
  address = {Innsbruck, Austria},
  month = {February 15-17},
  year = {2005},
  pages = {144--150},
  abstract = {The Java programming language provides an almost ideal
                  environment for both static and dynamic analysis,
                  being easy to parse, and supporting a standardised,
                  easily-profiled virtual environment. In this paper
                  we study the relationship between results obtainable
                  from static and dynamic analysis of Java programs,
                  and in particular the difficulties of correlating
                  static and dynamic results. As a foundation for this
                  study, we focus on various criteria related to
                  run-time code coverage, as commonly used in test
                  suite analysis. We have implemented a dynamic
                  coverage analysis tool for Java programs, and we use
                  it to evaluate several standard Java benchmark
                  suites using line, instruction and branch coverage
                  criteria. We present data indicating a considerable
                  variance in static and dynamic analysis results
                  between these suites, and even between programs in
                  these suites.},
  pdf = {2005/brown-iasted05.pdf}
}
@inproceedings{mitchell-sac05,
  author = {\'Aine Mitchell and James F. Power},
  title = {Using Object-Level Run-Time Metrics to Study Coupling
                  Between Objects},
  booktitle = {20th Annual ACM Symposium on Applied Computing},
  address = {Santa Fe, New Mexico},
  month = {March 13-17},
  year = {2005},
  pages = {1456--1462},
  abstract = {In this paper we present an investigation into the
                  run-time behaviour of objects in Java programs,
                  using specially adapted coupling metrics. We
                  identify objects from the same class that exhibit
                  non-uniform coupling behaviour when measured
                  dynamically. We define a number of object level
                  run-time metrics, based on the static Chidamber and
                  Kemerer coupling between objects (CBO)
                  measure. These new metrics seek to quantify coupling
                  at different layers of granularity, that is at
                  class-class and object-class level. We outline our
                  method of collecting such metrics and present a
                  study of the programs from the JOlden benchmark
                  suite as an example of their use. A number of
                  statistical techniques, principally agglomerative
                  hierarchical clustering analysis, are used to
                  facilitate the identification of such objects.},
  doi = {10.1145/1066677.1067010},
  pdf = {2005/mitchell-sac05.pdf}
}
@article{gregg-ccpe05,
  author = {David Gregg and James F. Power and John Waldron},
  title = {{A method-level comparison of the Java Grande and SPEC
                  JVM98 benchmark suites}},
  journal = {Concurrency and Computation: Practice and Experience},
  volume = {17},
  number = {7--8},
  month = {June-July},
  year = {2005},
  pages = {757--773},
  abstract = {In this paper we seek to provide a foundation for the
                  study of the level of use of object-oriented
                  techniques in Java programs in general, and
                  scientific applications in particular. In particular
                  we investigate the profiles of Java programs from a
                  number of perspectives, including the use of class
                  library methods, the size of methods called, the
                  mode of invoke instruction used and the
                  polymorphicity of call sites. We also present a
                  categorisation of the nature of small-sized methods
                  used in Java programs. We compare the Java Grande
                  and SPEC JVM98 benchmark suites, and note a
                  significant difference in the nature and composition
                  of these suites, with the programs from the Java
                  Grande suite demonstrating a less object-oriented
                  approach.},
  doi = {10.1002/cpe.846},
  pdf = {2005/gregg-ccpe05-draft.pdf}
}
@article{power-sme04,
  author = {James F. Power and Brian A. Malloy},
  title = {{A metrics suite for grammar-based software}},
  journal = {Software Maintenance and Evolution: Research and
                  Practice},
  volume = {16},
  number = {6},
  month = {November/December},
  year = {2004},
  pages = {405--426},
  doi = {10.1002/smr.293},
  abstract = {One approach to measuring and managing the complexity of
                  software, as it evolves over time, is to exploit
                  software metrics. Metrics have been used to estimate
                  the complexity of the maintenance effort, to
                  facilitate change impact analysis, and as an
                  indicator for automatic detection of a
                  transformation that can improve the quality of a
                  system. However, there has been little effort
                  directed at applying software metrics to the
                  maintenance of grammar-based software applications,
                  such as compilers, editors, program comprehension
                  tools and embedded systems. In this paper, we adapt
                  the software metrics that are commonly used to
                  measure program complexity and apply them to the
                  measurement of the complexity of grammar-based
                  software applications. Since the behaviour of a
                  grammar-based application is typically choreographed
                  by the grammar rules, the measure of complexity that
                  our metrics provide can guide maintainers in
                  locating problematic areas in grammar-based
                  applications.},
  url = {http://www3.interscience.wiley.com/cgi-bin/jhome/77004487},
  pdf = {2004/power-sme04.pdf}
}
@article{power-jucs04,
  author = {James F. Power and David Sinclair},
  title = {{A Formal Model of Forth Control Words in the
                  Pi-Calculus}},
  journal = {Journal of Universal Computer Science},
  volume = {10},
  number = {9},
  month = {September},
  year = {2004},
  pages = {1272--1293},
  doi = {10.3217/jucs-010-09-1272},
  abstract = {In this paper we develop a formal specification of
                  aspects of the Forth programming language. We
                  describe the operation of the Forth compiler as it
                  translates Forth control words, dealing in
                  particular with the interpretation of immediate
                  words during compilation. Our goal here is to
                  provide a basis for the study of safety properties
                  of embedded systems, many of which are constructed
                  using Forth or Forth-like languages. To this end we
                  construct a model of the Forth compiler in the
                  pi-calculus, and have simulated its execution by
                  animating this model using the Pict programming
                  language.},
  url = {http://www.jucs.org/jucs_10_9},
  pdf = {2004/power-jucs04.pdf}
}
@inproceedings{donoghue-esa04,
  author = {Diarmuid O'Donoghue and James F. Power},
  title = {{Identifying and evaluating a generic set of
                  superinstructions for embedded Java programs}},
  booktitle = {International Conference on Embedded Systems and
                  Applications},
  address = {Las Vegas, Nevada, USA},
  month = {June 21-24},
  year = {2004},
  pages = {192-198},
  abstract = {In this paper we present an approach to the optimisation
                  of interpreted Java programs using
                  superinstructions. Unlike existing techniques, we
                  examine the feasibility of identifying a generic set
                  of superinstructions across a suite of programs, and
                  implementing them statically on a JVM. We formally
                  present the sequence analysis algorithm and we
                  describe the resulting sets of superinstructions for
                  programs from the embedded CaffeineMark benchmark
                  suite. We have implemented the approach on the Jam
                  VM, a lightweight JVM, and we present results
                  showing the level of speedup possible from this
                  approach.},
  pdf = {2004/donoghue-esa04.pdf}
}
@inproceedings{mitchell-serp04,
  author = {\'Aine Mitchell and James F. Power},
  title = {{Run-Time Cohesion Metrics: An Empirical Investigation}},
  booktitle = {International Conference on Software Engineering
                  Research and Practice},
  address = {Las Vegas, Nevada, USA},
  month = {June 21-24},
  year = {2004},
  pages = {532--537},
  abstract = {Cohesion is one of the fundamental measures of the
                  'goodness' of a software design. The most accepted
                  and widely studied object-oriented cohesion metric
                  is Chidamber and Kemerer's Lack of Cohesion in
                  Methods measure. However due to the nature of
                  object-oriented programs, static design metrics fail
                  to quantify all the underlying dimensions of
                  cohesion, as program behaviour is a function of its
                  operational environment as well as the complexity of
                  the source code. For these reasons two run-time
                  object-oriented cohesion metrics are described in
                  this paper, and applied to Java programs from the
                  SPECjvm98 benchmark suite. A statistical analysis is
                  conducted to assess the fundamental properties of
                  the measures and investigate whether they are
                  redundant with respect to the static cohesion
                  metric. Results to date indicate that run-time
                  cohesion metrics can provide an interesting and
                  informative qualitative analysis of a program and
                  complement existing static cohesion metrics.},
  pdf = {2004/mitchell-serp04.pdf}
}
@inproceedings{mitchell-pppj04,
  author = {\'Aine Mitchell and James F. Power},
  title = {{An Empirical Investigation into the Dimensions of Run-Time
  Coupling Metrics in Java programs}},
  booktitle = {The Third International Conference on the Principles
                  and Practice of Programming in Java},
  address = {Las Vegas, Nevada, USA},
  month = {June 16-18},
  year = {2004},
  pages = {9--14},
  abstract = {Software quality is an important external software
                  attribute that is difficult to measure
                  objectively. Several studies have identified a clear
                  empirical relationship between static coupling
                  metrics and software quality. However due to the
                  nature of object-oriented programs, static metrics
                  fail to quantify all the underlying dimensions of
                  coupling, as program behaviour is a function of its
                  operational environment as well as the complexity of
                  the source code. In this paper a set of run-time
                  object-oriented coupling metrics are described and
                  empirically validated in terms of their usefulness
                  in predicting software quality. Preliminary results
                  indicate that run-time coupling metrics can provide
                  an interesting and informative qualitative analysis
                  of a program and complement existing static coupling
                  metrics.},
  url = {http://portal.acm.org/citation.cfm?id=1071568},
  pdf = {2004/mitchell-pppj04.pdf}
}
@inproceedings{mitchell-wisict04,
  author = {\'Aine Mitchell and James F. Power},
  title = {{An approach to quantifying the run-time behaviour of Java
                  GUI applications}},
  booktitle = {Winter International Symposium on Information and
                  Communication Technologies},
  address = {Cancun, Mexico},
  month = {January 5-8},
  year = {2004},
  abstract = {This paper outlines a new technique for collecting
                  dynamic trace information from Java GUI
                  programs. The problems of collecting run-time
                  information from such interactive applications in
                  comparison with traditional batch style execution
                  benchmark programs is outlined. The possible utility
                  of such run-time information is discussed and from
                  this a number of simple run-time metrics are
                  suggested. The metrics results for a small
                  CelsiusConverter Java GUI program are illustrated to
                  demonstrate the viability of such an analysis.},
  url = {http://portal.acm.org/citation.cfm?id=984737},
  pdf = {2004/mitchell-wisict04.pdf}
}
@article{malloy-ddj03,
  author = {Brian A. Malloy and James F. Power and Tanton H. Gibbs},
  title = {{C++ Compilers and ISO Conformance}},
  journal = {Dr. Dobb's Journal},
  volume = {28},
  number = {11},
  month = {November},
  year = {2003},
  pages = {54--60},
  abstract = {In this article we revisit the C++ conformance study we
                  presented in "Testing C++ Compilers for ISO Language
                  Conformance" (DDJ, June 2002). In doing so, we
                  provide some measure of conformance to the ISO
                  Standard for eight C++ compilers: Borland 6.0,
                  Comeau 4.3.2, EDG 3.2, gcc 3.3, Intel 7.1, PGCC
                  4.1-2, Visual C++ 7.1 and Watcom 1.0.},
  url = {http://www.ddj.com/articles/2003/0311/}
}
@inproceedings{mitchell-qaoose03,
  author = {\'Aine Mitchell and James F. Power},
  title = {{Toward a definition of run-time object-oriented metrics}},
  booktitle = {7th ECOOP Workshop on Quantitative Approaches in
                  Object-Oriented Software Engineering},
  address = {Darmstadt, Germany},
  month = {July 21-25},
  year = {2003},
  abstract = {This position paper outlines a programme of research
                  based on the quantification of run-time elements of
                  Java programs. In particular, we adapt two common
                  object-oriented metrics, coupling and cohesion, so
                  that they can be applied at run-time. We demonstrate
                  some preliminary results of our analysis on programs
                  from the SPEC JVM98 benchmark suite.},
  pdf = {2003/mitchell-qaoose03.pdf}
}
@inproceedings{hennessy-iwpc03,
  author = {Mark Hennessy and Brian Malloy and James Power},
  title = {{gccXfront: Exploiting gcc as a Front End for Program
                  Comprehension Tools via XML/XSLT}},
  booktitle = {11th IEEE International Workshop on Program
                  Comprehension, (Tool Demo)},
  address = {Portland, Oregon, USA},
  month = {May 9-11},
  year = {2003},
  pages = {298--299},
  abstract = {Parsing programming languages is an essential component
                  of the front end of most program comprehension
                  tools. Languages such as C++ can be difficult to
                  parse and so it can prove useful to re-use existing
                  front ends such as those from the GNU compiler
                  collection, gcc. We have modified gcc to provide
                  syntactic tags in XML format around the source code
                  which can greatly enhance our comprehension of the
                  program structure. Further, by using XML
                  transformation stylesheets, the XML outputted by our
                  modified gcc can be translated into a more readable
                  format. Our tool, gccXfront leverages the power and
                  portability of the gcc suite, since any C, C++,
                  Objective C or Java program can be processed using
                  gcc. Our tool can thus act as a bridge between gcc
                  and other program comprehension tools that accept
                  XML formatted input.},
  url = {http://ieeexplore.ieee.org/xpl/tocresult.jsp?isNumber=26998},
  pdf = {2003/hennessy-iwpc03.pdf},
  annote = {Tool Demo}
}
@article{gregg-ccpe03,
  author = {David Gregg and James Power and John Waldron},
  title = {{Platform independent dynamic Java virtual machine
                  analysis: the Java Grande Forum benchmark suite}},
  journal = {Concurrency and Computation: Practice and Experience},
  volume = {15},
  number = {3--5},
  month = {March},
  year = {2003},
  pages = {459--484},
  doi = {10.1002/cpe.666},
  abstract = {In this paper we present a platform independent analysis
                  of the dynamic profiles of Java programs when
                  executing on the Java Virtual Machine. The Java
                  programs selected are taken from the Java Grande
                  Forum benchmark suite and five different
                  Java-to-bytecode compilers are analysed. The results
                  presented describe the dynamic instruction usage
                  frequencies, as well as the sizes of the local
                  variable, parameter and operand stacks during
                  execution on the JVM. These results, presenting a
                  picture of the actual (rather than presumed)
                  behaviour of the JVM, have implications both for the
                  coverage aspects of the Java Grande benchmark
                  suites, for the performance of the Java-to-bytecode
                  compilers and for the design of the JVM.},
  url = {http://www3.interscience.wiley.com/cgi-bin/jissue/102530974}
}
@article{malloy-spe03,
  author = {Brian A. Malloy and Tanton H. Gibbs and James F. Power},
  title = {{Decorating tokens to facilitate recognition of ambiguous
                  language constructs}},
  journal = {Software: Practice and Experience},
  volume = {33},
  number = {1},
  month = {January},
  year = {2003},
  pages = {19--39},
  doi = {10.1002/spe.493},
  abstract = {Software tools are fundamental to the comprehension,
                  analysis, testing and debugging of application
                  systems. A necessary first step in the development
                  of many tools is the construction of a parser
                  front-end that can recognize the implementation
                  language of the system under development. In this
                  paper, we describe our use of token decoration to
                  facilitate recognition of ambiguous language
                  constructs. We apply our approach to the C++
                  language since its grammar is replete with ambiguous
                  derivations such as the declaration/expression and
                  template-declaration/expression ambiguity. We
                  describe our implementation of a parser front-end
                  for C++, keystone, and we describe our results in
                  decorating tokens for our test suite including the
                  examples from Clause Three of the C++ standard. We
                  are currently exploiting the keystone front-end to
                  develop a taxonomy for implementation-based class
                  testing and to reverse-engineer Unified Modeling
                  Language (UML) class diagrams.},
  url = {http://www3.interscience.wiley.com/cgi-bin/issuetoc?ID=101526178}
}
@book{power-raijt02,
  editor = {James F. Power and John T. Waldron},
  title = {{Recent Advances in Java
Technology: Theory, Application, Implementation}},
  publisher = {Computer Science Press, Trinity College Dublin},
  month = {November},
  year = {2002},
  abstract = {Since its launch, the Java programming language has
                  quickly established itself as a backbone technology
                  in many areas of computer science and information
                  systems. By leveraging the power, reliability and
                  portability of the Java framework, applications
                  developers have harnessed a means of creating robust
                  and mobile applications. In RECENT ADVANCES IN JAVA
                  TECHNOLOGY we investigate the present day widespread
                  use of Java and Java related technologies to provide
                  a platform for cutting-edge developments in
                  software.},
  pdf = {2002/power-raijt-toc.pdf}
}
@inproceedings{gregg-jg02,
  author = {D. Gregg and J.F. Power and J.T. Waldron},
  title = {{Measuring the impact of
object-oriented techniques in Grande applications:
a method-level analysis}},
  booktitle = {Joint ACM-ISCOPE Conference on Java Grande (Poster
                  Session)},
  address = {Seattle, Washington, USA},
  month = {November 3-5},
  year = {2002},
  pages = {229},
  abstract = {In this work we seek to provide a foundation for the
                  study of the level of use of object-oriented
                  techniques in Java programs in general, and
                  scientific applications in particular. Specifically,
                  we focus on the use of small methods, and the
                  frequency with which they are called, since this
                  forms the basis for the study of method inlining, an
                  important optimisation technique. We compare the
                  Grande and SPEC benchmark suites, and note a
                  significant difference in the nature and composition
                  of these suites.},
  pdf = {2002/gregg-jgi02.pdf},
  url = {http://portal.acm.org/citation.cfm?id=583841},
  annote = {Poster}
}
@inproceedings{power-wcre02,
  author = {J.F. Power and B.A. Malloy},
  title = {{Program annotation in XML: a parser-based approach}},
  booktitle = {9th IEEE Working Conference on Reverse Engineering},
  address = {Richmond, Virginia, USA},
  month = {October 28 - November 1},
  year = {2002},
  pages = {190-198},
  abstract = {In this paper we describe a technique that can be used
                  to annotate source code with syntactic tags in XML
                  format. This is achieved by modifying the parser
                  generator bison to emit these tags for an arbitrary
                  grammar. We also discuss an immediate application of
                  this technique, a portable modification of the gcc
                  compiler, that allows for XML output for C,
                  Objective C, C++ and Java programs. While our
                  approach does not have the same semantic richness as
                  other approaches, it does have the advantage of
                  being language independent, and thus re-usable in a
                  number of different domains.},
  pdf = {2002/power-wcre02.pdf},
  url = {http://ieeexplore.ieee.org/xpl/tocresult.jsp?isNumber=26362}
}
@inproceedings{gibbs-ase02,
  author = {T.H. Gibbs and B.A. Malloy and J.F. Power},
  title = {{Automated Validation of Class Invariants in C++
                  Applications}},
  booktitle = {17th IEEE International Conference on Automated
                  Software Engineering},
  address = {Edinburgh, UK},
  month = {September 23-27},
  year = {2002},
  pages = {205--214},
  abstract = {In this paper, we describe a non-invasive approach for
                  validation of class invariants in C++
                  applications. Our approach is fully automated so
                  that the user need only supply the class invariants
                  for each class hierarchy to be checked and our
                  validator constructs an InvariantVisitor, a
                  variation of the Visitor Pattern, and an
                  InvariantFacilitator. Instantiations of the
                  InvariantVisitor and InvariantFacilitator classes
                  encapsulate the invariants in C++ statements and
                  facilitate the validation of the invariants. We
                  describe both our approach and our results of
                  validating invariants in keystone, a well tested
                  parser front-end for C++.},
  pdf = {2002/gibbs-ase02.pdf},
  url = {http://ieeexplore.ieee.org/xpl/tocresult.jsp?isNumber=24593}
}
@inproceedings{malloy-saicsit02,
  author = {B. A. Malloy and J. F. Power and J. T. Waldron},
  title = {{Applying Software Engineering Techniques to Parser
                  Design}},
  booktitle = {Conference of the South African Institute of Computer
                  Scientists and Information Technologists},
  address = {Port Elizabeth, South Africa},
  month = {September 16-18},
  year = {2002},
  pages = {75--82},
  abstract = {In this paper we describe the development of a parser
                  for the C# programming language. We outline the
                  development process used, detail its application to
                  the development of a C# parser and present a number
                  of metrics that describe the parser's
                  evolution. This paper presents and reinforces an
                  argument for the application of software engineering
                  techniques in the area of parser design. The
                  development of a parser for the C# programming
                  language is in itself important to software
                  engineering, since parsers form the basis for tools
                  such as metrics generators, refactoring tools,
                  pretty-printers and reverse engineering tools.},
  pdf = {2002/malloy-saicsit02.pdf},
  url = {http://portal.acm.org/citation.cfm?id=581516}
}
@inproceedings{donoghue-ire02,
  author = {D. O'Donoghue and A. Leddy and J.F. Power and
                  J.T. Waldron},
  title = {{Bi-gram Analysis of Java Bytecode Sequences}},
  booktitle = {Proceedings of the Second Workshop on Intermediate
                  Representation Engineering for the Java Virtual
                  Machine},
  address = {Trinity College, Dublin, Ireland},
  month = {June 13-14},
  year = {2002},
  pages = {187--192},
  abstract = {We report on a project that performed a bigram analysis
                  of dynamic bytecode sequences. The objective was to
                  identify the most commonly used bytecode pairs, and
                  to examine the relative frequency of occurrence of
                  these bytecodes. In all, 12 large Java programs were
                  analysed, taken from the Java Grande and SPEC
                  benchmark suites. Our findings are of relevance to
                  research into instruction set design and
                  implementation, as well as JVM optimisation.},
  pdf = {2002/donoghue-ire02.pdf},
  url = {http://portal.acm.org/citation.cfm?id=638513}
}
@article{malloy-ddj02,
  author = {Brian A. Malloy and Scott A. Linde and Edward B. Duffy and
                  James F. Power},
  title = {{Testing C++ Compilers for ISO Language Conformance}},
  journal = {Dr. Dobb's Journal},
  number = {337},
  month = {June},
  year = {2002},
  pages = {71--78},
  abstract = {In this paper, we describe our construction of a test
                  harness to measure conformance of some popular C++
                  compilers and to measure the progress of the gcc C++
                  compiler as it moves toward ISO conformance. In an
                  attempt to apply the same standard to all of the
                  vendors, we use the same test cases and the same
                  testing framework for all executions, even though
                  some of the compilers are platform dependent and
                  there is no common platform for all compilers. We
                  found that the Python language provided the
                  functionality that we needed with its scripting
                  facility, its platform independence and its object
                  orientation to facilitate code reuse. Python
                  includes a testing framework as a module of the
                  language and we have extended the framework to
                  measure C++ ISO conformance.},
  pdf = {2002/malloy-ddj02.pdf},
  url = {http://www.ddj.com/articles/2002/0206/}
}
@incollection{dowling-wseas02,
  author = {T. Dowling and J. Power and J. Waldron},
  title = {{Relating Static and Dynamic Measurements for the Java
                  Virtual Machine Instruction Set}},
  booktitle = {Recent Advances in Simulation, Computational Methods
                  and Soft Computing},
  editor = {N.E. Mastorakis},
  publisher = {WSEAS Press},
  year = {2002},
  pages = {106--111},
  abstract = {This is a book-length version of our MMACTEE '01 paper.}
}
@incollection{gregg-jm02,
  author = {D. Gregg and J. Power and J. Waldron},
  title = {{Benchmarking the Java Virtual Architecture - The SPEC
            JVM98 Benchmark Suite}},
  booktitle = {Java Microarchitectures},
  editor = {N. Vijaykrishnan and M. Wolczko},
  chapter = {1},
  publisher = {Kluwer Academic},
  year = {2002},
  pages = {1--18},
  abstract = {In this chapter we present a study of the SPEC JVM98
                  benchmark suite at a dynamic platform-independent
                  level. The results presented describe the influence
                  of class library code, the relative importance of
                  various methods in the suite, as well as the sizes
                  of the local variable, parameter and operand
                  stacks. We also examine the dynamic bytecode
                  instruction usage frequencies, and discuss their
                  relevance. The influence of the choice of Java
                  source to bytecode compiler is shown to be
                  relatively insignificant at present.},
  pdf = {2002/gregg-jm02.pdf},
  doi = {10.1007/978-1-4615-0993-6_1}
}
@inproceedings{matzko-tools02,
  author = {Sarah Matzko and Peter J. Clarke and Tanton H. Gibbs and
                  Brian A. Malloy and James F. Power and Rosemary
                  Monahan},
  title = {{Reveal: A Tool to Reverse Engineer Class Diagrams}},
  booktitle = {Fortieth International Conference on Technology of
                  Object-Oriented Languages and Systems},
  address = {Sydney, Australia},
  month = {February 18-21},
  year = {2002},
  pages = {13--21},
  abstract = {Many systems are constructed without the use of modeling
                  and visualization artifacts, due to constraints
                  imposed by deadlines or a shortage of
                  manpower. Nevertheless, such systems might profit
                  from the visualization provided by diagrams to
                  facilitate maintenance of the constructed system. In
                  this paper, we present a tool, Reveal, to reverse
                  engineer a class diagram from the C++ source code
                  representation of the software. In Reveal, we remain
                  faithful to the UML standard definition of a class
                  diagram wherever possible. However, to accommodate
                  the vagaries of the C++ language, we offer some
                  extensions to the standard notation to include
                  representations for namespaces, stand-alone
                  functions and friend functions. We compare our
                  representation to three other tools that
                  reverse-engineer class diagrams, for both compliance
                  to the UML standard and for their ability to
                  faithfully represent the software system under
                  study.},
  pdf = {2002/matzko-tools02.pdf},
  url = {http://portal.acm.org/citation.cfm?id=564095}
}
@inproceedings{dowling-mmactee01,
  author = {T. Dowling and J. Power and J. Waldron},
  title = {{Relating Static and Dynamic Measurements for the Java
                  Virtual Machine
Instruction Set}},
  booktitle = {Symposium on Mathematical Methods and Computational
                  Techniques in Electronic Engineering},
  address = {Athens, Greece},
  month = {December 29-31},
  year = {2001},
  abstract = {It has previously been noted that, for conventional
                  machine code, there is a strong relationship between
                  static and dynamic code measurements. One of the
                  goals of this paper is to examine whether this same
                  relationship is true of Java programs at the
                  bytecode level. To this end, the hypothesis of a
                  linear correlation between static and dynamic
                  frequencies was investigated using Pearson's
                  correlation coefficient. Programs from the Java
                  Grande and SPEC benchmarks suites were used in the
                  analysis.},
  pdf = {2001/dowling-mmactee01.pdf}
}
@inproceedings{horgan-scam01,
  author = {Jane Horgan and James Power and John Waldron},
  title = {{Measurement and Analysis of Runtime Profiling Data for
                  Java Programs}},
  booktitle = {IEEE International Workshop on Source Code Analysis and
                  Manipulation},
  address = {Florence, Italy},
  month = {November 10},
  year = {2001},
  abstract = {In this paper we examine a procedure for the analysis of
                  data produced by the dynamic profiling of Java
                  programs. In particular, we describe the issues
                  involved in dynamic analysis, propose a metric for
                  discrimination between the resulting data sets, and
                  examine its application over different test suites
                  and compilers.},
  pdf = {2001/horgan-scam01.pdf},
  url = {http://ieeexplore.ieee.org/xpl/tocresult.jsp?isNumber=20963}
}
@inproceedings{daly-waad01,
  author = {Charles Daly and Jane Horgan and James Power and John
                  Waldron},
  title = {{Gender and Learning Systems}},
  booktitle = {Third International Conference on Women in Africa and
                  the African Diaspora},
  address = {Antananarivo/Tamatave, Madagascar},
  month = {October 8-17},
  year = {2001},
  abstract = {A great deal of research has been done on gender issues
                  in computer science education but very little on the
                  relationship between automated learning systems and
                  gender. The benefits of such systems have not been
                  exploited to their full extent in developing
                  countries where educational resources and qualified
                  instructors are at a premium. The system which we
                  present here, MIPSMARK, can be used for learning and
                  assessment with minimum intervention of the
                  lecturer, and is freely available on the web.}
}
@inproceedings{malloy-icis01,
  author = {Brian A. Malloy and James F. Power},
  title = {An Interpretation of {Purdom}'s Algorithm for Automatic
                  Generation of Test Cases},
  booktitle = {1st Annual International Conference on Computer and
                  Information Science},
  address = {Orlando, Florida, USA},
  month = {October 3-5},
  year = {2001},
  abstract = {We present a structured reformulation of the seminal
                  algorithm for automatic generation of test cases for
                  a context-free grammar. Our reformulation simplifies
                  the algorithm in several ways. First, we provide a
                  structured reformulation so that it is obvious where
                  to proceed at each step. Second, we partition the
                  intricate third phase into five functions, so that
                  the discussion and comprehension of this phase can
                  be modularized. Our implementation of the algorithm
                  provides information about the grammatic, syntactic
                  and semantic correctness of the generated test cases
                  for two important languages in use today: C and
                  C++. The results of our study of C and C++ highlight
                  a lacuna latent in the research to date. In
                  particular, if one or more of the automatically
                  generated test cases is syntactically or
                  semantically incorrect, then the confidence of
                  structural ``coverage'' may be compromised for the
                  particular grammar-based tool under test. Our
                  ongoing work focuses on a solution to this
                  problem.},
  pdf = {2001/malloy-icis01.pdf}
}
@inproceedings{aziz-ssgrr01,
  author = {B. Aziz and D. Gray and G. Hamilton and F. Oehl and
                  J. Power and D. Sinclair},
  title = {Implementing Protocol Verification for {E-Commerce}},
  booktitle = {International Conference on Advances in Infrastructure
                  for Electronic Business, Science, and Education on
                  the Internet},
  address = {L'Aquila, Italy},
  month = {Aug 6-12},
  year = {2001},
  abstract = {This paper presents a survey of the practical
                  application of protocol verification techniques to
                  applications in e-commerce. We concentrate in
                  particular on logic-based approaches, and review the
                  current state of the art as well as the prospects
                  for realistic deployment of protocol verification
                  techniques in the near future.},
  pdf = {2001/aziz-ssgrr01.pdf},
  annote = {Non peer-reviewed}
}
@inproceedings{byrne-ire01,
  author = {S. Byrne and J. Power and J. Waldron},
  title = {A Dynamic Comparison of the {SPEC98} and {Java Grande}
                  Benchmark Suites},
  booktitle = {Workshop on Intermediate Representation Engineering for
                  the Java Virtual Machine},
  address = {Orlando, Florida, USA},
  month = {July 22-25},
  year = {2001},
  pages = {95--98},
  abstract = {Two of the most commonly used benchmark suites for Java
                  Programs are the SPEC98 and Grande Forum benchmark
                  suites. This research uses a Platform Independent
                  Dynamic Analysis Technique to study these suites and
                  quantify the significant similarities and
                  differences in behaviour between the suites. Dynamic
                  frequencies adduced include method execution divided
                  into program, API and native categories. The most
                  informative basis for measurement is shown to be
                  percentages of executed bytecodes charged to each
                  method, and results are reported for the API
                  packages.},
  pdf = {2001/byrne-ire01.pdf}
}
@inproceedings{power-fmics01,
  author = {James Power and David Sinclair},
  title = {A Formal Model of {Forth} Control Words in the
                  {Pi-Calculus}},
  booktitle = {6th International Workshop on Formal Methods for
                  Industrial Critical Systems},
  address = {Paris, France},
  month = {July 16-17},
  year = {2001},
  abstract = {In this paper we develop a formal specification of
                  aspects of the Forth programming language. We
                  describe the operation of the Forth compiler as it
                  translates Forth control words, dealing in
                  particular with the interpretation of immediate
                  words during compilation. Our goal here is to
                  provide a basis for the study of safety properties
                  of embedded systems, many of which are constructed
                  using Forth or Forth-like languages. To this end we
                  construct a model of the Forth compiler in the
                  pi-calculus, and have simulated its execution by
                  animating this model using the pict programming
                  language.},
  pdf = {2001/power-fmics01.pdf}
}
@inproceedings{fritsche-pdcat01,
  author = {Karsten Fritsche and James Power and John Waldron},
  title = {A {Java} Distributed Computation Library},
  booktitle = {Second International Conference on Parallel and
                  Distributed Computing, Applications and
                  Technologies},
  address = {Taipei, Taiwan},
  month = {July 9-11},
  year = {2001},
  abstract = {This paper describes the design and development of a
                  Java Distributed Computation Library, which provides
                  a simple development platform for developers who
                  wish to quickly implement a distributed computation
                  in the context of an SPMD architecture (Single
                  Program, Multiple Data). The need for this research
                  arose out of the realisation that the currently
                  available distributed computation libraries and
                  systems do not adequately meet certain criteria,
                  such as ease of development, dynamic changes to
                  system behaviour, and easy deployment of distributed
                  software. The proposed solution to this problem was
                  to produce a Java-based distributed computation
                  library which enables developers to use the Java
                  language to quickly and easily implement a
                  distributed computation. The results of experiments
                  conducted using DCL are also presented, as a means
                  of showing that DCL met its design goals.},
  pdf = {2001/fritsche-pdcat01.pdf}
}
@inproceedings{daly-jg01,
  author = {Charles Daly and Jane Horgan and James Power and John
                  Waldron},
  title = {Platform Independent Dynamic {Java Virtual Machine}
                  Analysis: the {Java Grande Forum} Benchmark Suite},
  booktitle = {Joint ACM Java Grande - ISCOPE 2001 Conference},
  address = {Stanford University, USA},
  month = {June 2-4},
  year = {2001},
  abstract = {In this paper we present a platform independent analysis
                  of the dynamic profiles of Java programs when
                  executing on the Java Virtual Machine. The Java
                  programs selected are taken from the Java Grande
                  Forum benchmark suite, and five different
                  Java-to-bytecode compilers are analysed. The results
                  presented describe the dynamic instruction usage
                  frequencies, as well as the sizes of the local
                  variable, parameter and operand stacks during
                  execution on the JVM. These results, presenting a
                  picture of the actual (rather than presumed)
                  behaviour of the JVM, have implications both for the
                  coverage aspects of the Java Grande benchmark
                  suites, for the performance of the Java-to-bytecode
                  compilers, and for the design of the JVM.},
  pdf = {2001/daly-jg01.pdf},
  url = {http://portal.acm.org/citation.cfm?id=376826}
}
@inproceedings{gray-fscbs01,
  author = {David Gray and Geoff Hamilton and James Power and David
                  Sinclair},
  title = {A Specification of {TCP/IP} using Mixed Intuitionistic
                  Linear Logic (Extended Abstract)},
  booktitle = {2nd Joint Workshop on Formal Specification of
                  Computer-Based Systems},
  address = {Washington DC, USA},
  month = {20 April},
  year = {2001},
  abstract = {This paper presents an outline specification of the IP
                  and TCP communication protocols in mixed
                  intuitionistic linear logic and describes how this
                  logic can be used to prove some properties of both
                  protocols. We have previously presented a
                  specification of IP using commutative linear logic;
                  in this paper we extend this specification
                  considerably to include TCP, which, in turn,
                  necessitates the use of non-commutative operators.},
  pdf = {2001/gray-fscbs01.pdf}
}
@inproceedings{daly-joses01,
  author = {Charles Daly and Jane Horgan and James Power and John
                  Waldron},
  title = {Some measurements of {Java}-to-bytecode compiler performance
                  in the {Java Virtual Machine}},
  booktitle = {Workshop on Java Optimization Strategies for Embedded
                  Systems},
  address = {University of Genova, Italy},
  month = {April 1},
  year = {2001},
  abstract = {In this paper we present a platform independent analysis
                  of the dynamic profiles of Java programs when
                  executing on the Java Virtual Machine. The Java
                  programs selected are taken from the Java Grande
                  Forum benchmark suite, and five different
                  Java-to-bytecode compilers are analysed. The results
                  presented describe the dynamic instruction usage
                  frequencies.},
  pdf = {2001/daly-joses01.pdf}
}
@inproceedings{power-sac01,
  author = {James Power and Brian Malloy},
  title = {Exploiting Metrics to Facilitate Grammar Transformation
                  into {LALR} Format},
  booktitle = {16th ACM Symposium on Applied Computing},
  address = {Las Vegas, USA},
  month = {March 11-14},
  year = {2001},
  abstract = {The recently-standardized ISO C++ programming language
                  has a large and complex grammar and, due to the
                  nature of the language, any C++ parser must exhibit
                  tight coupling with subsequent phases of program
                  processing. The preparation of input suitable for
                  yacc in this case involves taking the original ISO
                  C++ grammar, designed for readability, and refining
                  it towards a version acceptable to yacc's parsing
                  algorithm. Since the structure of the whole ISO C++
                  standard closely follows the ISO C++ grammar, and
                  since this grammar is a widely-accepted standard, it
                  is desirable that the yacc source for the parser
                  resemble it as closely as possible. The design
                  process of the parser, then, involves a continuous
                  trade-off between preserving the grammar's
                  structure, and rearranging it to ease
                  implementation. We exploit software metrics, as an
                  aid towards estimating the complexity of this
                  task, and providing a means of assessing the
                  relative merits of these trade-offs. We see this
                  work as part of a larger process of designing
                  well-engineered, re-usable and reliable program
                  processors, which themselves will play an important
                  role in the future design of code-based
                  software-engineering tools.},
  pdf = {2001/power-sac01.pdf},
  url = {http://portal.acm.org/citation.cfm?id=372789}
}
@inproceedings{power-tools00,
  author = {James F. Power and Brian A. Malloy},
  title = {Symbol Table Construction and Name Lookup in {ISO} {C++}},
  booktitle = {37th International Conference on Technology of
                  Object-Oriented Languages and Systems},
  address = {Sydney, Australia},
  month = {November 20-23},
  year = {2000},
  abstract = {In this paper, we present an object-oriented model of
                  symbol table construction and name lookup for ISO
                  C++ using the Unified Modeling Language (UML). Our
                  use of UML class, activity and sequence diagrams
                  serves to explicate our model and our use of
                  patterns such as decorator and facade increase the
                  understandability of the model. Clause three of the
                  ISO C++ standard describes the procedures and rules
                  for performing name lookup; our activity and
                  sequence diagrams serve to simulate these procedures
                  in graphical fashion. An advantage of our approach
                  is that our model can increase C++ understandability
                  for those practitioners with a working UML
                  knowledge.},
  pdf = {2000/power-tools00.pdf}
}
@inproceedings{cosgrave-saicsit00,
  author = {Lisa Cosgrave and James Power and John Waldron},
  title = {An Object Oriented Approach to Parser Generation in {C++}},
  booktitle = {Conference of the South African Institute of Computer
                  Scientists and Information Technologists},
  address = {Cape Town, South Africa},
  month = {November 1-3},
  year = {2000},
  abstract = {In this paper we describe the design and implementation
                  of a system for representing context-free grammars
                  in C++. The system allows for grammar representation
                  at the object level, providing enhanced modularity
                  and flexibility when compared to traditional
                  generator-based approaches. We also describe the
                  transformation of grammar flow analysis problems
                  into an object-oriented framework using the Visitor
                  pattern, as well as the implementation of a top-down
                  LL(1) parser. As such, this work represents the
                  synthesis of three presently disparate fields in
                  parser design and implementation: combinator
                  parsing, fixpoint-based grammar flow analysis, and
                  object-oriented design.},
  pdf = {2000/cosgrave-saicsit00.pdf}
}
@inproceedings{waldron-csit00,
  author = {John Waldron and James Power},
  title = {Comparison of Bytecode and Stack Frame Usage by {Eiffel}
                  and {Java} Programs in the {Java Virtual Machine}},
  booktitle = {2nd International Workshop on Computer Science and
                  Information Technologies},
  address = {Ufa, Russia},
  month = {September 18-23},
  year = {2000},
  abstract = {Dynamic quantitative measurements of Bytecode and Stack
                  Frame Usage by Eiffel and Java Programs in the Java
                  Virtual Machine are made. Two Eiffel programs are
                  dynamically analysed while executing on the JVM, and
                  the results compared with those from the Java
                  Programs. The aim is to examine whether properties
                  like instruction usage and stack frame size are
                  properties of the Java programming language itself
                  or are exhibited by Eiffel programs as
                  well. Remarkably local_load, push_const and
                  local_store always account for very close to 40\% of
                  instructions executed, a property of the Java
                  Virtual Machine irrespective of the programming
                  language compiler or compiler optimizations used.},
  pdf = {2000/waldron-csit00.pdf}
}
@inproceedings{power-iwpc00,
  author = {James Power and Brian Malloy},
  title = {Metric-Based Analysis of Context-Free Grammars},
  booktitle = {8th IEEE International Workshop on Program Comprehension},
  address = {Limerick, Ireland},
  month = {10-11 June},
  year = {2000},
  abstract = {Recent advances in software engineering have produced a
                  variety of well established approaches, formalisms
                  and techniques to facilitate the construction of
                  large-scale applications. Developers interested in
                  the construction of robust, extensible software that
                  is easy to maintain should expect to deploy a range
                  of these techniques, as appropriate to the task. In
                  this paper, we provide a foundation for the
                  application of established software metrics to the
                  measurement of context-free grammars. The usual
                  application of software metrics is to program code;
                  we provide a mapping that allows these metrics to be
                  applied to grammars. This allows us to interpret six
                  software engineering metrics in a grammatical
                  context, including McCabe's complexity metric and
                  Fenton's impurity metric. We have designed and
                  implemented a tool to automatically compute the six
                  metrics; as a case study, we use these six metrics
                  to measure some of the properties of grammars for
                  the Oberon, ISO C, ISO C++ and Java programming
                  languages. We believe that the techniques that we
                  have developed can be applied to estimating the
                  difficulty of designing, implementing, testing and
                  maintaining parsers for large grammars.},
  pdf = {2000/power-iwpc00.pdf}
}
@inproceedings{sinclair-dsvv00,
  author = {David Sinclair and James Power and Paul Gibson and David
                  Gray and Geoff Hamilton},
  title = {Specifying and Verifying {IP} with Linear Logic},
  booktitle = {International Workshop on Distributed Systems
                  Validation and Verification},
  address = {Taipei, Taiwan, ROC},
  month = {April 10},
  year = {2000},
  abstract = {This paper presents a specification of the IP layer in
                  linear logic and shows how linear logic can be used
                  to prove some properties of this layer. Both the
                  specification and the correctness proofs have been
                  validated using the Coq proof assistant, via the
                  authors' embedding of linear logic into this
                  constructive framework.},
  pdf = {2000/sinclair-dsvv00.pdf}
}
@inproceedings{power-sac00,
  author = {James F. Power and Brian A. Malloy},
  title = {An Approach for Modeling the Name Lookup Problem in the
                  {C++} Programming Language},
  booktitle = {15th ACM Symposium on Applied Computing},
  address = {Villa Olmo, Como, Italy},
  month = {March 19-21},
  year = {2000},
  abstract = {Formal grammars are well established for specifying the
                  syntax of programming languages. However, the formal
                  specification of programming language semantics has
                  proven more elusive. A recent standard, the Unified
                  Modeling Language (UML), has quickly become
                  established as a common framework for the
                  specification of large scale software
                  applications. In this paper, we describe an approach
                  for using the UML to specify aspects of the static
                  semantics of programming languages. In particular,
                  we describe a technique for solving the name lookup
                  problem for the recently standardized C++
                  programming language. We apply our approach to C++
                  because a solution to the name lookup problem is
                  required for parser construction and our solution
                  can be applied to Java and other programming
                  languages.},
  doi = {10.1145/338407.338564},
  pdf = {2000/power-sac00-e.pdf}
}
@inproceedings{power-tphols99,
  author = {James Power and Caroline Webster},
  title = {Working with Linear Logic in {Coq}},
  booktitle = {12th International Conference on Theorem Proving in
                  Higher Order Logics (Work-in-progress paper)},
  address = {University of Nice, France},
  month = {September 14-17},
  year = {1999},
  abstract = {In this paper we describe the encoding of linear logic
                  in the Coq system, a proof assistant for
                  higher-order logic. This process involved encoding a
                  suitable consequence relation, the relevant
                  operators, and some auxiliary theorems and
                  tactics. The encoding allows us to state and prove
                  theorems in linear logic, and we demonstrate its use
                  through two examples: a simple blocks world
                  scenario, and the Towers of Hanoi problem.},
  pdf = {1999/power-tphols99.pdf},
  url = {http://www.cs.nuim.ie/~jpower/Research/LinearLogic/},
  annote = {Work-in-progress paper}
}
@inproceedings{gray-iwfm99,
  author = {David Gray and Geoff Hamilton and David Sinclair and Paul
                  Gibson and James Power},
  title = {Four Logics and a Protocol},
  booktitle = {3rd. Irish Workshop in Formal Methods},
  address = {National University of Ireland, Galway, Ireland},
  month = {July 1-2},
  year = {1999},
  abstract = {The Internet Protocol (IP) is the protocol used to
                  provide connectionless communication between hosts
                  connected to the Internet. It provides a basic
                  internetworking service to transport protocols such
                  as Transmission Control Protocol (TCP) and User
                  Datagram Protocol (UDP). These in turn provide both
                  connection-oriented and connectionless services to
                  applications such as file transfer (FTP) and WWW
                  browsing. In this paper we present four separate
                  specifications of the interface to the
                  internetworking layer implemented by IP using four
                  types of logic: classical, constructive, temporal
                  and linear logic.},
  pdf = {1999/gray-iwfm99.pdf}
}

This file was generated by bibtex2html 1.98.

Before 1999: See previous work

James Power,
Dept. of Computer Science, Maynooth University
Last revised: Thursday September 14, 2017