2024
Simone Leo, Michael R. Crusoe, Laura Rodríguez-Navas, Raül Sirvent, Alexander Kanitz, Paul De Geest, Rudolf Wittner, Luca Pireddu, Daniel Garijo, José M. Fernández, Iacopo Colonnelli, Matej Gallo, Tazro Ohta, Hirotaka Suetake, Salvador Capella-Gutierrez, Renske Wit, Bruno P. Kinoshita, Stian Soiland-Reyes
Recording provenance of workflow runs with RO-Crate Journal Article
In: PLoS ONE, vol. 19, no. 9, pp. 1–35, 2024.
@article{24:pone:wfrunrocrate,
title = {Recording provenance of workflow runs with {RO-Crate}},
author = {Simone Leo and Michael R. Crusoe and Laura Rodríguez-Navas and Raül Sirvent and Alexander Kanitz and Paul De Geest and Rudolf Wittner and Luca Pireddu and Daniel Garijo and José M. Fernández and Iacopo Colonnelli and Matej Gallo and Tazro Ohta and Hirotaka Suetake and Salvador Capella-Gutierrez and Renske Wit and Bruno P. Kinoshita and Stian Soiland-Reyes},
doi = {10.1371/journal.pone.0309210},
year = {2024},
date = {2024-09-01},
journal = {PLoS ONE},
volume = {19},
number = {9},
pages = {1--35},
publisher = {Public Library of Science},
abstract = {Recording the provenance of scientific computation results is key to the support of traceability, reproducibility and quality assessment of data products. Several data models have been explored to address this need, providing representations of workflow plans and their executions as well as means of packaging the resulting information for archiving and sharing. However, existing approaches tend to lack interoperable adoption across workflow management systems. In this work we present Workflow Run RO-Crate, an extension of RO-Crate (Research Object Crate) and Schema.org to capture the provenance of the execution of computational workflows at different levels of granularity and bundle together all their associated objects (inputs, outputs, code, etc.). The model is supported by a diverse, open community that runs regular meetings, discussing development, maintenance and adoption aspects. Workflow Run RO-Crate is already implemented by several workflow management systems, allowing interoperable comparisons between workflow runs from heterogeneous systems. We describe the model, its alignment to standards such as W3C PROV, and its implementation in six workflow systems. Finally, we illustrate the application of Workflow Run RO-Crate in two use cases of machine learning in the digital image analysis domain.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Alberto Mulone, Doriana Medić, Marco Aldinucci
A Fault Tolerance mechanism for Hybrid Scientific Workflows Proceedings Article
In: 1st workshop about High-Performance e-Science (HiPES), Madrid, Spain, 2024.
@inproceedings{24:madrid:hipes,
  title     = {A Fault Tolerance mechanism for Hybrid Scientific Workflows},
  author    = {Alberto Mulone and Doriana Medić and Marco Aldinucci},
  year      = {2024},
  date      = {2024-08-01},
  booktitle = {1st workshop about High-Performance e-Science (HiPES)},
  address   = {Madrid, Spain},
  abstract  = {In large distributed systems, failures are a daily event occurring frequently, especially with growing numbers of computation tasks and locations on which they are deployed. The advantage of representing an application as a workflow is possibility to utilize the Workflow Management Systems which are reliable systems guaranteeing the correct execution of the application and providing the features such as portability, scalability, and fault tolerance. Over recent years, the emergence of hybrid workflows has posed new and intriguing challenges by increasing the possibility of distributing computations involving heterogeneous and independent environments. As a consequence, the number of possible points of failure in the execution augmented, creating different important challenges interesting to study.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Iacopo Colonnelli, Robert Birke, Giulio Malenza, Gianluca Mittone, Alberto Mulone, Jeroen Galjaard, Lydia Y. Chen, Sanzio Bassini, Gabriella Scipione, Jan Martinovič, Vit Vondrák, Marco Aldinucci
Cross-Facility Federated Learning Journal Article
In: Procedia Computer Science, vol. 240, pp. 3–12, 2024, ISSN: 1877-0509.
@article{24:eurohpc:xffl,
title = {Cross-Facility Federated Learning},
author = {Iacopo Colonnelli and Robert Birke and Giulio Malenza and Gianluca Mittone and Alberto Mulone and Jeroen Galjaard and Lydia Y. Chen and Sanzio Bassini and Gabriella Scipione and Jan Martinovič and Vit Vondrák and Marco Aldinucci},
doi = {10.1016/j.procs.2024.07.003},
issn = {1877-0509},
year = {2024},
date = {2024-01-01},
booktitle = {Proceedings of the First EuroHPC user day},
journal = {Procedia Computer Science},
volume = {240},
pages = {3--12},
publisher = {Elsevier},
address = {Bruxelles, Belgium},
abstract = {In a decade, AI frontier research transitioned from the researcher's workstation to thousands of high-end hardware-accelerated compute nodes. This rapid evolution shows no signs of slowing down in the foreseeable future. While top cloud providers may be able to keep pace with this growth rate, obtaining and efficiently exploiting computing resources at that scale is a daunting challenge for universities and SMEs. This work introduces the Cross-Facility Federated Learning (XFFL) framework to bridge this compute divide, extending the opportunity to efficiently exploit multiple independent data centres for extreme-scale deep learning tasks to data scientists and domain experts. XFFL relies on hybrid workflow abstractions to decouple tasks from environment-specific technicalities, reducing complexity and enhancing reusability. In addition, Federated Learning (FL) algorithms eliminate the need to move large amounts of data between different facilities, reducing time-to-solution and preserving data privacy. The XFFL approach is empirically evaluated by training a full LLaMAv2 7B instance on two facilities of the EuroHPC JU, showing how the increased computing power completely compensates for the additional overhead introduced by two data centres.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
2023
Marco Aldinucci, Elena Maria Baralis, Valeria Cardellini, Iacopo Colonnelli, Marco Danelutto, Sergio Decherchi, Giuseppe Di Modica, Luca Ferrucci, Marco Gribaudo, Francesco Iannone, Marco Lapegna, Doriana Medic, Giuseppa Muscianisi, Francesca Righetti, Eva Sciacca, Nicola Tonellotto, Mauro Tortonesi, Paolo Trunfio, Tullio Vardanega
A Systematic Mapping Study of Italian Research on Workflows Proceedings Article
In: Proceedings of the SC '23 Workshops of The International Conference on High Performance Computing, Network, Storage, and Analysis, SC-W 2023, pp. 2065–2076, ACM, Denver, CO, USA, 2023.
@inproceedings{WORKS2023,
title = {A Systematic Mapping Study of Italian Research on Workflows},
author = {Marco Aldinucci and Elena Maria Baralis and Valeria Cardellini and Iacopo Colonnelli and Marco Danelutto and Sergio Decherchi and Giuseppe Di Modica and Luca Ferrucci and Marco Gribaudo and Francesco Iannone and Marco Lapegna and Doriana Medic and Giuseppa Muscianisi and Francesca Righetti and Eva Sciacca and Nicola Tonellotto and Mauro Tortonesi and Paolo Trunfio and Tullio Vardanega},
doi = {10.1145/3624062.3624285},
year = {2023},
date = {2023-11-01},
booktitle = {Proceedings of the SC '23 Workshops of The International Conference on High Performance Computing, Network, Storage, and Analysis, SC-W 2023},
pages = {2065--2076},
publisher = {ACM},
address = {Denver, CO, USA},
abstract = {An entire ecosystem of methodologies and tools revolves around scientific workflow management. They cover crucial non-functional requirements that standard workflow models fail to target, such as interactive execution, energy efficiency, performance portability, Big Data management, and intelligent orchestration in the Computing Continuum. Characterizing and monitoring this ecosystem is crucial to develop an informed view of current and future research directions. This work conducts a systematic mapping study of the Italian workflow research community, collecting and analyzing 25 tools and 10 applications from several scientific domains in the context of the ``National Research Centre for HPC, Big Data, and Quantum Computing'' (ICSC). The study aims to outline the main current research directions and determine how they address the critical needs of modern scientific applications. The findings highlight a variegated research ecosystem of tools, with a prominent interest in advanced workflow orchestration and still immature but promising efforts toward energy efficiency.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Alberto Mulone, Sherine Awad, Davide Chiarugi, Marco Aldinucci
Porting the Variant Calling Pipeline for NGS data in cloud-HPC environment Proceedings Article
In: Shahriar, Hossain, Teranishi, Yuuichi, Cuzzocrea, Alfredo, Sharmin, Moushumi, Towey, Dave, Majumder, A. K. M. Jahangir Alam, Kashiwazaki, Hiroki, Yang, Ji-Jiang, Takemoto, Michiharu, Sakib, Nazmus, Banno, Ryohei, Ahamed, Sheikh Iqbal (Ed.): 47th IEEE Annual Computers, Software, and Applications Conference, COMPSAC 2023, pp. 1858–1863, IEEE, Torino, Italy, 2023.
@inproceedings{23:mulone:wide:vcp,
title = {Porting the Variant Calling Pipeline for {NGS} data in cloud-{HPC} environment},
author = {Alberto Mulone and Sherine Awad and Davide Chiarugi and Marco Aldinucci},
editor = {Hossain Shahriar and Yuuichi Teranishi and Alfredo Cuzzocrea and Moushumi Sharmin and Dave Towey and A. K. M. Jahangir Alam Majumder and Hiroki Kashiwazaki and Ji-Jiang Yang and Michiharu Takemoto and Nazmus Sakib and Ryohei Banno and Sheikh Iqbal Ahamed},
doi = {10.1109/COMPSAC57700.2023.00288},
year = {2023},
date = {2023-01-01},
booktitle = {47th IEEE Annual Computers, Software, and Applications Conference, COMPSAC 2023},
pages = {1858--1863},
publisher = {IEEE},
address = {Torino, Italy},
abstract = {In recent years we have understood the importance of analyzing and sequencing human genetic variation. A relevant aspect that emerged from the Covid-19 pandemic was the need to obtain results very quickly; this involved using High-Performance Computing (HPC) environments to execute the Next Generation Sequencing (NGS) pipeline. However, HPC is not always the most suitable environment for the entire execution of a pipeline, especially when it involves many heterogeneous tools. The ability to execute parts of the pipeline on different environments can lead to higher performance but also cheaper executions. This work shows the design and optimization process that led us to a state-of-the-art Variant Calling hybrid workflow based on the StreamFlow Workflow Management System (WfMS). We also compare StreamFlow with Snakemake, an established WfMS targeting HPC facilities, observing comparable performance on single environments and satisfactory improvements with a hybrid cloud-HPC configuration.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Iacopo Colonnelli, Bruno Casella, Gianluca Mittone, Yasir Arfat, Barbara Cantalupo, Roberto Esposito, Alberto Riccardo Martinelli, Doriana Medić, Marco Aldinucci
Federated Learning meets HPC and cloud Proceedings Article
In: Bufano, Filomena, Riggi, Simone, Sciacca, Eva, Schillirò, Francesco (Ed.): Astrophysics and Space Science Proceedings, pp. 193–199, Springer, Catania, Italy, 2023, ISBN: 978-3-031-34167-0.
@inproceedings{22:ml4astro,
title = {Federated Learning meets {HPC} and cloud},
author = {Iacopo Colonnelli and Bruno Casella and Gianluca Mittone and Yasir Arfat and Barbara Cantalupo and Roberto Esposito and Alberto Riccardo Martinelli and Doriana Medić and Marco Aldinucci},
editor = {Filomena Bufano and Simone Riggi and Eva Sciacca and Francesco Schillirò},
doi = {10.1007/978-3-031-34167-0_39},
isbn = {978-3-031-34167-0},
year = {2023},
date = {2023-01-01},
booktitle = {Astrophysics and Space Science Proceedings},
volume = {60},
pages = {193--199},
publisher = {Springer},
address = {Catania, Italy},
abstract = {HPC and AI are fated to meet for several reasons. This article will discuss some of them and argue why this will happen through the set of methods and technologies that underpin cloud computing. As a paradigmatic example, we present a new federated learning system that collaboratively trains a deep learning model in different supercomputing centers. The system is based on the StreamFlow workflow manager designed for hybrid cloud-HPC infrastructures.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Sandro Gepiro Contaldo, Luca Alessandri, Iacopo Colonnelli, Marco Beccuti, Marco Aldinucci
Bringing Cell Subpopulation Discovery on a Cloud-HPC Using rCASC and StreamFlow Book Chapter
In: Calogero, Raffaele Adolfo, Benes, Vladimir (Ed.): Single Cell Transcriptomics: Methods and Protocols, pp. 337–345, Springer US, New York, NY, 2023, ISBN: 978-1-0716-2756-3.
@inbook{Contaldo2023,
title = {Bringing Cell Subpopulation Discovery on a {Cloud-HPC} Using {rCASC} and {StreamFlow}},
author = {Sandro Gepiro Contaldo and Luca Alessandri and Iacopo Colonnelli and Marco Beccuti and Marco Aldinucci},
editor = {Raffaele Adolfo Calogero and Vladimir Benes},
doi = {10.1007/978-1-0716-2756-3_17},
isbn = {978-1-0716-2756-3},
year = {2023},
date = {2023-01-01},
booktitle = {Single Cell Transcriptomics: Methods and Protocols},
pages = {337--345},
publisher = {Springer US},
address = {New York, NY},
abstract = {The idea behind novel single-cell RNA sequencing (scRNA-seq) pipelines is to isolate single cells through microfluidic approaches and generate sequencing libraries in which the transcripts are tagged to track their cell of origin. Modern scRNA-seq platforms are capable of analyzing up to many thousands of cells in each run. Then, combined with massive high-throughput sequencing producing billions of reads, scRNA-seq allows the assessment of fundamental biological properties of cell populations and biological systems at unprecedented resolution.},
keywords = {},
pubstate = {published},
tppubtype = {inbook}
}
Iacopo Colonnelli
Workflow Models for Heterogeneous Distributed Systems Proceedings Article
In: Bena, Nicola, Martino, Beniamino Di, Maratea, Antonio, Sperduti, Alessandro, Nardo, Emanuel Di, Ciaramella, Angelo, Montella, Raffaele, Ardagna, Claudio A. (Ed.): Proceedings of the 2nd Italian Conference on Big Data and Data Science (ITADATA 2023), Naples, Italy, September 11-13, 2023, CEUR-WS.org, 2023.
@inproceedings{23:colonnelli:itadata,
  title     = {Workflow Models for Heterogeneous Distributed Systems},
  author    = {Iacopo Colonnelli},
  editor    = {Nicola Bena and Beniamino Di Martino and Antonio Maratea and Alessandro Sperduti and Emanuel Di Nardo and Angelo Ciaramella and Raffaele Montella and Claudio A. Ardagna},
  year      = {2023},
  date      = {2023-01-01},
  booktitle = {Proceedings of the 2nd Italian Conference on Big Data and Data Science (ITADATA 2023), Naples, Italy, September 11-13, 2023},
  volume    = {3606},
  publisher = {CEUR-WS.org},
  series    = {CEUR Workshop Proceedings},
  abstract  = {This article introduces a novel hybrid workflow abstraction that injects topology awareness directly into the definition of a distributed workflow model. In particular, the article briefly discusses the advantages brought by this approach to the design and orchestration of large-scale data-oriented workflows, the current level of support from state-of-the-art workflow systems, and some future research directions.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
2022
Marco Aldinucci, David Atienza, Federico Bolelli, Mónica Caballero, Iacopo Colonnelli, José Flich, Jon Ander Gómez, David González, Costantino Grana, Marco Grangetto, Simone Leo, Pedro López, Dana Oniga, Roberto Paredes, Luca Pireddu, Eduardo Quiñones, Tatiana Silva, Enzo Tartaglione, Marina Zapater
The DeepHealth Toolkit: A Key European Free and Open-Source Software for Deep Learning and Computer Vision Ready to Exploit Heterogeneous HPC and Cloud Architectures Book Section
In: Curry, Edward, Auer, Sören, Berre, Arne J., Metzger, Andreas, Perez, Maria S., Zillner, Sonja (Ed.): Technologies and Applications for Big Data Value, pp. 183–202, Springer International Publishing, Cham, 2022, ISBN: 978-3-030-78307-5.
@incollection{22:TABDV,
title = {The {DeepHealth} Toolkit: A Key European Free and Open-Source Software for Deep Learning and Computer Vision Ready to Exploit Heterogeneous {HPC} and Cloud Architectures},
author = {Marco Aldinucci and David Atienza and Federico Bolelli and Mónica Caballero and Iacopo Colonnelli and José Flich and Jon Ander Gómez and David González and Costantino Grana and Marco Grangetto and Simone Leo and Pedro López and Dana Oniga and Roberto Paredes and Luca Pireddu and Eduardo Quiñones and Tatiana Silva and Enzo Tartaglione and Marina Zapater},
editor = {Edward Curry and Sören Auer and Arne J. Berre and Andreas Metzger and Maria S. Perez and Sonja Zillner},
doi = {10.1007/978-3-030-78307-5_9},
isbn = {978-3-030-78307-5},
year = {2022},
date = {2022-01-01},
booktitle = {Technologies and Applications for Big Data Value},
pages = {183--202},
publisher = {Springer International Publishing},
address = {Cham},
chapter = {9},
abstract = {At the present time, we are immersed in the convergence between Big Data, High-Performance Computing and Artificial Intelligence. Technological progress in these three areas has accelerated in recent years, forcing different players like software companies and stakeholders to move quickly. The European Union is dedicating a lot of resources to maintain its relevant position in this scenario, funding projects to implement large-scale pilot testbeds that combine the latest advances in Artificial Intelligence, High-Performance Computing, Cloud and Big Data technologies. The DeepHealth project is an example focused on the health sector whose main outcome is the DeepHealth toolkit, a European unified framework that offers deep learning and computer vision capabilities, completely adapted to exploit underlying heterogeneous High-Performance Computing, Big Data and cloud architectures, and ready to be integrated into any software platform to facilitate the development and deployment of new applications for specific problems in any sector. This toolkit is intended to be one of the European contributions to the field of AI. This chapter introduces the toolkit with its main components and complementary tools, providing a clear view to facilitate and encourage its adoption and wide use by the European community of developers of AI-based solutions and data scientists working in the healthcare sector and others.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Eduardo Quiñones, Jesus Perales, Jorge Ejarque, Asaf Badouh, Santiago Marco, Fabrice Auzanneau, François Galea, David González, José Ramón Hervás, Tatiana Silva, Iacopo Colonnelli, Barbara Cantalupo, Marco Aldinucci, Enzo Tartaglione, Rafael Tornero, José Flich, Jose Maria Martinez, David Rodriguez, Izan Catalán, Jorge Garcia, Carles Hernández
The DeepHealth HPC Infrastructure: Leveraging Heterogenous HPC and Cloud Computing Infrastructures for IA-based Medical Solutions Book Section
In: Terzo, Olivier, Martinovič, Jan (Ed.): HPC, Big Data, and AI Convergence Towards Exascale: Challenge and Vision, pp. 191–216, CRC Press, Boca Raton, Florida, 2022, ISBN: 978-1-0320-0984-1.
@incollection{22:deephealth:HPCbook,
title = {The {DeepHealth} {HPC} Infrastructure: Leveraging Heterogenous {HPC} and Cloud Computing Infrastructures for {IA}-based Medical Solutions},
author = {Eduardo Quiñones and Jesus Perales and Jorge Ejarque and Asaf Badouh and Santiago Marco and Fabrice Auzanneau and François Galea and David González and José Ramón Hervás and Tatiana Silva and Iacopo Colonnelli and Barbara Cantalupo and Marco Aldinucci and Enzo Tartaglione and Rafael Tornero and José Flich and Jose Maria Martinez and David Rodriguez and Izan Catalán and Jorge Garcia and Carles Hernández},
editor = {Olivier Terzo and Jan Martinovič},
doi = {10.1201/9781003176664},
isbn = {978-1-0320-0984-1},
year = {2022},
date = {2022-01-01},
booktitle = {HPC, Big Data, and AI Convergence Towards Exascale: Challenge and Vision},
pages = {191--216},
publisher = {CRC Press},
address = {Boca Raton, Florida},
chapter = {10},
abstract = {This chapter presents the DeepHealth HPC toolkit for an efficient execution of deep learning (DL) medical application into HPC and cloud-computing infrastructures, featuring many-core, GPU, and FPGA acceleration devices. The toolkit offers to the European Computer Vision Library and the European Distributed Deep Learning Library (EDDL), developed in the DeepHealth project as well, the mechanisms to distribute and parallelize DL operations on HPC and cloud infrastructures in a fully transparent way. The toolkit implements workflow managers used to orchestrate HPC workloads for an efficient parallelization of EDDL training operations on HPC and cloud infrastructures, and includes the parallel programming models for an efficient execution EDDL inference and training operations on many-core, GPUs and FPGAs acceleration devices.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Martin Golasowski, Jan Martinovič, Marc Levrier, Stephan Hachinger, Sophia Karagiorgou, Aikaterini Papapostolou, Spiros Mouzakitis, Ioannis Tsapelas, Monica Caballero, Marco Aldinucci, Jon Ander Gómez, Antony Chazapis, Jean-Thomas Acquaviva
Toward the Convergence of High-Performance Computing, Cloud, and Big Data Domains Book Section
In: Terzo, Olivier, Martinovič, Jan (Ed.): HPC, Big Data, and AI Convergence Towards Exascale: Challenge and Vision, pp. 1–16, CRC Press, Boca Raton, Florida, 2022, ISBN: 978-1-0320-0984-1.
@incollection{22:intro:HPCbook,
title = {Toward the Convergence of High-Performance Computing, Cloud, and Big Data Domains},
author = {Martin Golasowski and Jan Martinovič and Marc Levrier and Stephan Hachinger and Sophia Karagiorgou and Aikaterini Papapostolou and Spiros Mouzakitis and Ioannis Tsapelas and Monica Caballero and Marco Aldinucci and Jon Ander Gómez and Antony Chazapis and Jean-Thomas Acquaviva},
editor = {Olivier Terzo and Jan Martinovič},
doi = {10.1201/9781003176664},
isbn = {978-1-0320-0984-1},
year = {2022},
date = {2022-01-01},
booktitle = {HPC, Big Data, and AI Convergence Towards Exascale: Challenge and Vision},
pages = {1--16},
publisher = {CRC Press},
address = {Boca Raton, Florida},
chapter = {1},
abstract = {Convergence between big data, high-performance computing, and the cloud is the key driving factor for sustainable economic growth in the future. Technological advances in many fields are determined by competence to gain precise information from the large amounts of data collected, which in turn requires powerful computing resources. This chapter provides an overview on the evolution of the three fields and four different points of view on their convergence provided by the CYBELE, DeepHealth, Evolve, and LEXIS projects funded by the European Union under the Horizon 2020 Programme.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Dana Oniga, Barbara Cantalupo, Enzo Tartaglione, Daniele Perlo, Marco Grangetto, Marco Aldinucci, Federico Bolelli, Federico Pollastri, Michele Cancilla, Laura Canalini, Costantino Grana, Cristina Muñoz Alcalde, Franco Alberto Cardillo, Monica Florea
Applications of AI and HPC in the Health Domain Book Section
In: Terzo, Olivier, Martinovič, Jan (Ed.): HPC, Big Data, and AI Convergence Towards Exascale: Challenge and Vision, pp. 217–239, CRC Press, Boca Raton, Florida, 2022, ISBN: 978-1-0320-0984-1.
@incollection{22:applications:HPCbook,
title = {Applications of {AI} and {HPC} in the Health Domain},
author = {Dana Oniga and Barbara Cantalupo and Enzo Tartaglione and Daniele Perlo and Marco Grangetto and Marco Aldinucci and Federico Bolelli and Federico Pollastri and Michele Cancilla and Laura Canalini and Costantino Grana and Cristina Muñoz Alcalde and Franco Alberto Cardillo and Monica Florea},
editor = {Olivier Terzo and Jan Martinovič},
doi = {10.1201/9781003176664},
isbn = {978-1-0320-0984-1},
year = {2022},
date = {2022-01-01},
booktitle = {HPC, Big Data, and AI Convergence Towards Exascale: Challenge and Vision},
pages = {217--239},
publisher = {CRC Press},
address = {Boca Raton, Florida},
chapter = {11},
abstract = {This chapter presents the applications of artificial intelligence (AI) and high-computing performance (HPC) in the health domain, illustrated by the description of five of the use cases that are developed in the DeepHealth project. In the context of the European Commission supporting the use of AI and HPC in the health sector, DeepHealth Project is helping health experts process large quantities of images, putting at their disposal DeepLearning and computer vision techniques, combined in the DeepHealth toolkit and HPC infrastructures. The DeepHealth toolkit is tested and validated through 15 use cases, each of them representing a biomedical application. The most promising use cases are described in the chapter, which concludes with the value proposition and the benefits that DeepHealth toolkit offers to future end users.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Iacopo Colonnelli, Marco Aldinucci, Barbara Cantalupo, Luca Padovani, Sergio Rabellino, Concetto Spampinato, Roberto Morelli, Rosario Di Carlo, Nicolò Magini, Carlo Cavazzoni
Distributed workflows with Jupyter Journal Article
In: Future Generation Computer Systems, vol. 128, pp. 282–298, 2022, ISSN: 0167-739X.
@article{21:FGCS:jupyflow,
title = {Distributed workflows with {Jupyter}},
author = {Iacopo Colonnelli and Marco Aldinucci and Barbara Cantalupo and Luca Padovani and Sergio Rabellino and Concetto Spampinato and Roberto Morelli and Rosario Di Carlo and Nicolò Magini and Carlo Cavazzoni},
doi = {10.1016/j.future.2021.10.007},
issn = {0167-739X},
year = {2022},
date = {2022-01-01},
journal = {Future Generation Computer Systems},
volume = {128},
pages = {282--298},
abstract = {The designers of a new coordination interface enacting complex workflows have to tackle a dichotomy: choosing a language-independent or language-dependent approach. Language-independent approaches decouple workflow models from the host code's business logic and advocate portability. Language-dependent approaches foster flexibility and performance by adopting the same host language for business and coordination code. Jupyter Notebooks, with their capability to describe both imperative and declarative code in a unique format, allow taking the best of the two approaches, maintaining a clear separation between application and coordination layers but still providing a unified interface to both aspects. We advocate the Jupyter Notebooks' potential to express complex distributed workflows, identifying the general requirements for a Jupyter-based Workflow Management System (WMS) and introducing a proof-of-concept portable implementation working on hybrid Cloud-HPC infrastructures. As a byproduct, we extended the vanilla IPython kernel with workflow-based parallel and distributed execution capabilities. The proposed Jupyter-workflow (Jw) system is evaluated on common scenarios for High Performance Computing (HPC) and Cloud, showing its potential in lowering the barriers between prototypical Notebooks and production-ready implementations.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
2021
Giovanni Agosta, William Fornaciari, Andrea Galimberti, Giuseppe Massari, Federico Reghenzani, Federico Terraneo, Davide Zoni, Carlo Brandolese, Massimo Celino, Francesco Iannone, Paolo Palazzari, Giuseppe Zummo, Massimo Bernaschi, Pasqua D'Ambra, Sergio Saponara, Marco Danelutto, Massimo Torquati, Marco Aldinucci, Yasir Arfat, Barbara Cantalupo, Iacopo Colonnelli, Roberto Esposito, Alberto Riccardo Martinelli, Gianluca Mittone, Olivier Beaumont, Berenger Bramas, Lionel Eyraud-Dubois, Brice Goglin, Abdou Guermouche, Raymond Namyst, Samuel Thibault, Antonio Filgueras, Miquel Vidal, Carlos Alvarez, Xavier Martorell, Ariel Oleksiak, Michal Kulczewski, Alessandro Lonardo, Piero Vicini, Francesco Lo Cicero, Francesco Simula, Andrea Biagioni, Paolo Cretaro, Ottorino Frezza, Pier Stanislao Paolucci, Matteo Turisini, Francesco Giacomini, Tommaso Boccali, Simone Montangero, Roberto Ammendola
TEXTAROSSA: Towards EXtreme scale Technologies and Accelerators for euROhpc hw/Sw Supercomputing Applications for exascale Proceedings Article
In: Proc. of the 24th Euromicro Conference on Digital System Design (DSD), IEEE, Palermo, Italy, 2021.
@inproceedings{21:DSD:textarossa,
title = {{TEXTAROSSA}: Towards {EXtreme} scale Technologies and Accelerators for {euROhpc} {hw/Sw} Supercomputing Applications for exascale},
author = {Giovanni Agosta and William Fornaciari and Andrea Galimberti and Giuseppe Massari and Federico Reghenzani and Federico Terraneo and Davide Zoni and Carlo Brandolese and Massimo Celino and Francesco Iannone and Paolo Palazzari and Giuseppe Zummo and Massimo Bernaschi and Pasqua D'Ambra and Sergio Saponara and Marco Danelutto and Massimo Torquati and Marco Aldinucci and Yasir Arfat and Barbara Cantalupo and Iacopo Colonnelli and Roberto Esposito and Alberto Riccardo Martinelli and Gianluca Mittone and Olivier Beaumont and Berenger Bramas and Lionel Eyraud-Dubois and Brice Goglin and Abdou Guermouche and Raymond Namyst and Samuel Thibault and Antonio Filgueras and Miquel Vidal and Carlos Alvarez and Xavier Martorell and Ariel Oleksiak and Michal Kulczewski and Alessandro Lonardo and Piero Vicini and Francesco Lo Cicero and Francesco Simula and Andrea Biagioni and Paolo Cretaro and Ottorino Frezza and Pier Stanislao Paolucci and Matteo Turisini and Francesco Giacomini and Tommaso Boccali and Simone Montangero and Roberto Ammendola},
doi = {10.1109/DSD53832.2021.00051},
year = {2021},
date = {2021-08-01},
booktitle = {Proc. of the 24th Euromicro Conference on Digital System Design (DSD)},
publisher = {IEEE},
address = {Palermo, Italy},
abstract = {To achieve high performance and high energy efficiency on near-future exascale computing systems, three key technology gaps needs to be bridged. These gaps include: energy efficiency and thermal control; extreme computation efficiency via HW acceleration and new arithmetics; methods and tools for seamless integration of reconfigurable accelerators in heterogeneous HPC multi-node platforms. TEXTAROSSA aims at tackling this gap through a co-design approach to heterogeneous HPC solutions, supported by the integration and extension of HW and SW IPs, programming models and tools derived from European research.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Iacopo Colonnelli, Barbara Cantalupo, Concetto Spampinato, Matteo Pennisi, Marco Aldinucci
Bringing AI pipelines onto cloud-HPC: setting a baseline for accuracy of COVID-19 diagnosis Proceedings Article
In: Iannone, Francesco (Ed.): ENEA CRESCO in the fight against COVID-19, ENEA, 2021.
@inproceedings{21:covi:enea,
title = {Bringing {AI} pipelines onto cloud-{HPC}: setting a baseline for accuracy of {COVID-19} diagnosis},
author = {Iacopo Colonnelli and Barbara Cantalupo and Concetto Spampinato and Matteo Pennisi and Marco Aldinucci},
editor = {Francesco Iannone},
doi = {10.5281/zenodo.5151511},
year = {2021},
date = {2021-01-01},
booktitle = {ENEA CRESCO in the fight against COVID-19},
publisher = {ENEA},
abstract = {HPC is an enabling platform for AI. The introduction of AI workloads in the HPC applications basket has non-trivial consequences both on the way of designing AI applications and on the way of providing HPC computing. This is the leitmotif of the convergence between HPC and AI. The formalized definition of AI pipelines is one of the milestones of HPC-AI convergence. If well conducted, it allows, on the one hand, to obtain portable and scalable applications. On the other hand, it is crucial for the reproducibility of scientific pipelines. In this work, we advocate the StreamFlow Workflow Management System as a crucial ingredient to define a parametric pipeline, called ``CLAIRE COVID-19 Universal Pipeline'', which is able to explore the optimization space of methods to classify COVID-19 lung lesions from CT scans, compare them for accuracy, and therefore set a performance baseline. The universal pipeline automatizes the training of many different Deep Neural Networks (DNNs) and many different hyperparameters. It, therefore, requires a massive computing power, which is found in traditional HPC infrastructure thanks to the portability-by-design of pipelines designed with StreamFlow. Using the universal pipeline, we identified a DNN reaching over 90% accuracy in detecting COVID-19 lesions in CT scans.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Iacopo Colonnelli, Barbara Cantalupo, Roberto Esposito, Matteo Pennisi, Concetto Spampinato, Marco Aldinucci
HPC Application Cloudification: The StreamFlow Toolkit Proceedings Article
In: Bispo, João, Cherubin, Stefano, Flich, José (Ed.): 12th Workshop on Parallel Programming and Run-Time Management Techniques for Many-core Architectures and 10th Workshop on Design Tools and Architectures for Multicore Embedded Computing Platforms (PARMA-DITAM 2021), pp. 5:1–5:13, Schloss Dagstuhl – Leibniz-Zentrum für Informatik, Dagstuhl, Germany, 2021, ISSN: 2190-6807.
@inproceedings{colonnelli_et_al:OASIcs.PARMA-DITAM.2021.5,
title = {{HPC} Application Cloudification: The {StreamFlow} Toolkit},
author = {Iacopo Colonnelli and Barbara Cantalupo and Roberto Esposito and Matteo Pennisi and Concetto Spampinato and Marco Aldinucci},
editor = {João Bispo and Stefano Cherubin and José Flich},
doi = {10.4230/OASIcs.PARMA-DITAM.2021.5},
issn = {2190-6807},
year = {2021},
date = {2021-01-01},
booktitle = {12th Workshop on Parallel Programming and Run-Time Management Techniques for Many-core Architectures and 10th Workshop on Design Tools and Architectures for Multicore Embedded Computing Platforms ({PARMA-DITAM} 2021)},
volume = {88},
pages = {5:1--5:13},
publisher = {Schloss Dagstuhl -- Leibniz-Zentrum für Informatik},
address = {Dagstuhl, Germany},
series = {Open Access Series in Informatics (OASIcs)},
abstract = {Finding an effective way to improve accessibility to High-Performance Computing facilities, still anchored to SSH-based remote shells and queue-based job submission mechanisms, is an open problem in computer science. This work advocates a cloudification of HPC applications through a cluster-as-accelerator pattern, where computationally demanding portions of the main execution flow hosted on a Cloud infrastructure can be offloaded to HPC environments to speed them up. We introduce StreamFlow, a novel Workflow Management System that supports such a design pattern and makes it possible to run the steps of a standard workflow model on independent processing elements with no shared storage. We validated the proposed approach's effectiveness on the CLAIRE COVID-19 universal pipeline, i.e. a reproducible workflow capable of automating the comparison of (possibly all) state-of-the-art pipelines for the diagnosis of COVID-19 interstitial pneumonia from CT scans images based on Deep Neural Networks (DNNs).},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Iacopo Colonnelli, Barbara Cantalupo, Ivan Merelli, Marco Aldinucci
StreamFlow: cross-breeding cloud with HPC Journal Article
In: IEEE Transactions on Emerging Topics in Computing, vol. 9, no. 4, pp. 1723–1737, 2021.
@article{20Lstreamflow:tetc,
title = {{StreamFlow}: cross-breeding cloud with {HPC}},
author = {Iacopo Colonnelli and Barbara Cantalupo and Ivan Merelli and Marco Aldinucci},
doi = {10.1109/TETC.2020.3019202},
year = {2021},
date = {2021-01-01},
journal = {IEEE Transactions on Emerging Topics in Computing},
volume = {9},
number = {4},
pages = {1723--1737},
abstract = {Workflows are among the most commonly used tools in a variety of execution environments. Many of them target a specific environment; few of them make it possible to execute an entire workflow in different environments, e.g. Kubernetes and batch clusters. We present a novel approach to workflow execution, called StreamFlow, that complements the workflow graph with the declarative description of potentially complex execution environments, and that makes it possible the execution onto multiple sites not sharing a common data space. StreamFlow is then exemplified on a novel bioinformatics pipeline for single cell transcriptomic data analysis workflow.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}