ARS
Pubblicazioni
Pubblicazioni del progetto ARS
2020 |
Andrea Roberti, Nicola Piccinelli, Daniele Meli, Riccardo Muradore, Paolo Fiorini Improving rigid 3D calibration for robotic surgery Journal Article IEEE Transactions on Medical Robotics and Bionics (special issue for Hamlyn Symposium on Medical Robotics 2020), pp. 569-573, 2020, ISSN: 2576-3202. @article{Roberti2020, title = {Improving rigid 3D calibration for robotic surgery}, author = {Roberti, Andrea and Piccinelli, Nicola and Meli, Daniele and Muradore, Riccardo and Fiorini, Paolo}, url = {https://ieeexplore.ieee.org/document/9239343}, doi = {10.1109/TMRB.2020.3033670}, issn = {2576-3202}, year = {2020}, date = {2020-10-26}, journal = {IEEE Transactions on Medical Robotics and Bionics (special issue for Hamlyn Symposium on Medical Robotics 2020)}, pages = {569--573}, abstract = {Autonomy is the next frontier of research in robotic surgery and its aim is to improve the quality of surgical procedures in the next future. One fundamental requirement for autonomy is advanced perception capability through vision sensors. In this article, we propose a novel calibration technique for a surgical scenario with a da Vinci ® Research Kit (dVRK) robot. Camera and robotic arms calibration are necessary to precise position and emulate expert surgeon. The novel calibration technique is tailored for RGB-D cameras. Different tests performed on relevant use cases prove that we significantly improve precision and accuracy with respect to state of the art solutions for similar devices on a surgical-size setups. Moreover, our calibration method can be easily extended to standard surgical endoscope used in real surgical scenario.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Autonomy is the next frontier of research in robotic surgery and its aim is to improve the quality of surgical procedures in the next future. 
One fundamental requirement for autonomy is advanced perception capability through vision sensors. In this article, we propose a novel calibration technique for a surgical scenario with a da Vinci ® Research Kit (dVRK) robot. Camera and robotic arms calibration are necessary to precise position and emulate expert surgeon. The novel calibration technique is tailored for RGB-D cameras. Different tests performed on relevant use cases prove that we significantly improve precision and accuracy with respect to state of the art solutions for similar devices on a surgical-size setups. Moreover, our calibration method can be easily extended to standard surgical endoscope used in real surgical scenario. |
Daniele Meli, Paolo Fiorini, Mohan Sridharan Towards inductive learning of surgical task knowledge: a preliminary case study of the peg transfer task Journal Article Procedia Computer Science, pp. 440-449, 2020, ISSN: 1877-0509. @article{Meli2020, title = {Towards inductive learning of surgical task knowledge: a preliminary case study of the peg transfer task}, author = {Meli, Daniele and Fiorini, Paolo and Sridharan, Mohan}, editor = {Procedia of Computer Science - special issue Knowledge-Based and Intelligent Information \& Engineering Systems: Proceedings of the 24th International Conference KES2020}, url = {http://hdl.handle.net/11562/1027998}, doi = {10.1016/j.procs.2020.08.046}, issn = {1877-0509}, year = {2020}, date = {2020-06-17}, journal = {Procedia Computer Science}, pages = {440--449}, abstract = {Autonomy in robotic surgery will significantly improve the quality of interventions in terms of safety and recovery time for the patient, and reduce fatigue of surgeons and hospital costs. A key requirement for such autonomy is the ability of the surgical system to encode and reason with commonsense task knowledge, and to adapt to variations introduced by the surgical scenarios and the individual patients. However, it is difficult to encode all the variability in surgical scenarios and in the anatomy of individual patients a priori, and new knowledge often needs to be acquired and merged with the existing knowledge. At the same time, it is not possible to provide a large number of labeled training examples in the robotic surgery. This paper presents a framework based on inductive logic programming and answer set semantics for incrementally learning domain knowledge from a limited number of executions of basic surgical tasks. As an illustrative example, we focus on the peg transfer task, and learn state constraints and the preconditions of actions starting from different levels of prior knowledge. 
We do so using a small dataset comprising human and robotic executions with the da Vinci surgical robot in a challenging simulated scenario.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Autonomy in robotic surgery will significantly improve the quality of interventions in terms of safety and recovery time for the patient, and reduce fatigue of surgeons and hospital costs. A key requirement for such autonomy is the ability of the surgical system to encode and reason with commonsense task knowledge, and to adapt to variations introduced by the surgical scenarios and the individual patients. However, it is difficult to encode all the variability in surgical scenarios and in the anatomy of individual patients a priori, and new knowledge often needs to be acquired and merged with the existing knowledge. At the same time, it is not possible to provide a large number of labeled training examples in the robotic surgery. This paper presents a framework based on inductive logic programming and answer set semantics for incrementally learning domain knowledge from a limited number of executions of basic surgical tasks. As an illustrative example, we focus on the peg transfer task, and learn state constraints and the preconditions of actions starting from different levels of prior knowledge. We do so using a small dataset comprising human and robotic executions with the da Vinci surgical robot in a challenging simulated scenario. |
Eleonora Tagliabue, Diego Dall’Alba, Enrico Magnabosco, Igor Peterlik, Paolo Fiorini Biomechanical modelling of probe to tissue interaction during ultrasound scanning Journal Article International Journal of Computer Assisted Radiology and Surgery, 2020. @article{Tagliabue2020, title = {Biomechanical modelling of probe to tissue interaction during ultrasound scanning}, author = {Tagliabue, Eleonora and Dall’Alba, Diego and Magnabosco, Enrico and Peterlik, Igor and Fiorini, Paolo}, publisher = {Springer}, url = {https://link.springer.com/article/10.1007/s11548-020-02183-2}, doi = {10.1007/s11548-020-02183-2}, year = {2020}, date = {2020-05-22}, journal = {International Journal of Computer Assisted Radiology and Surgery}, abstract = {Purpose Biomechanical simulation of anatomical deformations caused by ultrasound probe pressure is of outstanding importance for several applications, from the testing of robotic acquisition systems to multi-modal image fusion and development of ultrasound training platforms. Different approaches can be exploited for modelling the probe–tissue interaction, each achieving different trade-offs among accuracy, computation time and stability. Methods We assess the performances of different strategies based on the finite element method for modelling the interaction between the rigid probe and soft tissues. Probe–tissue contact is modelled using (i) penalty forces, (ii) constraint forces, and (iii) by prescribing the displacement of the mesh surface nodes. These methods are tested in the challenging context of ultrasound scanning of the breast, an organ undergoing large nonlinear deformations during the procedure. Results The obtained results are evaluated against those of a non-physically based method. 
While all methods achieve similar accuracy, performance in terms of stability and speed shows high variability, especially for those methods modelling the contacts explicitly. Overall, prescribing surface displacements is the approach with best performances, but it requires prior knowledge of the contact area and probe trajectory. Conclusions In this work, we present different strategies for modelling probe–tissue interaction, each able to achieve different compromises among accuracy, speed and stability. The choice of the preferred approach highly depends on the requirements of the specific clinical application. Since the presented methodologies can be applied to describe general tool–tissue interactions, this work can be seen as a reference for researchers seeking the most appropriate strategy to model anatomical deformation induced by the interaction with medical tools.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Purpose Biomechanical simulation of anatomical deformations caused by ultrasound probe pressure is of outstanding importance for several applications, from the testing of robotic acquisition systems to multi-modal image fusion and development of ultrasound training platforms. Different approaches can be exploited for modelling the probe–tissue interaction, each achieving different trade-offs among accuracy, computation time and stability. Methods We assess the performances of different strategies based on the finite element method for modelling the interaction between the rigid probe and soft tissues. Probe–tissue contact is modelled using (i) penalty forces, (ii) constraint forces, and (iii) by prescribing the displacement of the mesh surface nodes. These methods are tested in the challenging context of ultrasound scanning of the breast, an organ undergoing large nonlinear deformations during the procedure. Results The obtained results are evaluated against those of a non-physically based method. 
While all methods achieve similar accuracy, performance in terms of stability and speed shows high variability, especially for those methods modelling the contacts explicitly. Overall, prescribing surface displacements is the approach with best performances, but it requires prior knowledge of the contact area and probe trajectory. Conclusions In this work, we present different strategies for modelling probe–tissue interaction, each able to achieve different compromises among accuracy, speed and stability. The choice of the preferred approach highly depends on the requirements of the specific clinical application. Since the presented methodologies can be applied to describe general tool–tissue interactions, this work can be seen as a reference for researchers seeking the most appropriate strategy to model anatomical deformation induced by the interaction with medical tools. |
2019 |
Tagliabue E, Dall’Alba D, Magnabosco E, Tenga C, Peterlik I, Courtecouisse H, Fiorini P Position-based simulation of deformations for autonomous robotic ultrasound scanning. Proceeding I-RIM Conference Rome (Italy), 2019. @proceedings{E2019b, title = {Position-based simulation of deformations for autonomous robotic ultrasound scanning.}, author = {Tagliabue, E. and Dall’Alba, D. and Magnabosco, E. and Tenga, C. and Peterlik, I. and Courtecouisse, H. and Fiorini, P.}, editor = {I-RIM Conference}, url = {https://drive.google.com/file/d/1D-ErdqroMPMbjW7WeTeZV2R7R87RCCXs/view}, year = {2019}, date = {2019-10-18}, address = {Rome (Italy)}, organization = {I-RIM Conference}, abstract = {Realistic and fast simulation of anatomical deformations due to ultrasound probe pressure is of outstanding importance for testing and validation of autonomous robotic ultrasound systems. We propose a deformation model which relies on the position-based dynamics (PBD) approach to simulate the probe-tissue interaction and predict the displacement of internal targets during US acquisition. Performances of the patient-specific PBD anatomical model are evaluated in comparison to two different simulations relying on the traditional finite element (FE) method, in the context of breast ultrasound scanning. Localization error obtained when applying the PBD model remains below 11 mm for all the tumors even for input displacements in the order of 30 mm. The proposed method is able to achieve a better trade-off among accuracy, computation time and generalization capabilities with respect to the two FE models. Position-based dynamics approach has proved to be successful in modeling breast tissue deformations during US acquisition. 
It represents a valid alternative to classical FE methods for simulating the interaction between US probe and tissues.}, keywords = {}, pubstate = {published}, tppubtype = {proceedings} } Realistic and fast simulation of anatomical deformations due to ultrasound probe pressure is of outstanding importance for testing and validation of autonomous robotic ultrasound systems. We propose a deformation model which relies on the position-based dynamics (PBD) approach to simulate the probetissue interaction and predict the displacement of internal targets during US acquisition. Performances of the patient-specific PBD anatomical model are evaluated in comparison to two different simulations relying on the traditional finite element (FE) method, in the context of breast ultrasound scanning. Localization error obtained when applying the PBD model remains below 11 mm for all the tumors even for input displacements in the order of 30 mm. The proposed method is able to achieve a better trade-off among accuracy, computation time and generalization capabilities with respect to the two FE models. Position-based dynamics approach has proved to be successful in modeling breast tissue deformations during US acquisition. It represents a valid alternative to classical FE methods for simulating the interaction between US probe and tissues. |
Dall’Alba D, Tagliabue E, Magnabosco E, Tenga C, Fiorini P Real-time prediction of breast lesions displacement during Ultrasound scanning using a position-based dynamics approach Proceeding The Hamlyn Symposium on Medical Robotics London (UK), 2019. @proceedings{DallAlba2019hamlyn, title = {Real-time prediction of breast lesions displacement during Ultrasound scanning using a position-based dynamics approach}, author = {Dall’Alba, D. and Tagliabue, E. and Magnabosco, E. and Tenga, C. and Fiorini, P.}, editor = {The Hamlyn Symposium on Medical Robotics}, url = {https://www.ukras.org/wp-content/uploads/2019/06/proceedings_HSMR19-MK-reduced.pdf}, doi = {10.31256/HSMR2019.14}, year = {2019}, date = {2019-10-13}, pages = {27--28}, address = {London (UK)}, organization = {The Hamlyn Symposium on Medical Robotics}, keywords = {}, pubstate = {published}, tppubtype = {proceedings} } |
Menegozzo, Giovanni; Dall’Alba, Diego; Roberti, Andrea; Fiorini, Paolo Automatic process modeling with time delay neural network based on low-level data. Journal Article Procedia Manufacturing, 38 , pp. 125–132, 2019. @article{menegozzo2019automatic, title = {Automatic process modeling with time delay neural network based on low-level data.}, author = {Giovanni Menegozzo and Diego Dall’Alba and Andrea Roberti and Paolo Fiorini}, url = {https://www.sciencedirect.com/science/article/pii/S2351978920300172/pdf?md5=92de1e9af7b7717c4a79e0a7ab8872f4&pid=1-s2.0-S2351978920300172-main.pdf}, year = {2019}, date = {2019-06-24}, journal = {Procedia Manufacturing}, volume = {38}, pages = {125--132}, publisher = {Elsevier}, abstract = {Automatic process modelling (APM) is an enabling technology for the development of intelligent manufacturingsystems (IMSs). The analysis of obtained models enables the prompt detection of error-prone steps and the design of proper mitigation strategies, in all aspects of the manufacturing process, from parameter optimization to development of customized personnel training. In this work we propose a Time Delay Neural Network (TDNN) applied to low level data for the automatic recognition of different process phases in industrial collaborative tasks. We selected TDNN because they are suited for modelling time dependent processes over long sequences while maintaining computational efficiency. To experimentally evaluate the recognition performance and the generalization capability of the proposed method, we acquired two novel datasets reproducing a typical IMS setting. Datasets (including manually annotated ground-truth labels) are publicly available to enable other methods to be tested on them and they replicate typical Industry 4.0 setting. The first dataset replicates a collaborative robotic environment where a human operator interacts with a robotic manipulator in the execution of a pick and place task. 
The second set represents a human tele-operated robotic assisted manipulation for assembly applications. The obtained results are superior to other methods available in literature and demonstrate an improved computational performance.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Automatic process modelling (APM) is an enabling technology for the development of intelligent manufacturingsystems (IMSs). The analysis of obtained models enables the prompt detection of error-prone steps and the design of proper mitigation strategies, in all aspects of the manufacturing process, from parameter optimization to development of customized personnel training. In this work we propose a Time Delay Neural Network (TDNN) applied to low level data for the automatic recognition of different process phases in industrial collaborative tasks. We selected TDNN because they are suited for modelling time dependent processes over long sequences while maintaining computational efficiency. To experimentally evaluate the recognition performance and the generalization capability of the proposed method, we acquired two novel datasets reproducing a typical IMS setting. Datasets (including manually annotated ground-truth labels) are publicly available to enable other methods to be tested on them and they replicate typical Industry 4.0 setting. The first dataset replicates a collaborative robotic environment where a human operator interacts with a robotic manipulator in the execution of a pick and place task. The second set represents a human tele-operated robotic assisted manipulation for assembly applications. The obtained results are superior to other methods available in literature and demonstrate an improved computational performance. |
Fiorini, P; Dall'Alba, D; Ginesi, Michele; Maris, Bogdan; Meli, Daniele; Nakawala, Hirenkumar; Roberti, Andrea Challenges of Autonomous Robotic Surgery Proceeding The Hamlyn Symposium on Medical Robotics London (UK), 2019. @proceedings{fiorini2019challenges, title = {Challenges of Autonomous Robotic Surgery}, author = {Fiorini, P. and Dall'Alba, D. and Ginesi, Michele and Maris, Bogdan and Meli, Daniele and Nakawala, Hirenkumar and Roberti, Andrea}, editor = {The Hamlyn Symposium on Medical Robotics}, url = {https://www.ukras.org/wp-content/uploads/2019/06/proceedings_HSMR19-MK-reduced.pdf}, doi = {10.31256/HSMR2019.53}, year = {2019}, date = {2019-06-23}, pages = {105--106}, address = {London (UK)}, organization = {The Hamlyn Symposium on Medical Robotics}, keywords = {}, pubstate = {published}, tppubtype = {proceedings} } |
Menegozzo, Giovanni; Dall’Alba, Diego; Zandonà, Chiara; Fiorini, Paolo Surgical gesture recognition with time delay neural network based on kinematic data Proceeding IEEE International Symposium on Medical Robotics (ISMR), Georgia (USA), 2019. @proceedings{menegozzo2019surgical, title = {Surgical gesture recognition with time delay neural network based on kinematic data}, author = {Giovanni Menegozzo and Diego Dall’Alba and Chiara Zandon{\`a} and Paolo Fiorini}, doi = {10.1109/ismr.2019.8710178}, year = {2019}, date = {2019-04-03}, booktitle = {2019 International Symposium on Medical Robotics (ISMR)}, pages = {1--7}, publisher = {International Symposium on Medical Robotics (ISMR)}, address = {Georgia (USA)}, organization = {IEEE}, abstract = {Abstract—Automatic gesture recognition during surgical procedures is an enabling technology for improving advanced assistance features in surgical robotic systems (SRSs). Examples of such advanced features are user-specific feedback during execution of complex actions, prompt detection of safety-critical situations and autonomous execution of procedure sub-steps. Video data are available for all minimally invasive surgical procedures, but SRS could also provide accurate movements measurements based on kinematic data. Kinematic data provide low dimensional features for gesture recognition that would enable on-line processing during data acquisition. Therefore, we propose a Time Delay Neural Network (TDNN) applied to kinematic data for introducing temporal modelling in gesture recognition. We evaluate accuracy and precision of the proposed method on public benchmark dataset for surgical gesture recognition (JIGSAWS). To evaluate the generalization capability of the proposed method, we acquired a new dataset introducing a different training exercise executed in virtual environment. The dataset is publicly available to enable other methods to be tested on it. 
The obtained results are comparable with other methods available in literature keeping also computational performance compatible with on-line processing during surgical procedure. The proposed method and the novel dataset are key-components in the development of future autonomous SRSs with advanced situation awareness capabilities. Index Terms—Time Delay Neural Network, TDNN, surgical gesture segmentation,}, keywords = {}, pubstate = {published}, tppubtype = {proceedings} } Abstract—Automatic gesture recognition during surgical procedures is an enabling technology for improving advanced assistance features in surgical robotic systems (SRSs). Examples of such advanced features are user-specific feedback during execution of complex actions, prompt detection of safety-critical situations and autonomous execution of procedure sub-steps. Video data are available for all minimally invasive surgical procedures, but SRS could also provide accurate movements measurements based on kinematic data. Kinematic data provide low dimensional features for gesture recognition that would enable on-line processing during data acquisition. Therefore, we propose a Time Delay Neural Network (TDNN) applied to kinematic data for introducing temporal modelling in gesture recognition. We evaluate accuracy and precision of the proposed method on public benchmark dataset for surgical gesture recognition (JIGSAWS). To evaluate the generalization capability of the proposed method, we acquired a new dataset introducing a different training exercise executed in virtual environment. The dataset is publicly available to enable other methods to be tested on it. The obtained results are comparable with other methods available in literature keeping also computational performance compatible with on-line processing during surgical procedure. The proposed method and the novel dataset are key-components in the development of future autonomous SRSs with advanced situation awareness capabilities. 
Index Terms—Time Delay Neural Network, TDNN, surgical gesture segmentation, |
Tagliabue E, Dall’Alba D, Magnabosco E, Tenga C, Fiorini P A position-based framework for the prediction of probe-induced lesion displacement in Ultrasound-guided breast biopsy. Proceeding 9th Joint Workshop on New Technologies for Computer/Robot Assisted Surgery CRAS Genoa (Italy), 2019. @proceedings{E2019c, title = {A position-based framework for the prediction of probe-induced lesion displacement in Ultrasound-guided breast biopsy.}, author = {Tagliabue, E. and Dall’Alba, D. and Magnabosco, E. and Tenga, C. and Fiorini, P.}, editor = {9th Joint Workshop on New Technologies for Computer/Robot Assisted Surgery CRAS}, url = {https://cras-eu.org/wp-content/uploads/2019/11/CRAS_2019_proceedings_official.pdf}, year = {2019}, date = {2019-03-21}, address = {Genoa (Italy)}, organization = {9th Joint Workshop on New Technologies for Computer/Robot Assisted Surgery CRAS}, keywords = {}, pubstate = {published}, tppubtype = {proceedings} } |
G. Menegozzo, D. Dall’Alba, C. Zandonà, P. Fiorini Surgical Gesture and Error Recognition with Time Delay Neural Network on Kinematic Data Proceeding 9th Joint Workshop on New Technologies for Computer/Robot Assisted Surgery CRAS, Genoa, Italy, 2019. @proceedings{Menegozzo2019c, title = {Surgical Gesture and Error Recognition with Time Delay Neural Network on Kinematic Data}, author = {Menegozzo, G. and Dall’Alba, D. and Zandon{\`a}, C. and Fiorini, P.}, editor = {9th Joint Workshop on New Technologies for Computer/Robot Assisted Surgery CRAS, Genoa, Italy}, url = {http://hdl.handle.net/11562/1020321}, year = {2019}, date = {2019-03-21}, publisher = {9th Joint Workshop on New Technologies for Computer/Robot Assisted Surgery CRAS, Genoa, Italy}, keywords = {}, pubstate = {published}, tppubtype = {proceedings} } |
Ginesi, Michele; Meli, Daniele; Nakawala, Hirenkumar; Roberti, Andrea; Fiorini, Paolo A knowledge-based framework for task automation in surgery Proceeding 2019 19th International Conference on Advanced Robotics (ICAR) IEEE, 2019. @proceedings{ginesi2019knowledge, title = {A knowledge-based framework for task automation in surgery}, author = {Michele Ginesi and Daniele Meli and Hirenkumar Nakawala and Andrea Roberti and Paolo Fiorini}, url = {https://ieeexplore.ieee.org/document/8981619}, doi = {https://doi.org/10.1109/ICAR46387.2019.8981619}, year = {2019}, date = {2019-01-01}, booktitle = {2019 19th International Conference on Advanced Robotics (ICAR)}, pages = {37--42}, publisher = {IEEE}, organization = {2019 19th International Conference on Advanced Robotics (ICAR)}, abstract = {Robotic surgery has significantly improved the quality of surgical procedures. In the past, researches have been focused on automating simple surgical actions. However, there exists no scalable framework for automation in surgery. In this paper, we present a knowledge-based modular framework for the automation of articulated surgical tasks, for example, with multiple coordinated actions. The framework is consisted of ontology, providing entities for surgical automation and rules for task planning, and “dynamic movement primitives” as adaptive motion planner as to replicate the dexterity of surgeons. To validate our framework, we chose a paradigmatic scenario of a peg-and-ring task, a standard training exercise for novice surgeons which presents many challenges of real surgery, e.g. grasping and transferring. Experiments show the validity of the framework and its adaptability to faulty events. The modular architecture is expected to generalize to different tasks and platforms.}, keywords = {}, pubstate = {published}, tppubtype = {proceedings} } Robotic surgery has significantly improved the quality of surgical procedures. 
In the past, researches have been focused on automating simple surgical actions. However, there exists no scalable framework for automation in surgery. In this paper, we present a knowledge-based modular framework for the automation of articulated surgical tasks, for example, with multiple coordinated actions. The framework is consisted of ontology, providing entities for surgical automation and rules for task planning, and “dynamic movement primitives” as adaptive motion planner as to replicate the dexterity of surgeons. To validate our framework, we chose a paradigmatic scenario of a peg-and-ring task, a standard training exercise for novice surgeons which presents many challenges of real surgery, e.g. grasping and transferring. Experiments show the validity of the framework and its adaptability to faulty events. The modular architecture is expected to generalize to different tasks and platforms. |
Ginesi, Michele; Meli, Daniele; Calanca, Andrea; Dall’Alba, Diego; Sansonetto, Nicola; Fiorini, Paolo Dynamic Movement Primitives: Volumetric Obstacle Avoidance Proceeding 2019 19th International Conference on Advanced Robotics (ICAR) IEEE, 2019. @proceedings{ginesi2019dynamic, title = {Dynamic Movement Primitives: Volumetric Obstacle Avoidance}, author = {Michele Ginesi and Daniele Meli and Andrea Calanca and Diego Dall’Alba and Nicola Sansonetto and Paolo Fiorini}, url = {https://ieeexplore.ieee.org/abstract/document/8981552}, doi = {10.1109/ICAR46387.2019.8981552}, year = {2019}, date = {2019-01-01}, booktitle = {2019 19th International Conference on Advanced Robotics (ICAR)}, pages = {234--239}, publisher = {IEEE}, organization = {2019 19th International Conference on Advanced Robotics (ICAR)}, abstract = {Dynamic Movement Primitives (DMPs) are a framework for learning a trajectory from a demonstration. The trajectory can be learned efficiently after only one demonstration, and it is immediate to adapt it to new goal positions and time duration. Moreover, the trajectory is also robust against perturbations. However, obstacle avoidance for DMPs is still an open problem. In this work, we propose an extension of DMPs to support volumetric obstacle avoidance based on the use of superquadric potentials. We show the advantages of this approach when obstacles have known shape, and we extend it to unknown objects using minimal enclosing ellipsoids. A simulation and experiments with a real robot validate the framework, and we make freely available our implementation.}, keywords = {}, pubstate = {published}, tppubtype = {proceedings} } Dynamic Movement Primitives (DMPs) are a framework for learning a trajectory from a demonstration. The trajectory can be learned efficiently after only one demonstration, and it is immediate to adapt it to new goal positions and time duration. Moreover, the trajectory is also robust against perturbations. 
However, obstacle avoidance for DMPs is still an open problem. In this work, we propose an extension of DMPs to support volumetric obstacle avoidance based on the use of superquadric potentials. We show the advantages of this approach when obstacles have known shape, and we extend it to unknown objects using minimal enclosing ellipsoids. A simulation and experiments with a real robot validate the framework, and we make freely available our implementation. |
Tagliabue, Eleonora; Dall’Alba, Diego; Magnabosco, Enrico; Tenga, Chiara; Peterlík, Igor; Fiorini, Paolo Position-based modeling of lesion displacement in Ultrasound-guided breast biopsy Journal Article International journal of computer assisted radiology and surgery, 14 (8), pp. 1329–1339, 2019. @article{tagliabue2019positionb, title = {Position-based modeling of lesion displacement in Ultrasound-guided breast biopsy}, author = {Eleonora Tagliabue and Diego Dall’Alba and Enrico Magnabosco and Chiara Tenga and Igor Peterl{\'\i}k and Paolo Fiorini}, url = {https://link.springer.com/article/10.1007/s11548-019-01997-z}, doi = {10.1007/s11548-019-01997-z}, year = {2019}, date = {2019-01-01}, journal = {International journal of computer assisted radiology and surgery}, volume = {14}, number = {8}, pages = {1329--1339}, publisher = {Springer}, abstract = {Purpose Although ultrasound (US) images represent the most popular modality for guiding breast biopsy, malignant regions are often missed by sonography, thus preventing accurate lesion localization which is essential for a successful procedure. Biomechanical models can support the localization of suspicious areas identified on a preoperative image during US scanning since they are able to account for anatomical deformations resulting from US probe pressure. We propose a deformation model which relies on position-based dynamics (PBD) approach to predict the displacement of internal targets induced by probe interaction during US acquisition. Methods The PBD implementation available in NVIDIA FleX is exploited to create an anatomical model capable of deforming online. Simulation parameters are initialized on a calibration phantom under different levels of probe-induced deformations; then, they are fine-tuned by minimizing the localization error of a US–visible landmark of a realistic breast phantom. 
The updated model is used to estimate the displacement of other internal lesions due to probe-tissue interaction. Results The localization error obtained when applying the PBD model remains below 11 mm for all the tumors even for input displacements in the order of 30 mm. This proposed method obtains results aligned with FE models with faster computational performance, suitable for real-time applications. In addition, it outperforms rigid model used to track lesion position in US-guided breast biopsies, at least halving the localization error for all the displacement ranges considered. Conclusion Position-based dynamics approach has proved to be successful in modeling breast tissue deformations during US acquisition. Its stability, accuracy and real-time performance make such model suitable for tracking lesions displacement during US-guided breast biopsy.}, keywords = {}, pubstate = {published}, tppubtype = {article} } Purpose Although ultrasound (US) images represent the most popular modality for guiding breast biopsy, malignant regions are often missed by sonography, thus preventing accurate lesion localization which is essential for a successful procedure. Biomechanical models can support the localization of suspicious areas identified on a preoperative image during US scanning since they are able to account for anatomical deformations resulting from US probe pressure. We propose a deformation model which relies on position-based dynamics (PBD) approach to predict the displacement of internal targets induced by probe interaction during US acquisition. Methods The PBD implementation available in NVIDIA FleX is exploited to create an anatomical model capable of deforming online. Simulation parameters are initialized on a calibration phantom under different levels of probe-induced deformations; then, they are fine-tuned by minimizing the localization error of a US–visible landmark of a realistic breast phantom. 
The updated model is used to estimate the displacement of other internal lesions due to probe-tissue interaction. Results The localization error obtained when applying the PBD model remains below 11 mm for all the tumors even for input displacements in the order of 30 mm. This proposed method obtains results aligned with FE models with faster computational performance, suitable for real-time applications. In addition, it outperforms rigid model used to track lesion position in US-guided breast biopsies, at least halving the localization error for all the displacement ranges considered. Conclusion Position-based dynamics approach has proved to be successful in modeling breast tissue deformations during US acquisition. Its stability, accuracy and real-time performance make such model suitable for tracking lesions displacement during US-guided breast biopsy. |
Mendizabal, Andrea; Tagliabue, Eleonora; Brunet, Jean-Nicolas; Dall'Alba, Diego; Fiorini, Paolo; Cotin, Stéphane Physics-based Deep Neural Network for Real-Time Lesion Tracking in Ultrasound-guided Breast Biopsy Workshop Computational biomechanics for medicine workshop, MICCAI Shenzhen, China, 2019. @workshop{mendizabal:hal-02311277, title = {Physics-based Deep Neural Network for Real-Time Lesion Tracking in Ultrasound-guided Breast Biopsy}, author = {Andrea Mendizabal and Eleonora Tagliabue and Jean-Nicolas Brunet and Diego Dall'Alba and Paolo Fiorini and Stéphane Cotin}, editor = {Computational biomechanics for medicine workshop at MICCAI}, url = {https://hal.inria.fr/hal-02311277}, year = {2019}, date = {2019-01-01}, booktitle = {Computational biomechanics for medicine workshop}, address = {Shenzhen, China}, organization = {MICCAI}, keywords = {}, pubstate = {published}, tppubtype = {workshop} } |
Cheng, Z.; Dall'Alba, D.; Foti, S.; Mariani, A.; Chupin, T.; Caldwell, D.; Fiorini, P.; De Momi, E.; Ferrigno, G.; Mattos, L. Design and integration of electrical bio-impedance sensing in surgical robotic tools for tissue identification Journal Article 8th joint workshop on New Technologies for Computer/Robot Assisted Surgery, 6 , pp. 55, 2019. @article{cheng2019design, title = {Design and integration of electrical bio-impedance sensing in surgical robotic tools for tissue identification}, author = {Z. Cheng and D. Dall'Alba and S. Foti and A. Mariani and T. Chupin and D. Caldwell and P. Fiorini and E. De Momi and G. Ferrigno and L. Mattos}, url = {http://hdl.handle.net/11562/1018566}, year = {2019}, date = {2019-01-01}, journal = {8th joint workshop on New Technologies for Computer/Robot Assisted Surgery}, volume = {6}, pages = {55}, publisher = {Frontiers}, keywords = {}, pubstate = {published}, tppubtype = {article} } |
Cheng, Zhuoqi; Dall’Alba, Diego; Caldwell, Darwin G; Fiorini, Paolo; Mattos, Leonardo S Design and Integration of Electrical Bio-Impedance Sensing in a Bipolar Forceps for Soft Tissue Identification: A Feasibility Study Inproceedings XVII International Conference on Electrical Bioimpedance, Joinville, pp. 3–10, Brazil, 2019. @inproceedings{cheng2019designb, title = {Design and Integration of Electrical Bio-Impedance Sensing in a Bipolar Forceps for Soft Tissue Identification: A Feasibility Study}, author = {Zhuoqi Cheng and Diego Dall’Alba and Darwin G Caldwell and Paolo Fiorini and Leonardo S Mattos}, url = {https://iris.univr.it/}, year = {2019}, date = {2019-01-01}, booktitle = {XVII International Conference on Electrical Bioimpedance, Joinville}, pages = {3--10}, address = {Brazil}, abstract = {This paper presents the integration of electrical bio-impedance sensing technology into a bipolar surgical forceps for soft tissue identification during a robotic assisted procedure. The EBI sensing is done by pressing the forceps on the target tissue with a controlled pressing depth and a controlled jaw opening distance. The impact of these 2 parameters are characterized by finite element simulation. Subsequently, an experiment is conducted with 4 types of ex-vivo tissues including liver, kidney, lung and muscle. The experimental results demonstrate that the proposed EBI sensing method can identify these 4 tissue types with an accuracy higher than 92.82%.}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } This paper presents the integration of electrical bio-impedance sensing technology into a bipolar surgical forceps for soft tissue identification during a robotic assisted procedure. The EBI sensing is done by pressing the forceps on the target tissue with a controlled pressing depth and a controlled jaw opening distance. The impact of these 2 parameters are characterized by finite element simulation. Subsequently, an experiment is conducted with 4 types of ex-vivo tissues including liver, kidney, lung and muscle. 
The experimental results demonstrate that the proposed EBI sensing method can identify these 4 tissue types with an accuracy higher than 92.82%. |
Cheng, Zhuoqi; Dall'Alba, Diego; Foti, Simone; Mariani, Andrea; Chupin, Thibaud Jean Eudes; Caldwell, Darwin Gordon; Ferrigno, Giancarlo; Momi, Elena De; Mattos, Leonardo S; Fiorini, Paolo Design and integration of electrical bio-impedance sensing in surgical robotic tools for tissue identification and display Journal Article Frontiers in Robotics and AI, 6 , pp. 55, 2019, ISSN: 2296-9144. @article{cheng2019designc, title = {Design and integration of electrical bio-impedance sensing in surgical robotic tools for tissue identification and display}, author = {Zhuoqi Cheng and Diego Dall'Alba and Simone Foti and Andrea Mariani and Thibaud Jean Eudes Chupin and Darwin Gordon Caldwell and Giancarlo Ferrigno and Elena De Momi and Leonardo S Mattos and Paolo Fiorini}, doi = {10.3389/frobt.2019.00055}, issn = {2296-9144}, year = {2019}, date = {2019-01-01}, journal = {Frontiers in Robotics and AI}, volume = {6}, pages = {55}, publisher = {Frontiers}, abstract = {The integration of intra-operative sensors into surgical robots is a hot research topic since this can significantly facilitate complex surgical procedures by enhancing surgical awareness with real-time tissue information. However, currently available intra-operative sensing technologies are mainly based on image processing and force feedback, which normally require heavy computation or complicated hardware modifications of existing surgical tools. This paper presents the design and integration of electrical bio-impedance sensing into a commercial surgical robot tool, leading to the creation of a novel smart instrument that allows the identification of tissues by simply touching them. In addition, an advanced user interface is designed to provide guidance during the use of the system and to allow augmented-reality visualization of the tissue identification results. 
The proposed system imposes minor hardware modifications to an existing surgical tool, but adds the capability to provide a wealth of data about the tissue being manipulated. This has great potential to allow the surgeon (or an autonomous robotic system) to better understand the surgical environment. To evaluate the system, a series of ex-vivo experiments were conducted. The experimental results demonstrate that the proposed sensing system can successfully identify different tissue types with 100% classification accuracy. In addition, the user interface was shown to effectively and intuitively guide the user to measure the electrical impedance of the target tissue, presenting the identification results as augmented-reality markers for simple and immediate recognition.}, keywords = {}, pubstate = {published}, tppubtype = {article} } The integration of intra-operative sensors into surgical robots is a hot research topic since this can significantly facilitate complex surgical procedures by enhancing surgical awareness with real-time tissue information. However, currently available intra-operative sensing technologies are mainly based on image processing and force feedback, which normally require heavy computation or complicated hardware modifications of existing surgical tools. This paper presents the design and integration of electrical bio-impedance sensing into a commercial surgical robot tool, leading to the creation of a novel smart instrument that allows the identification of tissues by simply touching them. In addition, an advanced user interface is designed to provide guidance during the use of the system and to allow augmented-reality visualization of the tissue identification results. The proposed system imposes minor hardware modifications to an existing surgical tool, but adds the capability to provide a wealth of data about the tissue being manipulated. 
This has great potential to allow the surgeon (or an autonomous robotic system) to better understand the surgical environment. To evaluate the system, a series of ex-vivo experiments were conducted. The experimental results demonstrate that the proposed sensing system can successfully identify different tissue types with 100% classification accuracy. In addition, the user interface was shown to effectively and intuitively guide the user to measure the electrical impedance of the target tissue, presenting the identification results as augmented-reality markers for simple and immediate recognition. |
Nakawala, Hirenkumar; Momi, Elena De; Bianchi, Roberto; Catellani, Michele; Cobelli, Ottavio De; Jannin, Pierre; Ferrigno, Giancarlo; Fiorini, Paolo Toward a Neural-Symbolic Framework for Automated Workflow Analysis in Surgery Inproceedings Mediterranean Conference on Medical and Biological Engineering and Computing, pp. 1551–1558, Springer 2019, ISSN: 16800737. @inproceedings{nakawala2019toward, title = {Toward a Neural-Symbolic Framework for Automated Workflow Analysis in Surgery}, author = {Hirenkumar Nakawala and Elena De Momi and Roberto Bianchi and Michele Catellani and Ottavio De Cobelli and Pierre Jannin and Giancarlo Ferrigno and Paolo Fiorini}, doi = {https://doi.org/10.1007/978-3-030-31635-8_192}, issn = {16800737}, year = {2019}, date = {2019-01-01}, booktitle = {Mediterranean Conference on Medical and Biological Engineering and Computing}, pages = {1551--1558}, organization = {Springer}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } |
2018 |
Nakawala, Hirenkumar; Goncalves, Paulo JS; Fiorini, Paolo; Ferrigno, Giancarlo; Momi, Elena De Approaches for action sequence representation in robotics: a review Inproceedings 2018 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pp. 5666–5671, IEEE 2018, ISSN: 2153-0866. @inproceedings{nakawala2018approaches, title = {Approaches for action sequence representation in robotics: a review}, author = {Hirenkumar Nakawala and Paulo JS Goncalves and Paolo Fiorini and Giancarlo Ferrigno and Elena De Momi}, doi = {10.1109/IROS.2018.8594256}, issn = {2153-0866}, year = {2018}, date = {2018-10-01}, booktitle = {2018 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)}, pages = {5666--5671}, organization = {IEEE}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} } |
Foti, S.; Mariani, A.; Chupin, T.; Dall'Alba, D.; Cheng, Z.; Mattos, L.; Caldwell, D.; Fiorini, P.; De Momi, E.; Ferrigno, G. Advanced User Interface for Augmented Information Display on Endoscopic Surgical Images Workshop 8th joint workshop on New Technologies for Computer/Robot Assisted Surgery CRAS, London (UK), 2018. @workshop{Foti2018, title = {Advanced User Interface for Augmented Information Display on Endoscopic Surgical Images}, author = {S. Foti and A. Mariani and T. Chupin and D. Dall'Alba and Z. Cheng and L. Mattos and D. Caldwell and P. Fiorini and E. De Momi and G. Ferrigno}, editor = {8th joint workshop on New Technologies for Computer/Robot Assisted Surgery}, url = {https://iris.univr.it/}, year = {2018}, date = {2018-09-10}, publisher = {8th joint workshop on New Technologies for Computer/Robot Assisted Surgery CRAS, London (UK)}, keywords = {}, pubstate = {published}, tppubtype = {workshop} } |